diff --git a/.gitattributes b/.gitattributes index 5ca500838b66b97b0476aac04bb5b4b15b9c4577..2e0ba4084e2414fed4a03f6031fef3862b43708a 100644 --- a/.gitattributes +++ b/.gitattributes @@ -63,3 +63,5 @@ lib/python3.10/site-packages/libkenlm.so filter=lfs diff=lfs merge=lfs -text lib/python3.10/site-packages/tokenizers/tokenizers.abi3.so filter=lfs diff=lfs merge=lfs -text lib/python3.10/site-packages/google/_upb/_message.abi3.so filter=lfs diff=lfs merge=lfs -text lib/python3.10/site-packages/propcache/_helpers_c.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +lib/python3.10/site-packages/rpds/rpds.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +lib/python3.10/site-packages/grpc/_cython/cygrpc.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/lib/python3.10/site-packages/grpc/_cython/cygrpc.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/grpc/_cython/cygrpc.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..e1a03993a372c8f2e403d2f7b9ad83f65016d437 --- /dev/null +++ b/lib/python3.10/site-packages/grpc/_cython/cygrpc.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c7102abde1cc85028759add9b76688e59cf5915a7c2c9e59061cd0bee2d6254 +size 13677208 diff --git a/lib/python3.10/site-packages/nltk/app/__init__.py b/lib/python3.10/site-packages/nltk/app/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d4bbf1831e714c40514313293ae9027e181b8a77 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/app/__init__.py @@ -0,0 +1,47 @@ +# Natural Language Toolkit: Applications package +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +Interactive NLTK Applications: + +chartparser: Chart Parser +chunkparser: Regular-Expression Chunk Parser +collocations: Find collocations in 
text +concordance: Part-of-speech concordancer +nemo: Finding (and Replacing) Nemo regular expression tool +rdparser: Recursive Descent Parser +srparser: Shift-Reduce Parser +wordnet: WordNet Browser +""" + + +# Import Tkinter-based modules if Tkinter is installed +try: + import tkinter +except ImportError: + import warnings + + warnings.warn("nltk.app package not loaded (please install Tkinter library).") +else: + from nltk.app.chartparser_app import app as chartparser + from nltk.app.chunkparser_app import app as chunkparser + from nltk.app.collocations_app import app as collocations + from nltk.app.concordance_app import app as concordance + from nltk.app.nemo_app import app as nemo + from nltk.app.rdparser_app import app as rdparser + from nltk.app.srparser_app import app as srparser + from nltk.app.wordnet_app import app as wordnet + + try: + from matplotlib import pylab + except ImportError: + import warnings + + warnings.warn("nltk.app.wordfreq not loaded (requires the matplotlib library).") + else: + from nltk.app.wordfreq_app import app as wordfreq diff --git a/lib/python3.10/site-packages/nltk/app/chartparser_app.py b/lib/python3.10/site-packages/nltk/app/chartparser_app.py new file mode 100644 index 0000000000000000000000000000000000000000..53a938c642c6dcfe23fc085205cac3a541821207 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/app/chartparser_app.py @@ -0,0 +1,2569 @@ +# Natural Language Toolkit: Chart Parser Application +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Jean Mark Gawron +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +A graphical tool for exploring chart parsing. + +Chart parsing is a flexible parsing algorithm that uses a data +structure called a "chart" to record hypotheses about syntactic +constituents. Each hypothesis is represented by a single "edge" on +the chart. A set of "chart rules" determine when new edges can be +added to the chart. 
This set of rules controls the overall behavior +of the parser (e.g. whether it parses top-down or bottom-up). + +The chart parsing tool demonstrates the process of parsing a single +sentence, with a given grammar and lexicon. Its display is divided +into three sections: the bottom section displays the chart; the middle +section displays the sentence; and the top section displays the +partial syntax tree corresponding to the selected edge. Buttons along +the bottom of the window are used to control the execution of the +algorithm. + +The chart parsing tool allows for flexible control of the parsing +algorithm. At each step of the algorithm, you can select which rule +or strategy you wish to apply. This allows you to experiment with +mixing different strategies (e.g. top-down and bottom-up). You can +exercise fine-grained control over the algorithm by selecting which +edge you wish to apply a rule to. +""" + +# At some point, we should rewrite this tool to use the new canvas +# widget system. + + +import os.path +import pickle +from tkinter import ( + Button, + Canvas, + Checkbutton, + Frame, + IntVar, + Label, + Menu, + Scrollbar, + Tk, + Toplevel, +) +from tkinter.filedialog import askopenfilename, asksaveasfilename +from tkinter.font import Font +from tkinter.messagebox import showerror, showinfo + +from nltk.draw import CFGEditor, TreeSegmentWidget, tree_to_treesegment +from nltk.draw.util import ( + CanvasFrame, + ColorizedList, + EntryDialog, + MutableOptionMenu, + ShowText, + SymbolWidget, +) +from nltk.grammar import CFG, Nonterminal +from nltk.parse.chart import ( + BottomUpPredictCombineRule, + BottomUpPredictRule, + Chart, + LeafEdge, + LeafInitRule, + SingleEdgeFundamentalRule, + SteppingChartParser, + TopDownInitRule, + TopDownPredictRule, + TreeEdge, +) +from nltk.tree import Tree +from nltk.util import in_idle + +# Known bug: ChartView doesn't handle edges generated by epsilon +# productions (e.g., [Production: PP -> ]) very well. 
+ +####################################################################### +# Edge List +####################################################################### + + +class EdgeList(ColorizedList): + ARROW = SymbolWidget.SYMBOLS["rightarrow"] + + def _init_colortags(self, textwidget, options): + textwidget.tag_config("terminal", foreground="#006000") + textwidget.tag_config("arrow", font="symbol", underline="0") + textwidget.tag_config("dot", foreground="#000000") + textwidget.tag_config( + "nonterminal", foreground="blue", font=("helvetica", -12, "bold") + ) + + def _item_repr(self, item): + contents = [] + contents.append(("%s\t" % item.lhs(), "nonterminal")) + contents.append((self.ARROW, "arrow")) + for i, elt in enumerate(item.rhs()): + if i == item.dot(): + contents.append((" *", "dot")) + if isinstance(elt, Nonterminal): + contents.append((" %s" % elt.symbol(), "nonterminal")) + else: + contents.append((" %r" % elt, "terminal")) + if item.is_complete(): + contents.append((" *", "dot")) + return contents + + +####################################################################### +# Chart Matrix View +####################################################################### + + +class ChartMatrixView: + """ + A view of a chart that displays the contents of the corresponding matrix. 
+ """ + + def __init__( + self, parent, chart, toplevel=True, title="Chart Matrix", show_numedges=False + ): + self._chart = chart + self._cells = [] + self._marks = [] + + self._selected_cell = None + + if toplevel: + self._root = Toplevel(parent) + self._root.title(title) + self._root.bind("", self.destroy) + self._init_quit(self._root) + else: + self._root = Frame(parent) + + self._init_matrix(self._root) + self._init_list(self._root) + if show_numedges: + self._init_numedges(self._root) + else: + self._numedges_label = None + + self._callbacks = {} + + self._num_edges = 0 + + self.draw() + + def _init_quit(self, root): + quit = Button(root, text="Quit", command=self.destroy) + quit.pack(side="bottom", expand=0, fill="none") + + def _init_matrix(self, root): + cframe = Frame(root, border=2, relief="sunken") + cframe.pack(expand=0, fill="none", padx=1, pady=3, side="top") + self._canvas = Canvas(cframe, width=200, height=200, background="white") + self._canvas.pack(expand=0, fill="none") + + def _init_numedges(self, root): + self._numedges_label = Label(root, text="0 edges") + self._numedges_label.pack(expand=0, fill="none", side="top") + + def _init_list(self, root): + self._list = EdgeList(root, [], width=20, height=5) + self._list.pack(side="top", expand=1, fill="both", pady=3) + + def cb(edge, self=self): + self._fire_callbacks("select", edge) + + self._list.add_callback("select", cb) + self._list.focus() + + def destroy(self, *e): + if self._root is None: + return + try: + self._root.destroy() + except: + pass + self._root = None + + def set_chart(self, chart): + if chart is not self._chart: + self._chart = chart + self._num_edges = 0 + self.draw() + + def update(self): + if self._root is None: + return + + # Count the edges in each cell + N = len(self._cells) + cell_edges = [[0 for i in range(N)] for j in range(N)] + for edge in self._chart: + cell_edges[edge.start()][edge.end()] += 1 + + # Color the cells correspondingly. 
+ for i in range(N): + for j in range(i, N): + if cell_edges[i][j] == 0: + color = "gray20" + else: + color = "#00{:02x}{:02x}".format( + min(255, 50 + 128 * cell_edges[i][j] / 10), + max(0, 128 - 128 * cell_edges[i][j] / 10), + ) + cell_tag = self._cells[i][j] + self._canvas.itemconfig(cell_tag, fill=color) + if (i, j) == self._selected_cell: + self._canvas.itemconfig(cell_tag, outline="#00ffff", width=3) + self._canvas.tag_raise(cell_tag) + else: + self._canvas.itemconfig(cell_tag, outline="black", width=1) + + # Update the edge list. + edges = list(self._chart.select(span=self._selected_cell)) + self._list.set(edges) + + # Update our edge count. + self._num_edges = self._chart.num_edges() + if self._numedges_label is not None: + self._numedges_label["text"] = "%d edges" % self._num_edges + + def activate(self): + self._canvas.itemconfig("inactivebox", state="hidden") + self.update() + + def inactivate(self): + self._canvas.itemconfig("inactivebox", state="normal") + self.update() + + def add_callback(self, event, func): + self._callbacks.setdefault(event, {})[func] = 1 + + def remove_callback(self, event, func=None): + if func is None: + del self._callbacks[event] + else: + try: + del self._callbacks[event][func] + except: + pass + + def _fire_callbacks(self, event, *args): + if event not in self._callbacks: + return + for cb_func in list(self._callbacks[event].keys()): + cb_func(*args) + + def select_cell(self, i, j): + if self._root is None: + return + + # If the cell is already selected (and the chart contents + # haven't changed), then do nothing. + if (i, j) == self._selected_cell and self._chart.num_edges() == self._num_edges: + return + + self._selected_cell = (i, j) + self.update() + + # Fire the callback. 
+ self._fire_callbacks("select_cell", i, j) + + def deselect_cell(self): + if self._root is None: + return + self._selected_cell = None + self._list.set([]) + self.update() + + def _click_cell(self, i, j): + if self._selected_cell == (i, j): + self.deselect_cell() + else: + self.select_cell(i, j) + + def view_edge(self, edge): + self.select_cell(*edge.span()) + self._list.view(edge) + + def mark_edge(self, edge): + if self._root is None: + return + self.select_cell(*edge.span()) + self._list.mark(edge) + + def unmark_edge(self, edge=None): + if self._root is None: + return + self._list.unmark(edge) + + def markonly_edge(self, edge): + if self._root is None: + return + self.select_cell(*edge.span()) + self._list.markonly(edge) + + def draw(self): + if self._root is None: + return + LEFT_MARGIN = BOT_MARGIN = 15 + TOP_MARGIN = 5 + c = self._canvas + c.delete("all") + N = self._chart.num_leaves() + 1 + dx = (int(c["width"]) - LEFT_MARGIN) / N + dy = (int(c["height"]) - TOP_MARGIN - BOT_MARGIN) / N + + c.delete("all") + + # Labels and dotted lines + for i in range(N): + c.create_text( + LEFT_MARGIN - 2, i * dy + dy / 2 + TOP_MARGIN, text=repr(i), anchor="e" + ) + c.create_text( + i * dx + dx / 2 + LEFT_MARGIN, + N * dy + TOP_MARGIN + 1, + text=repr(i), + anchor="n", + ) + c.create_line( + LEFT_MARGIN, + dy * (i + 1) + TOP_MARGIN, + dx * N + LEFT_MARGIN, + dy * (i + 1) + TOP_MARGIN, + dash=".", + ) + c.create_line( + dx * i + LEFT_MARGIN, + TOP_MARGIN, + dx * i + LEFT_MARGIN, + dy * N + TOP_MARGIN, + dash=".", + ) + + # A box around the whole thing + c.create_rectangle( + LEFT_MARGIN, TOP_MARGIN, LEFT_MARGIN + dx * N, dy * N + TOP_MARGIN, width=2 + ) + + # Cells + self._cells = [[None for i in range(N)] for j in range(N)] + for i in range(N): + for j in range(i, N): + t = c.create_rectangle( + j * dx + LEFT_MARGIN, + i * dy + TOP_MARGIN, + (j + 1) * dx + LEFT_MARGIN, + (i + 1) * dy + TOP_MARGIN, + fill="gray20", + ) + self._cells[i][j] = t + + def cb(event, self=self, 
i=i, j=j): + self._click_cell(i, j) + + c.tag_bind(t, "", cb) + + # Inactive box + xmax, ymax = int(c["width"]), int(c["height"]) + t = c.create_rectangle( + -100, + -100, + xmax + 100, + ymax + 100, + fill="gray50", + state="hidden", + tag="inactivebox", + ) + c.tag_lower(t) + + # Update the cells. + self.update() + + def pack(self, *args, **kwargs): + self._root.pack(*args, **kwargs) + + +####################################################################### +# Chart Results View +####################################################################### + + +class ChartResultsView: + def __init__(self, parent, chart, grammar, toplevel=True): + self._chart = chart + self._grammar = grammar + self._trees = [] + self._y = 10 + self._treewidgets = [] + self._selection = None + self._selectbox = None + + if toplevel: + self._root = Toplevel(parent) + self._root.title("Chart Parser Application: Results") + self._root.bind("", self.destroy) + else: + self._root = Frame(parent) + + # Buttons + if toplevel: + buttons = Frame(self._root) + buttons.pack(side="bottom", expand=0, fill="x") + Button(buttons, text="Quit", command=self.destroy).pack(side="right") + Button(buttons, text="Print All", command=self.print_all).pack(side="left") + Button(buttons, text="Print Selection", command=self.print_selection).pack( + side="left" + ) + + # Canvas frame. + self._cframe = CanvasFrame(self._root, closeenough=20) + self._cframe.pack(side="top", expand=1, fill="both") + + # Initial update + self.update() + + def update(self, edge=None): + if self._root is None: + return + # If the edge isn't a parse edge, do nothing. + if edge is not None: + if edge.lhs() != self._grammar.start(): + return + if edge.span() != (0, self._chart.num_leaves()): + return + + for parse in self._chart.parses(self._grammar.start()): + if parse not in self._trees: + self._add(parse) + + def _add(self, parse): + # Add it to self._trees. + self._trees.append(parse) + + # Create a widget for it. 
+ c = self._cframe.canvas() + treewidget = tree_to_treesegment(c, parse) + + # Add it to the canvas frame. + self._treewidgets.append(treewidget) + self._cframe.add_widget(treewidget, 10, self._y) + + # Register callbacks. + treewidget.bind_click(self._click) + + # Update y. + self._y = treewidget.bbox()[3] + 10 + + def _click(self, widget): + c = self._cframe.canvas() + if self._selection is not None: + c.delete(self._selectbox) + self._selection = widget + (x1, y1, x2, y2) = widget.bbox() + self._selectbox = c.create_rectangle(x1, y1, x2, y2, width=2, outline="#088") + + def _color(self, treewidget, color): + treewidget.label()["color"] = color + for child in treewidget.subtrees(): + if isinstance(child, TreeSegmentWidget): + self._color(child, color) + else: + child["color"] = color + + def print_all(self, *e): + if self._root is None: + return + self._cframe.print_to_file() + + def print_selection(self, *e): + if self._root is None: + return + if self._selection is None: + showerror("Print Error", "No tree selected") + else: + c = self._cframe.canvas() + for widget in self._treewidgets: + if widget is not self._selection: + self._cframe.destroy_widget(widget) + c.delete(self._selectbox) + (x1, y1, x2, y2) = self._selection.bbox() + self._selection.move(10 - x1, 10 - y1) + c["scrollregion"] = f"0 0 {x2 - x1 + 20} {y2 - y1 + 20}" + self._cframe.print_to_file() + + # Restore our state. 
+ self._treewidgets = [self._selection] + self.clear() + self.update() + + def clear(self): + if self._root is None: + return + for treewidget in self._treewidgets: + self._cframe.destroy_widget(treewidget) + self._trees = [] + self._treewidgets = [] + if self._selection is not None: + self._cframe.canvas().delete(self._selectbox) + self._selection = None + self._y = 10 + + def set_chart(self, chart): + self.clear() + self._chart = chart + self.update() + + def set_grammar(self, grammar): + self.clear() + self._grammar = grammar + self.update() + + def destroy(self, *e): + if self._root is None: + return + try: + self._root.destroy() + except: + pass + self._root = None + + def pack(self, *args, **kwargs): + self._root.pack(*args, **kwargs) + + +####################################################################### +# Chart Comparer +####################################################################### + + +class ChartComparer: + """ + + :ivar _root: The root window + + :ivar _charts: A dictionary mapping names to charts. When + charts are loaded, they are added to this dictionary. + + :ivar _left_chart: The left ``Chart``. + :ivar _left_name: The name ``_left_chart`` (derived from filename) + :ivar _left_matrix: The ``ChartMatrixView`` for ``_left_chart`` + :ivar _left_selector: The drop-down ``MutableOptionsMenu`` used + to select ``_left_chart``. + + :ivar _right_chart: The right ``Chart``. + :ivar _right_name: The name ``_right_chart`` (derived from filename) + :ivar _right_matrix: The ``ChartMatrixView`` for ``_right_chart`` + :ivar _right_selector: The drop-down ``MutableOptionsMenu`` used + to select ``_right_chart``. + + :ivar _out_chart: The out ``Chart``. + :ivar _out_name: The name ``_out_chart`` (derived from filename) + :ivar _out_matrix: The ``ChartMatrixView`` for ``_out_chart`` + :ivar _out_label: The label for ``_out_chart``. + + :ivar _op_label: A Label containing the most recent operation. 
+ """ + + _OPSYMBOL = { + "-": "-", + "and": SymbolWidget.SYMBOLS["intersection"], + "or": SymbolWidget.SYMBOLS["union"], + } + + def __init__(self, *chart_filenames): + # This chart is displayed when we don't have a value (eg + # before any chart is loaded). + faketok = [""] * 8 + self._emptychart = Chart(faketok) + + # The left & right charts start out empty. + self._left_name = "None" + self._right_name = "None" + self._left_chart = self._emptychart + self._right_chart = self._emptychart + + # The charts that have been loaded. + self._charts = {"None": self._emptychart} + + # The output chart. + self._out_chart = self._emptychart + + # The most recent operation + self._operator = None + + # Set up the root window. + self._root = Tk() + self._root.title("Chart Comparison") + self._root.bind("", self.destroy) + self._root.bind("", self.destroy) + + # Initialize all widgets, etc. + self._init_menubar(self._root) + self._init_chartviews(self._root) + self._init_divider(self._root) + self._init_buttons(self._root) + self._init_bindings(self._root) + + # Load any specified charts. 
+ for filename in chart_filenames: + self.load_chart(filename) + + def destroy(self, *e): + if self._root is None: + return + try: + self._root.destroy() + except: + pass + self._root = None + + def mainloop(self, *args, **kwargs): + return + self._root.mainloop(*args, **kwargs) + + # //////////////////////////////////////////////////////////// + # Initialization + # //////////////////////////////////////////////////////////// + + def _init_menubar(self, root): + menubar = Menu(root) + + # File menu + filemenu = Menu(menubar, tearoff=0) + filemenu.add_command( + label="Load Chart", + accelerator="Ctrl-o", + underline=0, + command=self.load_chart_dialog, + ) + filemenu.add_command( + label="Save Output", + accelerator="Ctrl-s", + underline=0, + command=self.save_chart_dialog, + ) + filemenu.add_separator() + filemenu.add_command( + label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-x" + ) + menubar.add_cascade(label="File", underline=0, menu=filemenu) + + # Compare menu + opmenu = Menu(menubar, tearoff=0) + opmenu.add_command( + label="Intersection", command=self._intersection, accelerator="+" + ) + opmenu.add_command(label="Union", command=self._union, accelerator="*") + opmenu.add_command( + label="Difference", command=self._difference, accelerator="-" + ) + opmenu.add_separator() + opmenu.add_command(label="Swap Charts", command=self._swapcharts) + menubar.add_cascade(label="Compare", underline=0, menu=opmenu) + + # Add the menu + self._root.config(menu=menubar) + + def _init_divider(self, root): + divider = Frame(root, border=2, relief="sunken") + divider.pack(side="top", fill="x", ipady=2) + + def _init_chartviews(self, root): + opfont = ("symbol", -36) # Font for operator. + eqfont = ("helvetica", -36) # Font for equals sign. + + frame = Frame(root, background="#c0c0c0") + frame.pack(side="top", expand=1, fill="both") + + # The left matrix. 
+ cv1_frame = Frame(frame, border=3, relief="groove") + cv1_frame.pack(side="left", padx=8, pady=7, expand=1, fill="both") + self._left_selector = MutableOptionMenu( + cv1_frame, list(self._charts.keys()), command=self._select_left + ) + self._left_selector.pack(side="top", pady=5, fill="x") + self._left_matrix = ChartMatrixView( + cv1_frame, self._emptychart, toplevel=False, show_numedges=True + ) + self._left_matrix.pack(side="bottom", padx=5, pady=5, expand=1, fill="both") + self._left_matrix.add_callback("select", self.select_edge) + self._left_matrix.add_callback("select_cell", self.select_cell) + self._left_matrix.inactivate() + + # The operator. + self._op_label = Label( + frame, text=" ", width=3, background="#c0c0c0", font=opfont + ) + self._op_label.pack(side="left", padx=5, pady=5) + + # The right matrix. + cv2_frame = Frame(frame, border=3, relief="groove") + cv2_frame.pack(side="left", padx=8, pady=7, expand=1, fill="both") + self._right_selector = MutableOptionMenu( + cv2_frame, list(self._charts.keys()), command=self._select_right + ) + self._right_selector.pack(side="top", pady=5, fill="x") + self._right_matrix = ChartMatrixView( + cv2_frame, self._emptychart, toplevel=False, show_numedges=True + ) + self._right_matrix.pack(side="bottom", padx=5, pady=5, expand=1, fill="both") + self._right_matrix.add_callback("select", self.select_edge) + self._right_matrix.add_callback("select_cell", self.select_cell) + self._right_matrix.inactivate() + + # The equals sign + Label(frame, text="=", width=3, background="#c0c0c0", font=eqfont).pack( + side="left", padx=5, pady=5 + ) + + # The output matrix. 
+ out_frame = Frame(frame, border=3, relief="groove") + out_frame.pack(side="left", padx=8, pady=7, expand=1, fill="both") + self._out_label = Label(out_frame, text="Output") + self._out_label.pack(side="top", pady=9) + self._out_matrix = ChartMatrixView( + out_frame, self._emptychart, toplevel=False, show_numedges=True + ) + self._out_matrix.pack(side="bottom", padx=5, pady=5, expand=1, fill="both") + self._out_matrix.add_callback("select", self.select_edge) + self._out_matrix.add_callback("select_cell", self.select_cell) + self._out_matrix.inactivate() + + def _init_buttons(self, root): + buttons = Frame(root) + buttons.pack(side="bottom", pady=5, fill="x", expand=0) + Button(buttons, text="Intersection", command=self._intersection).pack( + side="left" + ) + Button(buttons, text="Union", command=self._union).pack(side="left") + Button(buttons, text="Difference", command=self._difference).pack(side="left") + Frame(buttons, width=20).pack(side="left") + Button(buttons, text="Swap Charts", command=self._swapcharts).pack(side="left") + + Button(buttons, text="Detach Output", command=self._detach_out).pack( + side="right" + ) + + def _init_bindings(self, root): + # root.bind('', self.save_chart) + root.bind("", self.load_chart_dialog) + # root.bind('', self.reset) + + # //////////////////////////////////////////////////////////// + # Input Handling + # //////////////////////////////////////////////////////////// + + def _select_left(self, name): + self._left_name = name + self._left_chart = self._charts[name] + self._left_matrix.set_chart(self._left_chart) + if name == "None": + self._left_matrix.inactivate() + self._apply_op() + + def _select_right(self, name): + self._right_name = name + self._right_chart = self._charts[name] + self._right_matrix.set_chart(self._right_chart) + if name == "None": + self._right_matrix.inactivate() + self._apply_op() + + def _apply_op(self): + if self._operator == "-": + self._difference() + elif self._operator == "or": + self._union() 
+ elif self._operator == "and": + self._intersection() + + # //////////////////////////////////////////////////////////// + # File + # //////////////////////////////////////////////////////////// + CHART_FILE_TYPES = [("Pickle file", ".pickle"), ("All files", "*")] + + def save_chart_dialog(self, *args): + filename = asksaveasfilename( + filetypes=self.CHART_FILE_TYPES, defaultextension=".pickle" + ) + if not filename: + return + try: + with open(filename, "wb") as outfile: + pickle.dump(self._out_chart, outfile) + except Exception as e: + showerror("Error Saving Chart", f"Unable to open file: {filename!r}\n{e}") + + def load_chart_dialog(self, *args): + filename = askopenfilename( + filetypes=self.CHART_FILE_TYPES, defaultextension=".pickle" + ) + if not filename: + return + try: + self.load_chart(filename) + except Exception as e: + showerror("Error Loading Chart", f"Unable to open file: {filename!r}\n{e}") + + def load_chart(self, filename): + with open(filename, "rb") as infile: + chart = pickle.load(infile) + name = os.path.basename(filename) + if name.endswith(".pickle"): + name = name[:-7] + if name.endswith(".chart"): + name = name[:-6] + self._charts[name] = chart + self._left_selector.add(name) + self._right_selector.add(name) + + # If either left_matrix or right_matrix is empty, then + # display the new chart. 
+ if self._left_chart is self._emptychart: + self._left_selector.set(name) + elif self._right_chart is self._emptychart: + self._right_selector.set(name) + + def _update_chartviews(self): + self._left_matrix.update() + self._right_matrix.update() + self._out_matrix.update() + + # //////////////////////////////////////////////////////////// + # Selection + # //////////////////////////////////////////////////////////// + + def select_edge(self, edge): + if edge in self._left_chart: + self._left_matrix.markonly_edge(edge) + else: + self._left_matrix.unmark_edge() + if edge in self._right_chart: + self._right_matrix.markonly_edge(edge) + else: + self._right_matrix.unmark_edge() + if edge in self._out_chart: + self._out_matrix.markonly_edge(edge) + else: + self._out_matrix.unmark_edge() + + def select_cell(self, i, j): + self._left_matrix.select_cell(i, j) + self._right_matrix.select_cell(i, j) + self._out_matrix.select_cell(i, j) + + # //////////////////////////////////////////////////////////// + # Operations + # //////////////////////////////////////////////////////////// + + def _difference(self): + if not self._checkcompat(): + return + + out_chart = Chart(self._left_chart.tokens()) + for edge in self._left_chart: + if edge not in self._right_chart: + out_chart.insert(edge, []) + + self._update("-", out_chart) + + def _intersection(self): + if not self._checkcompat(): + return + + out_chart = Chart(self._left_chart.tokens()) + for edge in self._left_chart: + if edge in self._right_chart: + out_chart.insert(edge, []) + + self._update("and", out_chart) + + def _union(self): + if not self._checkcompat(): + return + + out_chart = Chart(self._left_chart.tokens()) + for edge in self._left_chart: + out_chart.insert(edge, []) + for edge in self._right_chart: + out_chart.insert(edge, []) + + self._update("or", out_chart) + + def _swapcharts(self): + left, right = self._left_name, self._right_name + self._left_selector.set(right) + self._right_selector.set(left) + + def 
_checkcompat(self): + if ( + self._left_chart.tokens() != self._right_chart.tokens() + or self._left_chart.property_names() != self._right_chart.property_names() + or self._left_chart == self._emptychart + or self._right_chart == self._emptychart + ): + # Clear & inactivate the output chart. + self._out_chart = self._emptychart + self._out_matrix.set_chart(self._out_chart) + self._out_matrix.inactivate() + self._out_label["text"] = "Output" + # Issue some other warning? + return False + else: + return True + + def _update(self, operator, out_chart): + self._operator = operator + self._op_label["text"] = self._OPSYMBOL[operator] + self._out_chart = out_chart + self._out_matrix.set_chart(out_chart) + self._out_label["text"] = "{} {} {}".format( + self._left_name, + self._operator, + self._right_name, + ) + + def _clear_out_chart(self): + self._out_chart = self._emptychart + self._out_matrix.set_chart(self._out_chart) + self._op_label["text"] = " " + self._out_matrix.inactivate() + + def _detach_out(self): + ChartMatrixView(self._root, self._out_chart, title=self._out_label["text"]) + + +####################################################################### +# Chart View +####################################################################### + + +class ChartView: + """ + A component for viewing charts. This is used by ``ChartParserApp`` to + allow students to interactively experiment with various chart + parsing techniques. It is also used by ``Chart.draw()``. + + :ivar _chart: The chart that we are giving a view of. This chart + may be modified; after it is modified, you should call + ``update``. + :ivar _sentence: The list of tokens that the chart spans. + + :ivar _root: The root window. + :ivar _chart_canvas: The canvas we're using to display the chart + itself. + :ivar _tree_canvas: The canvas we're using to display the tree + that each edge spans. May be None, if we're not displaying + trees. 
+ :ivar _sentence_canvas: The canvas we're using to display the sentence + text. May be None, if we're not displaying the sentence text. + :ivar _edgetags: A dictionary mapping from edges to the tags of + the canvas elements (lines, etc) used to display that edge. + The values of this dictionary have the form + ``(linetag, rhstag1, dottag, rhstag2, lhstag)``. + :ivar _treetags: A list of all the tags that make up the tree; + used to erase the tree (without erasing the loclines). + :ivar _chart_height: The height of the chart canvas. + :ivar _sentence_height: The height of the sentence canvas. + :ivar _tree_height: The height of the tree + + :ivar _text_height: The height of a text string (in the normal + font). + + :ivar _edgelevels: A list of edges at each level of the chart (the + top level is the 0th element). This list is used to remember + where edges should be drawn; and to make sure that no edges + are overlapping on the chart view. + + :ivar _unitsize: Pixel size of one unit (from the location). This + is determined by the span of the chart's location, and the + width of the chart display canvas. + + :ivar _fontsize: The current font size + + :ivar _marks: A dictionary from edges to marks. Marks are + strings, specifying colors (e.g. 'green'). + """ + + _LEAF_SPACING = 10 + _MARGIN = 10 + _TREE_LEVEL_SIZE = 12 + _CHART_LEVEL_SIZE = 40 + + def __init__(self, chart, root=None, **kw): + """ + Construct a new ``Chart`` display. + """ + # Process keyword args. + draw_tree = kw.get("draw_tree", 0) + draw_sentence = kw.get("draw_sentence", 1) + self._fontsize = kw.get("fontsize", -12) + + # The chart! + self._chart = chart + + # Callback functions + self._callbacks = {} + + # Keep track of drawn edges + self._edgelevels = [] + self._edgetags = {} + + # Keep track of which edges are marked. + self._marks = {} + + # These are used to keep track of the set of tree tokens + # currently displayed in the tree canvas. 
+ self._treetoks = [] + self._treetoks_edge = None + self._treetoks_index = 0 + + # Keep track of the tags used to draw the tree + self._tree_tags = [] + + # Put multiple edges on each level? + self._compact = 0 + + # If they didn't provide a main window, then set one up. + if root is None: + top = Tk() + top.title("Chart View") + + def destroy1(e, top=top): + top.destroy() + + def destroy2(top=top): + top.destroy() + + top.bind("q", destroy1) + b = Button(top, text="Done", command=destroy2) + b.pack(side="bottom") + self._root = top + else: + self._root = root + + # Create some fonts. + self._init_fonts(root) + + # Create the chart canvas. + (self._chart_sb, self._chart_canvas) = self._sb_canvas(self._root) + self._chart_canvas["height"] = 300 + self._chart_canvas["closeenough"] = 15 + + # Create the sentence canvas. + if draw_sentence: + cframe = Frame(self._root, relief="sunk", border=2) + cframe.pack(fill="both", side="bottom") + self._sentence_canvas = Canvas(cframe, height=50) + self._sentence_canvas["background"] = "#e0e0e0" + self._sentence_canvas.pack(fill="both") + # self._sentence_canvas['height'] = self._sentence_height + else: + self._sentence_canvas = None + + # Create the tree canvas. + if draw_tree: + (sb, canvas) = self._sb_canvas(self._root, "n", "x") + (self._tree_sb, self._tree_canvas) = (sb, canvas) + self._tree_canvas["height"] = 200 + else: + self._tree_canvas = None + + # Do some analysis to figure out how big the window should be + self._analyze() + self.draw() + self._resize() + self._grow() + + # Set up the configure callback, which will be called whenever + # the window is resized. 
+ self._chart_canvas.bind("", self._configure) + + def _init_fonts(self, root): + self._boldfont = Font(family="helvetica", weight="bold", size=self._fontsize) + self._font = Font(family="helvetica", size=self._fontsize) + # See: + self._sysfont = Font(font=Button()["font"]) + root.option_add("*Font", self._sysfont) + + def _sb_canvas(self, root, expand="y", fill="both", side="bottom"): + """ + Helper for __init__: construct a canvas with a scrollbar. + """ + cframe = Frame(root, relief="sunk", border=2) + cframe.pack(fill=fill, expand=expand, side=side) + canvas = Canvas(cframe, background="#e0e0e0") + + # Give the canvas a scrollbar. + sb = Scrollbar(cframe, orient="vertical") + sb.pack(side="right", fill="y") + canvas.pack(side="left", fill=fill, expand="yes") + + # Connect the scrollbars to the canvas. + sb["command"] = canvas.yview + canvas["yscrollcommand"] = sb.set + + return (sb, canvas) + + def scroll_up(self, *e): + self._chart_canvas.yview("scroll", -1, "units") + + def scroll_down(self, *e): + self._chart_canvas.yview("scroll", 1, "units") + + def page_up(self, *e): + self._chart_canvas.yview("scroll", -1, "pages") + + def page_down(self, *e): + self._chart_canvas.yview("scroll", 1, "pages") + + def _grow(self): + """ + Grow the window, if necessary + """ + # Grow, if need-be + N = self._chart.num_leaves() + width = max( + int(self._chart_canvas["width"]), N * self._unitsize + ChartView._MARGIN * 2 + ) + + # It won't resize without the second (height) line, but I + # don't understand why not. + self._chart_canvas.configure(width=width) + self._chart_canvas.configure(height=self._chart_canvas["height"]) + + self._unitsize = (width - 2 * ChartView._MARGIN) / N + + # Reset the height for the sentence window. 
        # (tail of _grow) keep the sentence canvas's height in sync with the
        # recomputed text metrics.
        if self._sentence_canvas is not None:
            self._sentence_canvas["height"] = self._sentence_height

    def set_font_size(self, size):
        """Set the display font size, then re-measure and redraw everything.

        Negative Tk font sizes are pixel sizes, hence -abs(size).
        """
        # NOTE(review): self._fontsize is not updated here, so
        # get_font_size() keeps returning the size given at construction
        # time -- TODO confirm whether that staleness is intended.
        self._font.configure(size=-abs(size))
        self._boldfont.configure(size=-abs(size))
        self._sysfont.configure(size=-abs(size))
        self._analyze()
        self._grow()
        self.draw()

    def get_font_size(self):
        """Return the (positive) font size chosen at construction time."""
        return abs(self._fontsize)

    def _configure(self, e):
        """
        The configure callback.  This is called whenever the window is
        resized.  It is also called when the window is first mapped.
        It figures out the unit size, and redraws the contents of each
        canvas.
        """
        # NOTE(review): assumes the chart has at least one leaf; N == 0
        # would divide by zero here -- TODO confirm callers guarantee this.
        N = self._chart.num_leaves()
        self._unitsize = (e.width - 2 * ChartView._MARGIN) / N
        self.draw()

    def update(self, chart=None):
        """
        Draw any edges that have not been drawn.  This is typically
        called after something modifies the chart that this view is
        displaying.  ``update`` will cause any edges that have been
        added to the chart to be drawn.

        If update is given a ``chart`` argument, then it will replace
        the current chart with the given chart.
        """
        if chart is not None:
            # Replace the chart wholesale and rebuild all display state.
            self._chart = chart
            self._edgelevels = []
            self._marks = {}
            self._analyze()
            self._grow()
            self.draw()
            self.erase_tree()
            self._resize()
        else:
            # Incrementally draw only the edges not yet on the canvas.
            for edge in self._chart:
                if edge not in self._edgetags:
                    self._add_edge(edge)
            self._resize()

    def _edge_conflict(self, edge, lvl):
        """
        Return True if the given edge overlaps with any edge on the given
        level.  This is used by _add_edge to figure out what level a
        new edge should be added to.
+ """ + (s1, e1) = edge.span() + for otheredge in self._edgelevels[lvl]: + (s2, e2) = otheredge.span() + if (s1 <= s2 < e1) or (s2 <= s1 < e2) or (s1 == s2 == e1 == e2): + return True + return False + + def _analyze_edge(self, edge): + """ + Given a new edge, recalculate: + + - _text_height + - _unitsize (if the edge text is too big for the current + _unitsize, then increase _unitsize) + """ + c = self._chart_canvas + + if isinstance(edge, TreeEdge): + lhs = edge.lhs() + rhselts = [] + for elt in edge.rhs(): + if isinstance(elt, Nonterminal): + rhselts.append(str(elt.symbol())) + else: + rhselts.append(repr(elt)) + rhs = " ".join(rhselts) + else: + lhs = edge.lhs() + rhs = "" + + for s in (lhs, rhs): + tag = c.create_text( + 0, 0, text=s, font=self._boldfont, anchor="nw", justify="left" + ) + bbox = c.bbox(tag) + c.delete(tag) + width = bbox[2] # + ChartView._LEAF_SPACING + edgelen = max(edge.length(), 1) + self._unitsize = max(self._unitsize, width / edgelen) + self._text_height = max(self._text_height, bbox[3] - bbox[1]) + + def _add_edge(self, edge, minlvl=0): + """ + Add a single edge to the ChartView: + + - Call analyze_edge to recalculate display parameters + - Find an available level + - Call _draw_edge + """ + # Do NOT show leaf edges in the chart. + if isinstance(edge, LeafEdge): + return + + if edge in self._edgetags: + return + self._analyze_edge(edge) + self._grow() + + if not self._compact: + self._edgelevels.append([edge]) + lvl = len(self._edgelevels) - 1 + self._draw_edge(edge, lvl) + self._resize() + return + + # Figure out what level to draw the edge on. + lvl = 0 + while True: + # If this level doesn't exist yet, create it. + while lvl >= len(self._edgelevels): + self._edgelevels.append([]) + self._resize() + + # Check if we can fit the edge in this level. + if lvl >= minlvl and not self._edge_conflict(edge, lvl): + # Go ahead and draw it. + self._edgelevels[lvl].append(edge) + break + + # Try the next level. 
+ lvl += 1 + + self._draw_edge(edge, lvl) + + def view_edge(self, edge): + level = None + for i in range(len(self._edgelevels)): + if edge in self._edgelevels[i]: + level = i + break + if level is None: + return + # Try to view the new edge.. + y = (level + 1) * self._chart_level_size + dy = self._text_height + 10 + self._chart_canvas.yview("moveto", 1.0) + if self._chart_height != 0: + self._chart_canvas.yview("moveto", (y - dy) / self._chart_height) + + def _draw_edge(self, edge, lvl): + """ + Draw a single edge on the ChartView. + """ + c = self._chart_canvas + + # Draw the arrow. + x1 = edge.start() * self._unitsize + ChartView._MARGIN + x2 = edge.end() * self._unitsize + ChartView._MARGIN + if x2 == x1: + x2 += max(4, self._unitsize / 5) + y = (lvl + 1) * self._chart_level_size + linetag = c.create_line(x1, y, x2, y, arrow="last", width=3) + + # Draw a label for the edge. + if isinstance(edge, TreeEdge): + rhs = [] + for elt in edge.rhs(): + if isinstance(elt, Nonterminal): + rhs.append(str(elt.symbol())) + else: + rhs.append(repr(elt)) + pos = edge.dot() + else: + rhs = [] + pos = 0 + + rhs1 = " ".join(rhs[:pos]) + rhs2 = " ".join(rhs[pos:]) + rhstag1 = c.create_text(x1 + 3, y, text=rhs1, font=self._font, anchor="nw") + dotx = c.bbox(rhstag1)[2] + 6 + doty = (c.bbox(rhstag1)[1] + c.bbox(rhstag1)[3]) / 2 + dottag = c.create_oval(dotx - 2, doty - 2, dotx + 2, doty + 2) + rhstag2 = c.create_text(dotx + 6, y, text=rhs2, font=self._font, anchor="nw") + lhstag = c.create_text( + (x1 + x2) / 2, y, text=str(edge.lhs()), anchor="s", font=self._boldfont + ) + + # Keep track of the edge's tags. + self._edgetags[edge] = (linetag, rhstag1, dottag, rhstag2, lhstag) + + # Register a callback for clicking on the edge. 
+ def cb(event, self=self, edge=edge): + self._fire_callbacks("select", edge) + + c.tag_bind(rhstag1, "", cb) + c.tag_bind(rhstag2, "", cb) + c.tag_bind(linetag, "", cb) + c.tag_bind(dottag, "", cb) + c.tag_bind(lhstag, "", cb) + + self._color_edge(edge) + + def _color_edge(self, edge, linecolor=None, textcolor=None): + """ + Color in an edge with the given colors. + If no colors are specified, use intelligent defaults + (dependent on selection, etc.) + """ + if edge not in self._edgetags: + return + c = self._chart_canvas + + if linecolor is not None and textcolor is not None: + if edge in self._marks: + linecolor = self._marks[edge] + tags = self._edgetags[edge] + c.itemconfig(tags[0], fill=linecolor) + c.itemconfig(tags[1], fill=textcolor) + c.itemconfig(tags[2], fill=textcolor, outline=textcolor) + c.itemconfig(tags[3], fill=textcolor) + c.itemconfig(tags[4], fill=textcolor) + return + else: + N = self._chart.num_leaves() + if edge in self._marks: + self._color_edge(self._marks[edge]) + if edge.is_complete() and edge.span() == (0, N): + self._color_edge(edge, "#084", "#042") + elif isinstance(edge, LeafEdge): + self._color_edge(edge, "#48c", "#246") + else: + self._color_edge(edge, "#00f", "#008") + + def mark_edge(self, edge, mark="#0df"): + """ + Mark an edge + """ + self._marks[edge] = mark + self._color_edge(edge) + + def unmark_edge(self, edge=None): + """ + Unmark an edge (or all edges) + """ + if edge is None: + old_marked_edges = list(self._marks.keys()) + self._marks = {} + for edge in old_marked_edges: + self._color_edge(edge) + else: + del self._marks[edge] + self._color_edge(edge) + + def markonly_edge(self, edge, mark="#0df"): + self.unmark_edge() + self.mark_edge(edge, mark) + + def _analyze(self): + """ + Analyze the sentence string, to figure out how big a unit needs + to be, How big the tree should be, etc. + """ + # Figure out the text height and the unit size. 
+ unitsize = 70 # min unitsize + text_height = 0 + c = self._chart_canvas + + # Check against all tokens + for leaf in self._chart.leaves(): + tag = c.create_text( + 0, 0, text=repr(leaf), font=self._font, anchor="nw", justify="left" + ) + bbox = c.bbox(tag) + c.delete(tag) + width = bbox[2] + ChartView._LEAF_SPACING + unitsize = max(width, unitsize) + text_height = max(text_height, bbox[3] - bbox[1]) + + self._unitsize = unitsize + self._text_height = text_height + self._sentence_height = self._text_height + 2 * ChartView._MARGIN + + # Check against edges. + for edge in self._chart.edges(): + self._analyze_edge(edge) + + # Size of chart levels + self._chart_level_size = self._text_height * 2 + + # Default tree size.. + self._tree_height = 3 * (ChartView._TREE_LEVEL_SIZE + self._text_height) + + # Resize the scrollregions. + self._resize() + + def _resize(self): + """ + Update the scroll-regions for each canvas. This ensures that + everything is within a scroll-region, so the user can use the + scrollbars to view the entire display. This does *not* + resize the window. + """ + c = self._chart_canvas + + # Reset the chart scroll region + width = self._chart.num_leaves() * self._unitsize + ChartView._MARGIN * 2 + + levels = len(self._edgelevels) + self._chart_height = (levels + 2) * self._chart_level_size + c["scrollregion"] = (0, 0, width, self._chart_height) + + # Reset the tree scroll region + if self._tree_canvas: + self._tree_canvas["scrollregion"] = (0, 0, width, self._tree_height) + + def _draw_loclines(self): + """ + Draw location lines. These are vertical gridlines used to + show where each location unit is. 
+ """ + BOTTOM = 50000 + c1 = self._tree_canvas + c2 = self._sentence_canvas + c3 = self._chart_canvas + margin = ChartView._MARGIN + self._loclines = [] + for i in range(0, self._chart.num_leaves() + 1): + x = i * self._unitsize + margin + + if c1: + t1 = c1.create_line(x, 0, x, BOTTOM) + c1.tag_lower(t1) + if c2: + t2 = c2.create_line(x, 0, x, self._sentence_height) + c2.tag_lower(t2) + t3 = c3.create_line(x, 0, x, BOTTOM) + c3.tag_lower(t3) + t4 = c3.create_text(x + 2, 0, text=repr(i), anchor="nw", font=self._font) + c3.tag_lower(t4) + # if i % 4 == 0: + # if c1: c1.itemconfig(t1, width=2, fill='gray60') + # if c2: c2.itemconfig(t2, width=2, fill='gray60') + # c3.itemconfig(t3, width=2, fill='gray60') + if i % 2 == 0: + if c1: + c1.itemconfig(t1, fill="gray60") + if c2: + c2.itemconfig(t2, fill="gray60") + c3.itemconfig(t3, fill="gray60") + else: + if c1: + c1.itemconfig(t1, fill="gray80") + if c2: + c2.itemconfig(t2, fill="gray80") + c3.itemconfig(t3, fill="gray80") + + def _draw_sentence(self): + """Draw the sentence string.""" + if self._chart.num_leaves() == 0: + return + c = self._sentence_canvas + margin = ChartView._MARGIN + y = ChartView._MARGIN + + for i, leaf in enumerate(self._chart.leaves()): + x1 = i * self._unitsize + margin + x2 = x1 + self._unitsize + x = (x1 + x2) / 2 + tag = c.create_text( + x, y, text=repr(leaf), font=self._font, anchor="n", justify="left" + ) + bbox = c.bbox(tag) + rt = c.create_rectangle( + x1 + 2, + bbox[1] - (ChartView._LEAF_SPACING / 2), + x2 - 2, + bbox[3] + (ChartView._LEAF_SPACING / 2), + fill="#f0f0f0", + outline="#f0f0f0", + ) + c.tag_lower(rt) + + def erase_tree(self): + for tag in self._tree_tags: + self._tree_canvas.delete(tag) + self._treetoks = [] + self._treetoks_edge = None + self._treetoks_index = 0 + + def draw_tree(self, edge=None): + if edge is None and self._treetoks_edge is None: + return + if edge is None: + edge = self._treetoks_edge + + # If it's a new edge, then get a new list of treetoks. 
+ if self._treetoks_edge != edge: + self._treetoks = [t for t in self._chart.trees(edge) if isinstance(t, Tree)] + self._treetoks_edge = edge + self._treetoks_index = 0 + + # Make sure there's something to draw. + if len(self._treetoks) == 0: + return + + # Erase the old tree. + for tag in self._tree_tags: + self._tree_canvas.delete(tag) + + # Draw the new tree. + tree = self._treetoks[self._treetoks_index] + self._draw_treetok(tree, edge.start()) + + # Show how many trees are available for the edge. + self._draw_treecycle() + + # Update the scroll region. + w = self._chart.num_leaves() * self._unitsize + 2 * ChartView._MARGIN + h = tree.height() * (ChartView._TREE_LEVEL_SIZE + self._text_height) + self._tree_canvas["scrollregion"] = (0, 0, w, h) + + def cycle_tree(self): + self._treetoks_index = (self._treetoks_index + 1) % len(self._treetoks) + self.draw_tree(self._treetoks_edge) + + def _draw_treecycle(self): + if len(self._treetoks) <= 1: + return + + # Draw the label. + label = "%d Trees" % len(self._treetoks) + c = self._tree_canvas + margin = ChartView._MARGIN + right = self._chart.num_leaves() * self._unitsize + margin - 2 + tag = c.create_text(right, 2, anchor="ne", text=label, font=self._boldfont) + self._tree_tags.append(tag) + _, _, _, y = c.bbox(tag) + + # Draw the triangles. + for i in range(len(self._treetoks)): + x = right - 20 * (len(self._treetoks) - i - 1) + if i == self._treetoks_index: + fill = "#084" + else: + fill = "#fff" + tag = c.create_polygon( + x, y + 10, x - 5, y, x - 10, y + 10, fill=fill, outline="black" + ) + self._tree_tags.append(tag) + + # Set up a callback: show the tree if they click on its + # triangle. + def cb(event, self=self, i=i): + self._treetoks_index = i + self.draw_tree() + + c.tag_bind(tag, "", cb) + + def _draw_treetok(self, treetok, index, depth=0): + """ + :param index: The index of the first leaf in the tree. + :return: The index of the first leaf after the tree. 
+ """ + c = self._tree_canvas + margin = ChartView._MARGIN + + # Draw the children + child_xs = [] + for child in treetok: + if isinstance(child, Tree): + child_x, index = self._draw_treetok(child, index, depth + 1) + child_xs.append(child_x) + else: + child_xs.append((2 * index + 1) * self._unitsize / 2 + margin) + index += 1 + + # If we have children, then get the node's x by averaging their + # node x's. Otherwise, make room for ourselves. + if child_xs: + nodex = sum(child_xs) / len(child_xs) + else: + # [XX] breaks for null productions. + nodex = (2 * index + 1) * self._unitsize / 2 + margin + index += 1 + + # Draw the node + nodey = depth * (ChartView._TREE_LEVEL_SIZE + self._text_height) + tag = c.create_text( + nodex, + nodey, + anchor="n", + justify="center", + text=str(treetok.label()), + fill="#042", + font=self._boldfont, + ) + self._tree_tags.append(tag) + + # Draw lines to the children. + childy = nodey + ChartView._TREE_LEVEL_SIZE + self._text_height + for childx, child in zip(child_xs, treetok): + if isinstance(child, Tree) and child: + # A "real" tree token: + tag = c.create_line( + nodex, + nodey + self._text_height, + childx, + childy, + width=2, + fill="#084", + ) + self._tree_tags.append(tag) + if isinstance(child, Tree) and not child: + # An unexpanded tree token: + tag = c.create_line( + nodex, + nodey + self._text_height, + childx, + childy, + width=2, + fill="#048", + dash="2 3", + ) + self._tree_tags.append(tag) + if not isinstance(child, Tree): + # A leaf: + tag = c.create_line( + nodex, + nodey + self._text_height, + childx, + 10000, + width=2, + fill="#084", + ) + self._tree_tags.append(tag) + + return nodex, index + + def draw(self): + """ + Draw everything (from scratch). + """ + if self._tree_canvas: + self._tree_canvas.delete("all") + self.draw_tree() + + if self._sentence_canvas: + self._sentence_canvas.delete("all") + self._draw_sentence() + + self._chart_canvas.delete("all") + self._edgetags = {} + + # Redraw any edges we erased. 
+ for lvl in range(len(self._edgelevels)): + for edge in self._edgelevels[lvl]: + self._draw_edge(edge, lvl) + + for edge in self._chart: + self._add_edge(edge) + + self._draw_loclines() + + def add_callback(self, event, func): + self._callbacks.setdefault(event, {})[func] = 1 + + def remove_callback(self, event, func=None): + if func is None: + del self._callbacks[event] + else: + try: + del self._callbacks[event][func] + except: + pass + + def _fire_callbacks(self, event, *args): + if event not in self._callbacks: + return + for cb_func in list(self._callbacks[event].keys()): + cb_func(*args) + + +####################################################################### +# Edge Rules +####################################################################### +# These version of the chart rules only apply to a specific edge. +# This lets the user select an edge, and then apply a rule. + + +class EdgeRule: + """ + To create an edge rule, make an empty base class that uses + EdgeRule as the first base class, and the basic rule as the + second base class. (Order matters!) 
+ """ + + def __init__(self, edge): + super = self.__class__.__bases__[1] + self._edge = edge + self.NUM_EDGES = super.NUM_EDGES - 1 + + def apply(self, chart, grammar, *edges): + super = self.__class__.__bases__[1] + edges += (self._edge,) + yield from super.apply(self, chart, grammar, *edges) + + def __str__(self): + super = self.__class__.__bases__[1] + return super.__str__(self) + + +class TopDownPredictEdgeRule(EdgeRule, TopDownPredictRule): + pass + + +class BottomUpEdgeRule(EdgeRule, BottomUpPredictRule): + pass + + +class BottomUpLeftCornerEdgeRule(EdgeRule, BottomUpPredictCombineRule): + pass + + +class FundamentalEdgeRule(EdgeRule, SingleEdgeFundamentalRule): + pass + + +####################################################################### +# Chart Parser Application +####################################################################### + + +class ChartParserApp: + def __init__(self, grammar, tokens, title="Chart Parser Application"): + # Initialize the parser + self._init_parser(grammar, tokens) + + self._root = None + try: + # Create the root window. + self._root = Tk() + self._root.title(title) + self._root.bind("", self.destroy) + + # Set up some frames. + frame3 = Frame(self._root) + frame2 = Frame(self._root) + frame1 = Frame(self._root) + frame3.pack(side="bottom", fill="none") + frame2.pack(side="bottom", fill="x") + frame1.pack(side="bottom", fill="both", expand=1) + + self._init_fonts(self._root) + self._init_animation() + self._init_chartview(frame1) + self._init_rulelabel(frame2) + self._init_buttons(frame3) + self._init_menubar() + + self._matrix = None + self._results = None + + # Set up keyboard bindings. + self._init_bindings() + + except: + print("Error creating Tree View") + self.destroy() + raise + + def destroy(self, *args): + if self._root is None: + return + self._root.destroy() + self._root = None + + def mainloop(self, *args, **kwargs): + """ + Enter the Tkinter mainloop. 
This function must be called if
        this demo is created from a non-interactive program (e.g.
        from a script); otherwise, the demo will close as soon as
        the script completes.
        """
        if in_idle():
            return
        self._root.mainloop(*args, **kwargs)

    # ////////////////////////////////////////////////////////////
    # Initialization Helpers
    # ////////////////////////////////////////////////////////////

    def _init_parser(self, grammar, tokens):
        # Remember the inputs, then build a fresh stepping parser for them.
        self._grammar = grammar
        self._tokens = tokens
        self._reset_parser()

    def _reset_parser(self):
        # Build a new stepping chart parser over the current grammar/tokens.
        self._cp = SteppingChartParser(self._grammar)
        self._cp.initialize(self._tokens)
        self._chart = self._cp.chart()

        # Insert LeafEdges before the parsing starts.
        for _new_edge in LeafInitRule().apply(self._chart, self._grammar):
            pass

        # The step iterator -- use this to generate new edges
        self._cpstep = self._cp.step()

        # The currently selected edge
        self._selection = None

    def _init_fonts(self, root):
        # See: (URL lost in extraction).  Wrap the default Button font so it
        # can be resized globally via option_add.
        self._sysfont = Font(font=Button()["font"])
        root.option_add("*Font", self._sysfont)

        # What's our font size (default=same as sysfont)
        self._size = IntVar(root)
        self._size.set(self._sysfont.cget("size"))

        self._boldfont = Font(family="helvetica", weight="bold", size=self._size.get())
        self._font = Font(family="helvetica", size=self._size.get())

    def _init_animation(self):
        # Are we stepping? (default=yes)
        self._step = IntVar(self._root)
        self._step.set(1)

        # What's our animation speed (default=fast)
        self._animate = IntVar(self._root)
        self._animate.set(3)  # Default speed = fast

        # Are we currently animating?
+ self._animating = 0 + + def _init_chartview(self, parent): + self._cv = ChartView(self._chart, parent, draw_tree=1, draw_sentence=1) + self._cv.add_callback("select", self._click_cv_edge) + + def _init_rulelabel(self, parent): + ruletxt = "Last edge generated by:" + + self._rulelabel1 = Label(parent, text=ruletxt, font=self._boldfont) + self._rulelabel2 = Label( + parent, width=40, relief="groove", anchor="w", font=self._boldfont + ) + self._rulelabel1.pack(side="left") + self._rulelabel2.pack(side="left") + step = Checkbutton(parent, variable=self._step, text="Step") + step.pack(side="right") + + def _init_buttons(self, parent): + frame1 = Frame(parent) + frame2 = Frame(parent) + frame1.pack(side="bottom", fill="x") + frame2.pack(side="top", fill="none") + + Button( + frame1, + text="Reset\nParser", + background="#90c0d0", + foreground="black", + command=self.reset, + ).pack(side="right") + # Button(frame1, text='Pause', + # background='#90c0d0', foreground='black', + # command=self.pause).pack(side='left') + + Button( + frame1, + text="Top Down\nStrategy", + background="#90c0d0", + foreground="black", + command=self.top_down_strategy, + ).pack(side="left") + Button( + frame1, + text="Bottom Up\nStrategy", + background="#90c0d0", + foreground="black", + command=self.bottom_up_strategy, + ).pack(side="left") + Button( + frame1, + text="Bottom Up\nLeft-Corner Strategy", + background="#90c0d0", + foreground="black", + command=self.bottom_up_leftcorner_strategy, + ).pack(side="left") + + Button( + frame2, + text="Top Down Init\nRule", + background="#90f090", + foreground="black", + command=self.top_down_init, + ).pack(side="left") + Button( + frame2, + text="Top Down Predict\nRule", + background="#90f090", + foreground="black", + command=self.top_down_predict, + ).pack(side="left") + Frame(frame2, width=20).pack(side="left") + + Button( + frame2, + text="Bottom Up Predict\nRule", + background="#90f090", + foreground="black", + command=self.bottom_up, + 
).pack(side="left") + Frame(frame2, width=20).pack(side="left") + + Button( + frame2, + text="Bottom Up Left-Corner\nPredict Rule", + background="#90f090", + foreground="black", + command=self.bottom_up_leftcorner, + ).pack(side="left") + Frame(frame2, width=20).pack(side="left") + + Button( + frame2, + text="Fundamental\nRule", + background="#90f090", + foreground="black", + command=self.fundamental, + ).pack(side="left") + + def _init_bindings(self): + self._root.bind("", self._cv.scroll_up) + self._root.bind("", self._cv.scroll_down) + self._root.bind("", self._cv.page_up) + self._root.bind("", self._cv.page_down) + self._root.bind("", self.destroy) + self._root.bind("", self.destroy) + self._root.bind("", self.help) + + self._root.bind("", self.save_chart) + self._root.bind("", self.load_chart) + self._root.bind("", self.reset) + + self._root.bind("t", self.top_down_strategy) + self._root.bind("b", self.bottom_up_strategy) + self._root.bind("c", self.bottom_up_leftcorner_strategy) + self._root.bind("", self._stop_animation) + + self._root.bind("", self.edit_grammar) + self._root.bind("", self.edit_sentence) + + # Animation speed control + self._root.bind("-", lambda e, a=self._animate: a.set(1)) + self._root.bind("=", lambda e, a=self._animate: a.set(2)) + self._root.bind("+", lambda e, a=self._animate: a.set(3)) + + # Step control + self._root.bind("s", lambda e, s=self._step: s.set(not s.get())) + + def _init_menubar(self): + menubar = Menu(self._root) + + filemenu = Menu(menubar, tearoff=0) + filemenu.add_command( + label="Save Chart", + underline=0, + command=self.save_chart, + accelerator="Ctrl-s", + ) + filemenu.add_command( + label="Load Chart", + underline=0, + command=self.load_chart, + accelerator="Ctrl-o", + ) + filemenu.add_command( + label="Reset Chart", underline=0, command=self.reset, accelerator="Ctrl-r" + ) + filemenu.add_separator() + filemenu.add_command(label="Save Grammar", command=self.save_grammar) + filemenu.add_command(label="Load 
Grammar", command=self.load_grammar) + filemenu.add_separator() + filemenu.add_command( + label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-x" + ) + menubar.add_cascade(label="File", underline=0, menu=filemenu) + + editmenu = Menu(menubar, tearoff=0) + editmenu.add_command( + label="Edit Grammar", + underline=5, + command=self.edit_grammar, + accelerator="Ctrl-g", + ) + editmenu.add_command( + label="Edit Text", + underline=5, + command=self.edit_sentence, + accelerator="Ctrl-t", + ) + menubar.add_cascade(label="Edit", underline=0, menu=editmenu) + + viewmenu = Menu(menubar, tearoff=0) + viewmenu.add_command( + label="Chart Matrix", underline=6, command=self.view_matrix + ) + viewmenu.add_command(label="Results", underline=0, command=self.view_results) + menubar.add_cascade(label="View", underline=0, menu=viewmenu) + + rulemenu = Menu(menubar, tearoff=0) + rulemenu.add_command( + label="Top Down Strategy", + underline=0, + command=self.top_down_strategy, + accelerator="t", + ) + rulemenu.add_command( + label="Bottom Up Strategy", + underline=0, + command=self.bottom_up_strategy, + accelerator="b", + ) + rulemenu.add_command( + label="Bottom Up Left-Corner Strategy", + underline=0, + command=self.bottom_up_leftcorner_strategy, + accelerator="c", + ) + rulemenu.add_separator() + rulemenu.add_command(label="Bottom Up Rule", command=self.bottom_up) + rulemenu.add_command( + label="Bottom Up Left-Corner Rule", command=self.bottom_up_leftcorner + ) + rulemenu.add_command(label="Top Down Init Rule", command=self.top_down_init) + rulemenu.add_command( + label="Top Down Predict Rule", command=self.top_down_predict + ) + rulemenu.add_command(label="Fundamental Rule", command=self.fundamental) + menubar.add_cascade(label="Apply", underline=0, menu=rulemenu) + + animatemenu = Menu(menubar, tearoff=0) + animatemenu.add_checkbutton( + label="Step", underline=0, variable=self._step, accelerator="s" + ) + animatemenu.add_separator() + animatemenu.add_radiobutton( 
+ label="No Animation", underline=0, variable=self._animate, value=0 + ) + animatemenu.add_radiobutton( + label="Slow Animation", + underline=0, + variable=self._animate, + value=1, + accelerator="-", + ) + animatemenu.add_radiobutton( + label="Normal Animation", + underline=0, + variable=self._animate, + value=2, + accelerator="=", + ) + animatemenu.add_radiobutton( + label="Fast Animation", + underline=0, + variable=self._animate, + value=3, + accelerator="+", + ) + menubar.add_cascade(label="Animate", underline=1, menu=animatemenu) + + zoommenu = Menu(menubar, tearoff=0) + zoommenu.add_radiobutton( + label="Tiny", + variable=self._size, + underline=0, + value=10, + command=self.resize, + ) + zoommenu.add_radiobutton( + label="Small", + variable=self._size, + underline=0, + value=12, + command=self.resize, + ) + zoommenu.add_radiobutton( + label="Medium", + variable=self._size, + underline=0, + value=14, + command=self.resize, + ) + zoommenu.add_radiobutton( + label="Large", + variable=self._size, + underline=0, + value=18, + command=self.resize, + ) + zoommenu.add_radiobutton( + label="Huge", + variable=self._size, + underline=0, + value=24, + command=self.resize, + ) + menubar.add_cascade(label="Zoom", underline=0, menu=zoommenu) + + helpmenu = Menu(menubar, tearoff=0) + helpmenu.add_command(label="About", underline=0, command=self.about) + helpmenu.add_command( + label="Instructions", underline=0, command=self.help, accelerator="F1" + ) + menubar.add_cascade(label="Help", underline=0, menu=helpmenu) + + self._root.config(menu=menubar) + + # //////////////////////////////////////////////////////////// + # Selection Handling + # //////////////////////////////////////////////////////////// + + def _click_cv_edge(self, edge): + if edge != self._selection: + # Clicking on a new edge selects it. + self._select_edge(edge) + else: + # Repeated clicks on one edge cycle its trees. 
+ self._cv.cycle_tree() + # [XX] this can get confused if animation is running + # faster than the callbacks... + + def _select_matrix_edge(self, edge): + self._select_edge(edge) + self._cv.view_edge(edge) + + def _select_edge(self, edge): + self._selection = edge + # Update the chart view. + self._cv.markonly_edge(edge, "#f00") + self._cv.draw_tree(edge) + # Update the matrix view. + if self._matrix: + self._matrix.markonly_edge(edge) + if self._matrix: + self._matrix.view_edge(edge) + + def _deselect_edge(self): + self._selection = None + # Update the chart view. + self._cv.unmark_edge() + self._cv.erase_tree() + # Update the matrix view + if self._matrix: + self._matrix.unmark_edge() + + def _show_new_edge(self, edge): + self._display_rule(self._cp.current_chartrule()) + # Update the chart view. + self._cv.update() + self._cv.draw_tree(edge) + self._cv.markonly_edge(edge, "#0df") + self._cv.view_edge(edge) + # Update the matrix view. + if self._matrix: + self._matrix.update() + if self._matrix: + self._matrix.markonly_edge(edge) + if self._matrix: + self._matrix.view_edge(edge) + # Update the results view. + if self._results: + self._results.update(edge) + + # //////////////////////////////////////////////////////////// + # Help/usage + # //////////////////////////////////////////////////////////// + + def help(self, *e): + self._animating = 0 + # The default font's not very legible; try using 'fixed' instead. 
+ try: + ShowText( + self._root, + "Help: Chart Parser Application", + (__doc__ or "").strip(), + width=75, + font="fixed", + ) + except: + ShowText( + self._root, + "Help: Chart Parser Application", + (__doc__ or "").strip(), + width=75, + ) + + def about(self, *e): + ABOUT = "NLTK Chart Parser Application\n" + "Written by Edward Loper" + showinfo("About: Chart Parser Application", ABOUT) + + # //////////////////////////////////////////////////////////// + # File Menu + # //////////////////////////////////////////////////////////// + + CHART_FILE_TYPES = [("Pickle file", ".pickle"), ("All files", "*")] + GRAMMAR_FILE_TYPES = [ + ("Plaintext grammar file", ".cfg"), + ("Pickle file", ".pickle"), + ("All files", "*"), + ] + + def load_chart(self, *args): + "Load a chart from a pickle file" + filename = askopenfilename( + filetypes=self.CHART_FILE_TYPES, defaultextension=".pickle" + ) + if not filename: + return + try: + with open(filename, "rb") as infile: + chart = pickle.load(infile) + self._chart = chart + self._cv.update(chart) + if self._matrix: + self._matrix.set_chart(chart) + if self._matrix: + self._matrix.deselect_cell() + if self._results: + self._results.set_chart(chart) + self._cp.set_chart(chart) + except Exception as e: + raise + showerror("Error Loading Chart", "Unable to open file: %r" % filename) + + def save_chart(self, *args): + "Save a chart to a pickle file" + filename = asksaveasfilename( + filetypes=self.CHART_FILE_TYPES, defaultextension=".pickle" + ) + if not filename: + return + try: + with open(filename, "wb") as outfile: + pickle.dump(self._chart, outfile) + except Exception as e: + raise + showerror("Error Saving Chart", "Unable to open file: %r" % filename) + + def load_grammar(self, *args): + "Load a grammar from a pickle file" + filename = askopenfilename( + filetypes=self.GRAMMAR_FILE_TYPES, defaultextension=".cfg" + ) + if not filename: + return + try: + if filename.endswith(".pickle"): + with open(filename, "rb") as infile: + 
grammar = pickle.load(infile) + else: + with open(filename) as infile: + grammar = CFG.fromstring(infile.read()) + self.set_grammar(grammar) + except Exception as e: + showerror("Error Loading Grammar", "Unable to open file: %r" % filename) + + def save_grammar(self, *args): + filename = asksaveasfilename( + filetypes=self.GRAMMAR_FILE_TYPES, defaultextension=".cfg" + ) + if not filename: + return + try: + if filename.endswith(".pickle"): + with open(filename, "wb") as outfile: + pickle.dump((self._chart, self._tokens), outfile) + else: + with open(filename, "w") as outfile: + prods = self._grammar.productions() + start = [p for p in prods if p.lhs() == self._grammar.start()] + rest = [p for p in prods if p.lhs() != self._grammar.start()] + for prod in start: + outfile.write("%s\n" % prod) + for prod in rest: + outfile.write("%s\n" % prod) + except Exception as e: + showerror("Error Saving Grammar", "Unable to open file: %r" % filename) + + def reset(self, *args): + self._animating = 0 + self._reset_parser() + self._cv.update(self._chart) + if self._matrix: + self._matrix.set_chart(self._chart) + if self._matrix: + self._matrix.deselect_cell() + if self._results: + self._results.set_chart(self._chart) + + # //////////////////////////////////////////////////////////// + # Edit + # //////////////////////////////////////////////////////////// + + def edit_grammar(self, *e): + CFGEditor(self._root, self._grammar, self.set_grammar) + + def set_grammar(self, grammar): + self._grammar = grammar + self._cp.set_grammar(grammar) + if self._results: + self._results.set_grammar(grammar) + + def edit_sentence(self, *e): + sentence = " ".join(self._tokens) + title = "Edit Text" + instr = "Enter a new sentence to parse." 
        EntryDialog(self._root, sentence, instr, self.set_sentence, title)

    def set_sentence(self, sentence):
        """Replace the input sentence (whitespace-tokenized) and reset the chart."""
        self._tokens = list(sentence.split())
        self.reset()

    # ////////////////////////////////////////////////////////////
    # View Menu
    # ////////////////////////////////////////////////////////////

    def view_matrix(self, *e):
        """Open (or re-open) the chart-matrix view window."""
        if self._matrix is not None:
            self._matrix.destroy()
        self._matrix = ChartMatrixView(self._root, self._chart)
        self._matrix.add_callback("select", self._select_matrix_edge)

    def view_results(self, *e):
        """Open (or re-open) the parse-results view window."""
        if self._results is not None:
            self._results.destroy()
        self._results = ChartResultsView(self._root, self._chart, self._grammar)

    # ////////////////////////////////////////////////////////////
    # Zoom Menu
    # ////////////////////////////////////////////////////////////

    def resize(self):
        """Apply the font size selected in the Zoom menu."""
        self._animating = 0
        self.set_font_size(self._size.get())

    def set_font_size(self, size):
        """Set all of the application's fonts to ``size``."""
        self._cv.set_font_size(size)
        # Negative Tk font sizes are pixel sizes; keep the magnitude only.
        self._font.configure(size=-abs(size))
        self._boldfont.configure(size=-abs(size))
        self._sysfont.configure(size=-abs(size))

    def get_font_size(self):
        """Return the current font size (always positive)."""
        return abs(self._size.get())

    # ////////////////////////////////////////////////////////////
    # Parsing
    # ////////////////////////////////////////////////////////////

    def apply_strategy(self, strategy, edge_strategy=None):
        """Apply a parsing strategy (a list of chart rules) to the chart.

        If an animation is already running, stop it instead.  In step
        mode, ``edge_strategy`` (a rule class parameterized by an edge)
        is applied to the selected edge, if any.
        """
        # If we're animating, then stop.
        if self._animating:
            self._animating = 0
            return

        # Clear the rule display & mark.
        self._display_rule(None)
        # self._cv.unmark_edge()

        if self._step.get():
            selection = self._selection
            if (selection is not None) and (edge_strategy is not None):
                # Apply the given strategy to the selected edge.
                self._cp.set_strategy([edge_strategy(selection)])
                newedge = self._apply_strategy()

                # If it failed, then clear the selection.
                if newedge is None:
                    self._cv.unmark_edge()
                    self._selection = None
            else:
                # Step mode with no (usable) selection: apply one step of
                # the full strategy instead.
                self._cp.set_strategy(strategy)
                self._apply_strategy()

        else:
            self._cp.set_strategy(strategy)
            if self._animate.get():
                self._animating = 1
                self._animate_strategy()
            else:
                # Run the strategy to exhaustion without animating.
                for edge in self._cpstep:
                    if edge is None:
                        break
                self._cv.update()
                if self._matrix:
                    self._matrix.update()
                if self._results:
                    self._results.update()

    def _stop_animation(self, *e):
        self._animating = 0

    def _animate_strategy(self, speed=1):
        """Apply one rule, then reschedule ourselves on Tk's event loop.

        The delay depends on the Animate menu setting (1=slow, 2=normal,
        otherwise fast).  ``speed`` is unused, kept for interface
        compatibility.
        """
        if self._animating == 0:
            return
        if self._apply_strategy() is not None:
            if self._animate.get() == 0 or self._step.get() == 1:
                return
            if self._animate.get() == 1:
                self._root.after(3000, self._animate_strategy)
            elif self._animate.get() == 2:
                self._root.after(1000, self._animate_strategy)
            else:
                self._root.after(20, self._animate_strategy)

    def _apply_strategy(self):
        """Apply the current strategy once; display and return the new edge (or None)."""
        new_edge = next(self._cpstep)

        if new_edge is not None:
            self._show_new_edge(new_edge)
        return new_edge

    def _display_rule(self, rule):
        """Show ``rule`` in the rule label; clear the label if rule is None."""
        if rule is None:
            self._rulelabel2["text"] = ""
        else:
            name = str(rule)
            self._rulelabel2["text"] = name
            # NOTE(review): ``size`` is computed but never used here.
            size = self._cv.get_font_size()

    # ////////////////////////////////////////////////////////////
    # Parsing Strategies
    # ////////////////////////////////////////////////////////////

    # Basic rules:
    _TD_INIT = [TopDownInitRule()]
    _TD_PREDICT = [TopDownPredictRule()]
    _BU_RULE = [BottomUpPredictRule()]
    _BU_LC_RULE = [BottomUpPredictCombineRule()]
    _FUNDAMENTAL = [SingleEdgeFundamentalRule()]

    # Complete strategies:
    _TD_STRATEGY = _TD_INIT + _TD_PREDICT + _FUNDAMENTAL
    _BU_STRATEGY = _BU_RULE + _FUNDAMENTAL
    _BU_LC_STRATEGY = _BU_LC_RULE + _FUNDAMENTAL

    # Button callback functions:
    def top_down_init(self, *e):
        self.apply_strategy(self._TD_INIT, None)

    def top_down_predict(self, *e):
        self.apply_strategy(self._TD_PREDICT, TopDownPredictEdgeRule)

    def bottom_up(self, *e):
self.apply_strategy(self._BU_RULE, BottomUpEdgeRule) + + def bottom_up_leftcorner(self, *e): + self.apply_strategy(self._BU_LC_RULE, BottomUpLeftCornerEdgeRule) + + def fundamental(self, *e): + self.apply_strategy(self._FUNDAMENTAL, FundamentalEdgeRule) + + def bottom_up_strategy(self, *e): + self.apply_strategy(self._BU_STRATEGY, BottomUpEdgeRule) + + def bottom_up_leftcorner_strategy(self, *e): + self.apply_strategy(self._BU_LC_STRATEGY, BottomUpLeftCornerEdgeRule) + + def top_down_strategy(self, *e): + self.apply_strategy(self._TD_STRATEGY, TopDownPredictEdgeRule) + + +def app(): + grammar = CFG.fromstring( + """ + # Grammatical productions. + S -> NP VP + VP -> VP PP | V NP | V + NP -> Det N | NP PP + PP -> P NP + # Lexical productions. + NP -> 'John' | 'I' + Det -> 'the' | 'my' | 'a' + N -> 'dog' | 'cookie' | 'table' | 'cake' | 'fork' + V -> 'ate' | 'saw' + P -> 'on' | 'under' | 'with' + """ + ) + + sent = "John ate the cake on the table with a fork" + sent = "John ate the cake on the table" + tokens = list(sent.split()) + + print("grammar= (") + for rule in grammar.productions(): + print((" ", repr(rule) + ",")) + print(")") + print("tokens = %r" % tokens) + print('Calling "ChartParserApp(grammar, tokens)"...') + ChartParserApp(grammar, tokens).mainloop() + + +if __name__ == "__main__": + app() + + # Chart comparer: + # charts = ['/tmp/earley.pickle', + # '/tmp/topdown.pickle', + # '/tmp/bottomup.pickle'] + # ChartComparer(*charts).mainloop() + + # import profile + # profile.run('demo2()', '/tmp/profile.out') + # import pstats + # p = pstats.Stats('/tmp/profile.out') + # p.strip_dirs().sort_stats('time', 'cum').print_stats(60) + # p.strip_dirs().sort_stats('cum', 'time').print_stats(60) + +__all__ = ["app"] diff --git a/lib/python3.10/site-packages/nltk/app/chunkparser_app.py b/lib/python3.10/site-packages/nltk/app/chunkparser_app.py new file mode 100644 index 0000000000000000000000000000000000000000..54a10a1e7db3dde0f3a18447575130e658ba3c51 --- /dev/null +++ 
b/lib/python3.10/site-packages/nltk/app/chunkparser_app.py @@ -0,0 +1,1500 @@ +# Natural Language Toolkit: Regexp Chunk Parser Application +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +A graphical tool for exploring the regular expression based chunk +parser ``nltk.chunk.RegexpChunkParser``. +""" + +# Todo: Add a way to select the development set from the menubar. This +# might just need to be a selection box (conll vs treebank etc) plus +# configuration parameters to select what's being chunked (eg VP vs NP) +# and what part of the data is being used as the development set. + +import random +import re +import textwrap +import time +from tkinter import ( + Button, + Canvas, + Checkbutton, + Frame, + IntVar, + Label, + Menu, + Scrollbar, + Text, + Tk, +) +from tkinter.filedialog import askopenfilename, asksaveasfilename +from tkinter.font import Font + +from nltk.chunk import ChunkScore, RegexpChunkParser +from nltk.chunk.regexp import RegexpChunkRule +from nltk.corpus import conll2000, treebank_chunk +from nltk.draw.util import ShowText +from nltk.tree import Tree +from nltk.util import in_idle + + +class RegexpChunkApp: + """ + A graphical tool for exploring the regular expression based chunk + parser ``nltk.chunk.RegexpChunkParser``. + + See ``HELP`` for instructional text. + """ + + ##///////////////////////////////////////////////////////////////// + ## Help Text + ##///////////////////////////////////////////////////////////////// + + #: A dictionary mapping from part of speech tags to descriptions, + #: which is used in the help text. (This should probably live with + #: the conll and/or treebank corpus instead.) 
+ TAGSET = { + "CC": "Coordinating conjunction", + "PRP$": "Possessive pronoun", + "CD": "Cardinal number", + "RB": "Adverb", + "DT": "Determiner", + "RBR": "Adverb, comparative", + "EX": "Existential there", + "RBS": "Adverb, superlative", + "FW": "Foreign word", + "RP": "Particle", + "JJ": "Adjective", + "TO": "to", + "JJR": "Adjective, comparative", + "UH": "Interjection", + "JJS": "Adjective, superlative", + "VB": "Verb, base form", + "LS": "List item marker", + "VBD": "Verb, past tense", + "MD": "Modal", + "NNS": "Noun, plural", + "NN": "Noun, singular or masps", + "VBN": "Verb, past participle", + "VBZ": "Verb,3rd ps. sing. present", + "NNP": "Proper noun, singular", + "NNPS": "Proper noun plural", + "WDT": "wh-determiner", + "PDT": "Predeterminer", + "WP": "wh-pronoun", + "POS": "Possessive ending", + "WP$": "Possessive wh-pronoun", + "PRP": "Personal pronoun", + "WRB": "wh-adverb", + "(": "open parenthesis", + ")": "close parenthesis", + "``": "open quote", + ",": "comma", + "''": "close quote", + ".": "period", + "#": "pound sign (currency marker)", + "$": "dollar sign (currency marker)", + "IN": "Preposition/subord. conjunction", + "SYM": "Symbol (mathematical or scientific)", + "VBG": "Verb, gerund/present participle", + "VBP": "Verb, non-3rd ps. sing. present", + ":": "colon", + } + + #: Contents for the help box. This is a list of tuples, one for + #: each help page, where each tuple has four elements: + #: - A title (displayed as a tab) + #: - A string description of tabstops (see Tkinter.Text for details) + #: - The text contents for the help page. You can use expressions + #: like ... to colorize the text; see ``HELP_AUTOTAG`` + #: for a list of tags you can use for colorizing. + HELP = [ + ( + "Help", + "20", + "Welcome to the regular expression chunk-parser grammar editor. " + "You can use this editor to develop and test chunk parser grammars " + "based on NLTK's RegexpChunkParser class.\n\n" + # Help box. 
+ "Use this box ('Help') to learn more about the editor; click on the " + "tabs for help on specific topics:" + "\n" + "Rules: grammar rule types\n" + "Regexps: regular expression syntax\n" + "Tags: part of speech tags\n\n" + # Grammar. + "Use the upper-left box ('Grammar') to edit your grammar. " + "Each line of your grammar specifies a single 'rule', " + "which performs an action such as creating a chunk or merging " + "two chunks.\n\n" + # Dev set. + "The lower-left box ('Development Set') runs your grammar on the " + "development set, and displays the results. " + "Your grammar's chunks are highlighted, and " + "the correct (gold standard) chunks are " + "underlined. If they " + "match, they are displayed in green; otherwise, " + "they are displayed in red. The box displays a single " + "sentence from the development set at a time; use the scrollbar or " + "the next/previous buttons view additional sentences.\n\n" + # Performance + "The lower-right box ('Evaluation') tracks the performance of " + "your grammar on the development set. The 'precision' axis " + "indicates how many of your grammar's chunks are correct; and " + "the 'recall' axis indicates how many of the gold standard " + "chunks your system generated. Typically, you should try to " + "design a grammar that scores high on both metrics. The " + "exact precision and recall of the current grammar, as well " + "as their harmonic mean (the 'f-score'), are displayed in " + "the status bar at the bottom of the window.", + ), + ( + "Rules", + "10", + "

{...regexp...}

" + "\nChunk rule: creates new chunks from words matching " + "regexp.\n\n" + "

}...regexp...{

" + "\nStrip rule: removes words matching regexp from existing " + "chunks.\n\n" + "

...regexp1...}{...regexp2...

" + "\nSplit rule: splits chunks that match regexp1 followed by " + "regexp2 in two.\n\n" + "

...regexp...{}...regexp...

" + "\nMerge rule: joins consecutive chunks that match regexp1 " + "and regexp2\n", + ), + ( + "Regexps", + "10 60", + # "Regular Expression Syntax Summary:\n\n" + "

Pattern\t\tMatches...

\n" + "" + "\t<T>\ta word with tag T " + "(where T may be a regexp).\n" + "\tx?\tan optional x\n" + "\tx+\ta sequence of 1 or more x's\n" + "\tx*\ta sequence of 0 or more x's\n" + "\tx|y\tx or y\n" + "\t.\tmatches any character\n" + "\t(x)\tTreats x as a group\n" + "\t# x...\tTreats x... " + "(to the end of the line) as a comment\n" + "\t\\C\tmatches character C " + "(useful when C is a special character " + "like + or #)\n" + "" + "\n

Examples:

\n" + "" + "\t\n" + '\t\tMatches "cow/NN"\n' + '\t\tMatches "green/NN"\n' + "\t\n" + '\t\tMatches "eating/VBG"\n' + '\t\tMatches "ate/VBD"\n' + "\t
\n" + '\t\tMatches "on/IN the/DT car/NN"\n' + "\t?\n" + '\t\tMatches "ran/VBD"\n' + '\t\tMatches "slowly/RB ate/VBD"\n' + r"\t<\#> # This is a comment...\n" + '\t\tMatches "#/# 100/CD"\n' + "", + ), + ( + "Tags", + "10 60", + "

Part of Speech Tags:

\n" + + "" + + "<>" + + "\n", # this gets auto-substituted w/ self.TAGSET + ), + ] + + HELP_AUTOTAG = [ + ("red", dict(foreground="#a00")), + ("green", dict(foreground="#080")), + ("highlight", dict(background="#ddd")), + ("underline", dict(underline=True)), + ("h1", dict(underline=True)), + ("indent", dict(lmargin1=20, lmargin2=20)), + ("hangindent", dict(lmargin1=0, lmargin2=60)), + ("var", dict(foreground="#88f")), + ("regexp", dict(foreground="#ba7")), + ("match", dict(foreground="#6a6")), + ] + + ##///////////////////////////////////////////////////////////////// + ## Config Parameters + ##///////////////////////////////////////////////////////////////// + + _EVAL_DELAY = 1 + """If the user has not pressed any key for this amount of time (in + seconds), and the current grammar has not been evaluated, then + the eval demon will evaluate it.""" + + _EVAL_CHUNK = 15 + """The number of sentences that should be evaluated by the eval + demon each time it runs.""" + _EVAL_FREQ = 0.2 + """The frequency (in seconds) at which the eval demon is run""" + _EVAL_DEMON_MIN = 0.02 + """The minimum amount of time that the eval demon should take each time + it runs -- if it takes less than this time, _EVAL_CHUNK will be + modified upwards.""" + _EVAL_DEMON_MAX = 0.04 + """The maximum amount of time that the eval demon should take each time + it runs -- if it takes more than this time, _EVAL_CHUNK will be + modified downwards.""" + + _GRAMMARBOX_PARAMS = dict( + width=40, + height=12, + background="#efe", + highlightbackground="#efe", + highlightthickness=1, + relief="groove", + border=2, + wrap="word", + ) + _HELPBOX_PARAMS = dict( + width=15, + height=15, + background="#efe", + highlightbackground="#efe", + foreground="#555", + highlightthickness=1, + relief="groove", + border=2, + wrap="word", + ) + _DEVSETBOX_PARAMS = dict( + width=70, + height=10, + background="#eef", + highlightbackground="#eef", + highlightthickness=1, + relief="groove", + border=2, + wrap="word", + 
tabs=(30,), + ) + _STATUS_PARAMS = dict(background="#9bb", relief="groove", border=2) + _FONT_PARAMS = dict(family="helvetica", size=-20) + _FRAME_PARAMS = dict(background="#777", padx=2, pady=2, border=3) + _EVALBOX_PARAMS = dict( + background="#eef", + highlightbackground="#eef", + highlightthickness=1, + relief="groove", + border=2, + width=300, + height=280, + ) + _BUTTON_PARAMS = dict( + background="#777", activebackground="#777", highlightbackground="#777" + ) + _HELPTAB_BG_COLOR = "#aba" + _HELPTAB_FG_COLOR = "#efe" + + _HELPTAB_FG_PARAMS = dict(background="#efe") + _HELPTAB_BG_PARAMS = dict(background="#aba") + _HELPTAB_SPACER = 6 + + def normalize_grammar(self, grammar): + # Strip comments + grammar = re.sub(r"((\\.|[^#])*)(#.*)?", r"\1", grammar) + # Normalize whitespace + grammar = re.sub(" +", " ", grammar) + grammar = re.sub(r"\n\s+", r"\n", grammar) + grammar = grammar.strip() + # [xx] Hack: automatically backslash $! + grammar = re.sub(r"([^\\])\$", r"\1\\$", grammar) + return grammar + + def __init__( + self, + devset_name="conll2000", + devset=None, + grammar="", + chunk_label="NP", + tagset=None, + ): + """ + :param devset_name: The name of the development set; used for + display & for save files. If either the name 'treebank' + or the name 'conll2000' is used, and devset is None, then + devset will be set automatically. + :param devset: A list of chunked sentences + :param grammar: The initial grammar to display. + :param tagset: Dictionary from tags to string descriptions, used + for the help page. Defaults to ``self.TAGSET``. 
+ """ + self._chunk_label = chunk_label + + if tagset is None: + tagset = self.TAGSET + self.tagset = tagset + + # Named development sets: + if devset is None: + if devset_name == "conll2000": + devset = conll2000.chunked_sents("train.txt") # [:100] + elif devset == "treebank": + devset = treebank_chunk.chunked_sents() # [:100] + else: + raise ValueError("Unknown development set %s" % devset_name) + + self.chunker = None + """The chunker built from the grammar string""" + + self.grammar = grammar + """The unparsed grammar string""" + + self.normalized_grammar = None + """A normalized version of ``self.grammar``.""" + + self.grammar_changed = 0 + """The last time() that the grammar was changed.""" + + self.devset = devset + """The development set -- a list of chunked sentences.""" + + self.devset_name = devset_name + """The name of the development set (for save files).""" + + self.devset_index = -1 + """The index into the development set of the first instance + that's currently being viewed.""" + + self._last_keypress = 0 + """The time() when a key was most recently pressed""" + + self._history = [] + """A list of (grammar, precision, recall, fscore) tuples for + grammars that the user has already tried.""" + + self._history_index = 0 + """When the user is scrolling through previous grammars, this + is used to keep track of which grammar they're looking at.""" + + self._eval_grammar = None + """The grammar that is being currently evaluated by the eval + demon.""" + + self._eval_normalized_grammar = None + """A normalized copy of ``_eval_grammar``.""" + + self._eval_index = 0 + """The index of the next sentence in the development set that + should be looked at by the eval demon.""" + + self._eval_score = ChunkScore(chunk_label=chunk_label) + """The ``ChunkScore`` object that's used to keep track of the score + of the current grammar on the development set.""" + + # Set up the main window. 
+ top = self.top = Tk() + top.geometry("+50+50") + top.title("Regexp Chunk Parser App") + top.bind("", self.destroy) + + # Variable that restricts how much of the devset we look at. + self._devset_size = IntVar(top) + self._devset_size.set(100) + + # Set up all the tkinter widgets + self._init_fonts(top) + self._init_widgets(top) + self._init_bindings(top) + self._init_menubar(top) + self.grammarbox.focus() + + # If a grammar was given, then display it. + if grammar: + self.grammarbox.insert("end", grammar + "\n") + self.grammarbox.mark_set("insert", "1.0") + + # Display the first item in the development set + self.show_devset(0) + self.update() + + def _init_bindings(self, top): + top.bind("", self._devset_next) + top.bind("", self._devset_prev) + top.bind("", self.toggle_show_trace) + top.bind("", self.update) + top.bind("", lambda e: self.save_grammar()) + top.bind("", lambda e: self.load_grammar()) + self.grammarbox.bind("", self.toggle_show_trace) + self.grammarbox.bind("", self._devset_next) + self.grammarbox.bind("", self._devset_prev) + + # Redraw the eval graph when the window size changes + self.evalbox.bind("", self._eval_plot) + + def _init_fonts(self, top): + # TWhat's our font size (default=same as sysfont) + self._size = IntVar(top) + self._size.set(20) + self._font = Font(family="helvetica", size=-self._size.get()) + self._smallfont = Font( + family="helvetica", size=-(int(self._size.get() * 14 // 20)) + ) + + def _init_menubar(self, parent): + menubar = Menu(parent) + + filemenu = Menu(menubar, tearoff=0) + filemenu.add_command(label="Reset Application", underline=0, command=self.reset) + filemenu.add_command( + label="Save Current Grammar", + underline=0, + accelerator="Ctrl-s", + command=self.save_grammar, + ) + filemenu.add_command( + label="Load Grammar", + underline=0, + accelerator="Ctrl-o", + command=self.load_grammar, + ) + + filemenu.add_command( + label="Save Grammar History", underline=13, command=self.save_history + ) + + 
filemenu.add_command( + label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-q" + ) + menubar.add_cascade(label="File", underline=0, menu=filemenu) + + viewmenu = Menu(menubar, tearoff=0) + viewmenu.add_radiobutton( + label="Tiny", + variable=self._size, + underline=0, + value=10, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Small", + variable=self._size, + underline=0, + value=16, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Medium", + variable=self._size, + underline=0, + value=20, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Large", + variable=self._size, + underline=0, + value=24, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Huge", + variable=self._size, + underline=0, + value=34, + command=self.resize, + ) + menubar.add_cascade(label="View", underline=0, menu=viewmenu) + + devsetmenu = Menu(menubar, tearoff=0) + devsetmenu.add_radiobutton( + label="50 sentences", + variable=self._devset_size, + value=50, + command=self.set_devset_size, + ) + devsetmenu.add_radiobutton( + label="100 sentences", + variable=self._devset_size, + value=100, + command=self.set_devset_size, + ) + devsetmenu.add_radiobutton( + label="200 sentences", + variable=self._devset_size, + value=200, + command=self.set_devset_size, + ) + devsetmenu.add_radiobutton( + label="500 sentences", + variable=self._devset_size, + value=500, + command=self.set_devset_size, + ) + menubar.add_cascade(label="Development-Set", underline=0, menu=devsetmenu) + + helpmenu = Menu(menubar, tearoff=0) + helpmenu.add_command(label="About", underline=0, command=self.about) + menubar.add_cascade(label="Help", underline=0, menu=helpmenu) + + parent.config(menu=menubar) + + def toggle_show_trace(self, *e): + if self._showing_trace: + self.show_devset() + else: + self.show_trace() + return "break" + + _SCALE_N = 5 # center on the last 5 examples. 
+ _DRAW_LINES = False + + def _eval_plot(self, *e, **config): + width = config.get("width", self.evalbox.winfo_width()) + height = config.get("height", self.evalbox.winfo_height()) + + # Clear the canvas + self.evalbox.delete("all") + + # Draw the precision & recall labels. + tag = self.evalbox.create_text( + 10, height // 2 - 10, justify="left", anchor="w", text="Precision" + ) + left, right = self.evalbox.bbox(tag)[2] + 5, width - 10 + tag = self.evalbox.create_text( + left + (width - left) // 2, + height - 10, + anchor="s", + text="Recall", + justify="center", + ) + top, bot = 10, self.evalbox.bbox(tag)[1] - 10 + + # Draw masks for clipping the plot. + bg = self._EVALBOX_PARAMS["background"] + self.evalbox.lower( + self.evalbox.create_rectangle(0, 0, left - 1, 5000, fill=bg, outline=bg) + ) + self.evalbox.lower( + self.evalbox.create_rectangle(0, bot + 1, 5000, 5000, fill=bg, outline=bg) + ) + + # Calculate the plot's scale. + if self._autoscale.get() and len(self._history) > 1: + max_precision = max_recall = 0 + min_precision = min_recall = 1 + for i in range(1, min(len(self._history), self._SCALE_N + 1)): + grammar, precision, recall, fmeasure = self._history[-i] + min_precision = min(precision, min_precision) + min_recall = min(recall, min_recall) + max_precision = max(precision, max_precision) + max_recall = max(recall, max_recall) + # if max_precision-min_precision > max_recall-min_recall: + # min_recall -= (max_precision-min_precision)/2 + # max_recall += (max_precision-min_precision)/2 + # else: + # min_precision -= (max_recall-min_recall)/2 + # max_precision += (max_recall-min_recall)/2 + # if min_recall < 0: + # max_recall -= min_recall + # min_recall = 0 + # if min_precision < 0: + # max_precision -= min_precision + # min_precision = 0 + min_precision = max(min_precision - 0.01, 0) + min_recall = max(min_recall - 0.01, 0) + max_precision = min(max_precision + 0.01, 1) + max_recall = min(max_recall + 0.01, 1) + else: + min_precision = min_recall = 0 + 
max_precision = max_recall = 1 + + # Draw the axis lines & grid lines + for i in range(11): + x = left + (right - left) * ( + (i / 10.0 - min_recall) / (max_recall - min_recall) + ) + y = bot - (bot - top) * ( + (i / 10.0 - min_precision) / (max_precision - min_precision) + ) + if left < x < right: + self.evalbox.create_line(x, top, x, bot, fill="#888") + if top < y < bot: + self.evalbox.create_line(left, y, right, y, fill="#888") + self.evalbox.create_line(left, top, left, bot) + self.evalbox.create_line(left, bot, right, bot) + + # Display the plot's scale + self.evalbox.create_text( + left - 3, + bot, + justify="right", + anchor="se", + text="%d%%" % (100 * min_precision), + ) + self.evalbox.create_text( + left - 3, + top, + justify="right", + anchor="ne", + text="%d%%" % (100 * max_precision), + ) + self.evalbox.create_text( + left, + bot + 3, + justify="center", + anchor="nw", + text="%d%%" % (100 * min_recall), + ) + self.evalbox.create_text( + right, + bot + 3, + justify="center", + anchor="ne", + text="%d%%" % (100 * max_recall), + ) + + # Display the scores. 
+ prev_x = prev_y = None + for i, (_, precision, recall, fscore) in enumerate(self._history): + x = left + (right - left) * ( + (recall - min_recall) / (max_recall - min_recall) + ) + y = bot - (bot - top) * ( + (precision - min_precision) / (max_precision - min_precision) + ) + if i == self._history_index: + self.evalbox.create_oval( + x - 2, y - 2, x + 2, y + 2, fill="#0f0", outline="#000" + ) + self.status["text"] = ( + "Precision: %.2f%%\t" % (precision * 100) + + "Recall: %.2f%%\t" % (recall * 100) + + "F-score: %.2f%%" % (fscore * 100) + ) + else: + self.evalbox.lower( + self.evalbox.create_oval( + x - 2, y - 2, x + 2, y + 2, fill="#afa", outline="#8c8" + ) + ) + if prev_x is not None and self._eval_lines.get(): + self.evalbox.lower( + self.evalbox.create_line(prev_x, prev_y, x, y, fill="#8c8") + ) + prev_x, prev_y = x, y + + _eval_demon_running = False + + def _eval_demon(self): + if self.top is None: + return + if self.chunker is None: + self._eval_demon_running = False + return + + # Note our starting time. + t0 = time.time() + + # If are still typing, then wait for them to finish. + if ( + time.time() - self._last_keypress < self._EVAL_DELAY + and self.normalized_grammar != self._eval_normalized_grammar + ): + self._eval_demon_running = True + return self.top.after(int(self._EVAL_FREQ * 1000), self._eval_demon) + + # If the grammar changed, restart the evaluation. + if self.normalized_grammar != self._eval_normalized_grammar: + # Check if we've seen this grammar already. If so, then + # just use the old evaluation values. 
+ for (g, p, r, f) in self._history: + if self.normalized_grammar == self.normalize_grammar(g): + self._history.append((g, p, r, f)) + self._history_index = len(self._history) - 1 + self._eval_plot() + self._eval_demon_running = False + self._eval_normalized_grammar = None + return + self._eval_index = 0 + self._eval_score = ChunkScore(chunk_label=self._chunk_label) + self._eval_grammar = self.grammar + self._eval_normalized_grammar = self.normalized_grammar + + # If the grammar is empty, the don't bother evaluating it, or + # recording it in history -- the score will just be 0. + if self.normalized_grammar.strip() == "": + # self._eval_index = self._devset_size.get() + self._eval_demon_running = False + return + + # Score the next set of examples + for gold in self.devset[ + self._eval_index : min( + self._eval_index + self._EVAL_CHUNK, self._devset_size.get() + ) + ]: + guess = self._chunkparse(gold.leaves()) + self._eval_score.score(gold, guess) + + # update our index in the devset. + self._eval_index += self._EVAL_CHUNK + + # Check if we're done + if self._eval_index >= self._devset_size.get(): + self._history.append( + ( + self._eval_grammar, + self._eval_score.precision(), + self._eval_score.recall(), + self._eval_score.f_measure(), + ) + ) + self._history_index = len(self._history) - 1 + self._eval_plot() + self._eval_demon_running = False + self._eval_normalized_grammar = None + else: + progress = 100 * self._eval_index / self._devset_size.get() + self.status["text"] = "Evaluating on Development Set (%d%%)" % progress + self._eval_demon_running = True + self._adaptively_modify_eval_chunk(time.time() - t0) + self.top.after(int(self._EVAL_FREQ * 1000), self._eval_demon) + + def _adaptively_modify_eval_chunk(self, t): + """ + Modify _EVAL_CHUNK to try to keep the amount of time that the + eval demon takes between _EVAL_DEMON_MIN and _EVAL_DEMON_MAX. + + :param t: The amount of time that the eval demon took. 
        """
        # Too slow: shrink the chunk (by at least 1, by at most 10,
        # proportionally to the overshoot), but never below 5 sentences.
        if t > self._EVAL_DEMON_MAX and self._EVAL_CHUNK > 5:
            self._EVAL_CHUNK = min(
                self._EVAL_CHUNK - 1,
                max(
                    int(self._EVAL_CHUNK * (self._EVAL_DEMON_MAX / t)),
                    self._EVAL_CHUNK - 10,
                ),
            )
        # Too fast: grow the chunk (by at least 1, by at most 10).
        elif t < self._EVAL_DEMON_MIN:
            self._EVAL_CHUNK = max(
                self._EVAL_CHUNK + 1,
                min(
                    int(self._EVAL_CHUNK * (self._EVAL_DEMON_MIN / t)),
                    self._EVAL_CHUNK + 10,
                ),
            )

    def _init_widgets(self, top):
        """Build the main window: grammar editor (left), help tabs
        (right), development-set view (bottom left), evaluation plot
        (bottom right), and a status bar."""
        frame0 = Frame(top, **self._FRAME_PARAMS)
        frame0.grid_columnconfigure(0, weight=4)
        frame0.grid_columnconfigure(3, weight=2)
        frame0.grid_rowconfigure(1, weight=1)
        frame0.grid_rowconfigure(5, weight=1)

        # The grammar
        self.grammarbox = Text(frame0, font=self._font, **self._GRAMMARBOX_PARAMS)
        self.grammarlabel = Label(
            frame0,
            font=self._font,
            text="Grammar:",
            highlightcolor="black",
            background=self._GRAMMARBOX_PARAMS["background"],
        )
        self.grammarlabel.grid(column=0, row=0, sticky="SW")
        self.grammarbox.grid(column=0, row=1, sticky="NEWS")

        # Scroll bar for grammar
        grammar_scrollbar = Scrollbar(frame0, command=self.grammarbox.yview)
        grammar_scrollbar.grid(column=1, row=1, sticky="NWS")
        self.grammarbox.config(yscrollcommand=grammar_scrollbar.set)

        # grammar buttons
        bg = self._FRAME_PARAMS["background"]
        frame3 = Frame(frame0, background=bg)
        frame3.grid(column=0, row=2, sticky="EW")
        Button(
            frame3,
            text="Prev Grammar",
            command=self._history_prev,
            **self._BUTTON_PARAMS,
        ).pack(side="left")
        Button(
            frame3,
            text="Next Grammar",
            command=self._history_next,
            **self._BUTTON_PARAMS,
        ).pack(side="left")

        # Help box
        self.helpbox = Text(frame0, font=self._smallfont, **self._HELPBOX_PARAMS)
        self.helpbox.grid(column=3, row=1, sticky="NEWS")
        self.helptabs = {}
        bg = self._FRAME_PARAMS["background"]
        helptab_frame = Frame(frame0, background=bg)
        helptab_frame.grid(column=3, row=0, sticky="SW")
        for i, (tab, tabstops, text) in enumerate(self.HELP):
            label = Label(helptab_frame, text=tab, font=self._smallfont)
            label.grid(column=i * 2, row=0, sticky="S")
            # help_frame.grid_columnconfigure(i, weight=1)
            # label.pack(side='left')
            # NOTE(review): the event-sequence string below is empty; it looks
            # mangled -- presumably it should be a button-press sequence such
            # as "<ButtonPress-1>".  Confirm against upstream before changing.
            label.bind("", lambda e, tab=tab: self.show_help(tab))
            self.helptabs[tab] = label
            Frame(
                helptab_frame, height=1, width=self._HELPTAB_SPACER, background=bg
            ).grid(column=i * 2 + 1, row=0)
        self.helptabs[self.HELP[0][0]].configure(font=self._font)
        self.helpbox.tag_config("elide", elide=True)
        for (tag, params) in self.HELP_AUTOTAG:
            self.helpbox.tag_config("tag-%s" % tag, **params)
        self.show_help(self.HELP[0][0])

        # Scroll bar for helpbox
        help_scrollbar = Scrollbar(frame0, command=self.helpbox.yview)
        self.helpbox.config(yscrollcommand=help_scrollbar.set)
        help_scrollbar.grid(column=4, row=1, sticky="NWS")

        # The dev set
        frame4 = Frame(frame0, background=self._FRAME_PARAMS["background"])
        self.devsetbox = Text(frame4, font=self._font, **self._DEVSETBOX_PARAMS)
        self.devsetbox.pack(expand=True, fill="both")
        self.devsetlabel = Label(
            frame0,
            font=self._font,
            text="Development Set:",
            justify="right",
            background=self._DEVSETBOX_PARAMS["background"],
        )
        self.devsetlabel.grid(column=0, row=4, sticky="SW")
        frame4.grid(column=0, row=5, sticky="NEWS")

        # dev set scrollbars
        self.devset_scroll = Scrollbar(frame0, command=self._devset_scroll)
        self.devset_scroll.grid(column=1, row=5, sticky="NWS")
        self.devset_xscroll = Scrollbar(
            frame4, command=self.devsetbox.xview, orient="horiz"
        )
        self.devsetbox["xscrollcommand"] = self.devset_xscroll.set
        self.devset_xscroll.pack(side="bottom", fill="x")

        # dev set buttons
        bg = self._FRAME_PARAMS["background"]
        frame1 = Frame(frame0, background=bg)
        frame1.grid(column=0, row=7, sticky="EW")
        Button(
            frame1,
            text="Prev Example (Ctrl-p)",
            command=self._devset_prev,
            **self._BUTTON_PARAMS,
        ).pack(side="left")
        Button(
            frame1,
            text="Next Example (Ctrl-n)",
            command=self._devset_next,
            **self._BUTTON_PARAMS,
        ).pack(side="left")
        self.devset_button = Button(
            frame1,
            text="Show example",
            command=self.show_devset,
            state="disabled",
            **self._BUTTON_PARAMS,
        )
        self.devset_button.pack(side="right")
        self.trace_button = Button(
            frame1, text="Show trace", command=self.show_trace, **self._BUTTON_PARAMS
        )
        self.trace_button.pack(side="right")

        # evaluation box
        self.evalbox = Canvas(frame0, **self._EVALBOX_PARAMS)
        label = Label(
            frame0,
            font=self._font,
            text="Evaluation:",
            justify="right",
            background=self._EVALBOX_PARAMS["background"],
        )
        label.grid(column=3, row=4, sticky="SW")
        self.evalbox.grid(column=3, row=5, sticky="NEWS", columnspan=2)

        # evaluation box buttons
        bg = self._FRAME_PARAMS["background"]
        frame2 = Frame(frame0, background=bg)
        frame2.grid(column=3, row=7, sticky="EW")
        self._autoscale = IntVar(self.top)
        self._autoscale.set(False)
        Checkbutton(
            frame2,
            variable=self._autoscale,
            command=self._eval_plot,
            text="Zoom",
            **self._BUTTON_PARAMS,
        ).pack(side="left")
        self._eval_lines = IntVar(self.top)
        self._eval_lines.set(False)
        Checkbutton(
            frame2,
            variable=self._eval_lines,
            command=self._eval_plot,
            text="Lines",
            **self._BUTTON_PARAMS,
        ).pack(side="left")
        Button(frame2, text="History", **self._BUTTON_PARAMS).pack(side="right")

        # The status label
        self.status = Label(frame0, font=self._font, **self._STATUS_PARAMS)
        self.status.grid(column=0, row=9, sticky="NEW", padx=3, pady=2, columnspan=5)

        # Help box & devset box can't be edited.
        self.helpbox["state"] = "disabled"
        self.devsetbox["state"] = "disabled"

        # Spacers
        bg = self._FRAME_PARAMS["background"]
        Frame(frame0, height=10, width=0, background=bg).grid(column=0, row=3)
        Frame(frame0, height=0, width=10, background=bg).grid(column=2, row=0)
        Frame(frame0, height=6, width=0, background=bg).grid(column=0, row=8)

        # pack the frame.
        frame0.pack(fill="both", expand=True)

        # Set up colors for the devset box
        self.devsetbox.tag_config("true-pos", background="#afa", underline="True")
        self.devsetbox.tag_config("false-neg", underline="True", foreground="#800")
        self.devsetbox.tag_config("false-pos", background="#faa")
        self.devsetbox.tag_config("trace", foreground="#666", wrap="none")
        self.devsetbox.tag_config("wrapindent", lmargin2=30, wrap="none")
        self.devsetbox.tag_config("error", foreground="#800")

        # And for the grammarbox
        self.grammarbox.tag_config("error", background="#fec")
        self.grammarbox.tag_config("comment", foreground="#840")
        self.grammarbox.tag_config("angle", foreground="#00f")
        self.grammarbox.tag_config("brace", foreground="#0a0")
        self.grammarbox.tag_config("hangindent", lmargin1=0, lmargin2=40)

    # True when the devset box currently shows a rule-by-rule trace
    # instead of the plain development-set example.
    _showing_trace = False

    def show_trace(self, *e):
        """Replace the devset view with a step-by-step trace showing the
        tag sequence after each grammar rule is applied, color-coding
        true-positive / false-negative / false-positive chunks."""
        self._showing_trace = True
        self.trace_button["state"] = "disabled"
        self.devset_button["state"] = "normal"

        self.devsetbox["state"] = "normal"
        # self.devsetbox['wrap'] = 'none'
        self.devsetbox.delete("1.0", "end")
        self.devsetlabel["text"] = "Development Set (%d/%d)" % (
            (self.devset_index + 1, self._devset_size.get())
        )

        if self.chunker is None:
            self.devsetbox.insert("1.0", "Trace: waiting for a valid grammar.")
            self.devsetbox.tag_add("error", "1.0", "end")
            return  # can't do anything more

        gold_tree = self.devset[self.devset_index]
        rules = self.chunker.rules()

        # Calculate the tag sequence
        tagseq = "\t"
        charnum = [1]
        for wordnum, (word, pos) in enumerate(gold_tree.leaves()):
            tagseq += "%s " % pos
            charnum.append(len(tagseq))
        # Map (trace-step, word-index) -> column, and step -> text-widget line,
        # so _color_chunk can translate chunk spans into Text indices.
        self.charnum = {
            (i, j): charnum[j]
            for i in range(len(rules) + 1)
            for j in range(len(charnum))
        }
        self.linenum = {i: i * 2 + 2 for i in range(len(rules) + 1)}

        for i in range(len(rules) + 1):
            if i == 0:
                self.devsetbox.insert("end", "Start:\n")
                self.devsetbox.tag_add("trace", "end -2c linestart", "end -2c")
            else:
                self.devsetbox.insert("end", "Apply %s:\n" % rules[i - 1])
                self.devsetbox.tag_add("trace", "end -2c linestart", "end -2c")
            # Display the tag sequence.
            self.devsetbox.insert("end", tagseq + "\n")
            self.devsetbox.tag_add("wrapindent", "end -2c linestart", "end -2c")
            # Run a partial parser, and extract gold & test chunks
            # NOTE(review): `chunker` (the partial parser built from the
            # first i rules) is never used -- _chunkparse parses with
            # self.chunker, i.e. the *full* grammar.  Looks like a latent
            # bug; confirm intended behavior before changing.
            chunker = RegexpChunkParser(rules[:i])
            test_tree = self._chunkparse(gold_tree.leaves())
            gold_chunks = self._chunks(gold_tree)
            test_chunks = self._chunks(test_tree)
            # Compare them.
            for chunk in gold_chunks.intersection(test_chunks):
                self._color_chunk(i, chunk, "true-pos")
            for chunk in gold_chunks - test_chunks:
                self._color_chunk(i, chunk, "false-neg")
            for chunk in test_chunks - gold_chunks:
                self._color_chunk(i, chunk, "false-pos")
        self.devsetbox.insert("end", "Finished.\n")
        self.devsetbox.tag_add("trace", "end -2c linestart", "end -2c")

        # This is a hack, because the x-scrollbar isn't updating its
        # position right -- I'm not sure what the underlying cause is
        # though.  (This is on OS X w/ python 2.5)
        self.top.after(100, self.devset_xscroll.set, 0, 0.3)

    def show_help(self, tab):
        """Display the named help tab in the help box, substituting the
        tagset table and applying the HELP_AUTOTAG styling tags."""
        self.helpbox["state"] = "normal"
        self.helpbox.delete("1.0", "end")
        for (name, tabstops, text) in self.HELP:
            if name == tab:
                # Substitute the tagset listing placeholder.
                # NOTE(review): the placeholder literal "<>" (and the
                # closing-tag group in the regex below) look mangled --
                # presumably stripped markup such as "<<TAGSET>>" and
                # "(</tag>)".  Confirm against upstream.
                text = text.replace(
                    "<>",
                    "\n".join(
                        "\t%s\t%s" % item
                        for item in sorted(
                            list(self.tagset.items()),
                            key=lambda t_w: re.match(r"\w+", t_w[0])
                            and (0, t_w[0])
                            or (1, t_w[0]),
                        )
                    ),
                )

                self.helptabs[name].config(**self._HELPTAB_FG_PARAMS)
                self.helpbox.config(tabs=tabstops)
                self.helpbox.insert("1.0", text + "\n" * 20)
                C = "1.0 + %d chars"
                for (tag, params) in self.HELP_AUTOTAG:
                    pattern = f"(?s)(<{tag}>)(.*?)()"
                    for m in re.finditer(pattern, text):
                        self.helpbox.tag_add("elide", C % m.start(1), C % m.end(1))
                        self.helpbox.tag_add(
                            "tag-%s" % tag, C % m.start(2), C % m.end(2)
                        )
                        self.helpbox.tag_add("elide", C % m.start(3), C % m.end(3))
            else:
                self.helptabs[name].config(**self._HELPTAB_BG_PARAMS)
        self.helpbox["state"] = "disabled"

    def _history_prev(self, *e):
        # Keyboard/button callback: view the previous grammar in history.
        self._view_history(self._history_index - 1)
        return "break"

    def _history_next(self, *e):
        # Keyboard/button callback: view the next grammar in history.
        self._view_history(self._history_index + 1)
        return "break"

    def _view_history(self, index):
        """Display history entry ``index`` (clamped to valid range) in
        the grammar box, rebuild the chunker from it, and refresh the
        evaluation plot and devset display."""
        # Bounds & sanity checking:
        index = max(0, min(len(self._history) - 1, index))
        if not self._history:
            return
        # Already viewing the requested history item?
        if index == self._history_index:
            return
        # Show the requested grammar.  It will get added to _history
        # only if they edit it (causing self.update() to get run.)
        self.grammarbox["state"] = "normal"
        self.grammarbox.delete("1.0", "end")
        self.grammarbox.insert("end", self._history[index][0])
        self.grammarbox.mark_set("insert", "1.0")
        self._history_index = index
        self._syntax_highlight_grammar(self._history[index][0])
        # Record the normalized grammar & regenerate the chunker.
        self.normalized_grammar = self.normalize_grammar(self._history[index][0])
        if self.normalized_grammar:
            rules = [
                RegexpChunkRule.fromstring(line)
                for line in self.normalized_grammar.split("\n")
            ]
        else:
            rules = []
        self.chunker = RegexpChunkParser(rules)
        # Show the score.
        self._eval_plot()
        # Update the devset box
        self._highlight_devset()
        if self._showing_trace:
            self.show_trace()
        # Update the grammar label
        if self._history_index < len(self._history) - 1:
            self.grammarlabel["text"] = "Grammar {}/{}:".format(
                self._history_index + 1,
                len(self._history),
            )
        else:
            self.grammarlabel["text"] = "Grammar:"

    def _devset_next(self, *e):
        # Keyboard/button callback: advance one devset example.
        self._devset_scroll("scroll", 1, "page")
        return "break"

    def _devset_prev(self, *e):
        # Keyboard/button callback: go back one devset example.
        self._devset_scroll("scroll", -1, "page")
        return "break"

    def destroy(self, *e):
        """Close the window; safe to call more than once."""
        if self.top is None:
            return
        self.top.destroy()
        self.top = None

    def _devset_scroll(self, command, *args):
        """Scrollbar callback: translate Tk scroll commands ('scroll'
        by units/pages, or 'moveto' a fraction) into show_devset calls,
        re-showing the trace view if it was active."""
        N = 1  # size of a page -- one sentence.
        showing_trace = self._showing_trace
        if command == "scroll" and args[1].startswith("unit"):
            self.show_devset(self.devset_index + int(args[0]))
        elif command == "scroll" and args[1].startswith("page"):
            self.show_devset(self.devset_index + N * int(args[0]))
        elif command == "moveto":
            self.show_devset(int(float(args[0]) * self._devset_size.get()))
        else:
            assert 0, f"bad scroll command {command} {args}"
        if showing_trace:
            self.show_trace()

    def show_devset(self, index=None):
        """Display development-set example ``index`` (default: current)
        as tagged text, with chunks highlighted by the current grammar."""
        if index is None:
            index = self.devset_index

        # Bounds checking
        index = min(max(0, index), self._devset_size.get() - 1)

        # Nothing to do if we're already showing this example.
        if index == self.devset_index and not self._showing_trace:
            return
        self.devset_index = index

        self._showing_trace = False
        self.trace_button["state"] = "normal"
        self.devset_button["state"] = "disabled"

        # Clear the text box.
        self.devsetbox["state"] = "normal"
        self.devsetbox["wrap"] = "word"
        self.devsetbox.delete("1.0", "end")
        self.devsetlabel["text"] = "Development Set (%d/%d)" % (
            (self.devset_index + 1, self._devset_size.get())
        )

        # Add the sentences
        sample = self.devset[self.devset_index : self.devset_index + 1]
        # charnum maps (sentence, word-index) -> column offset; linenum maps
        # sentence -> Text-widget line.  Both are used by _color_chunk.
        self.charnum = {}
        self.linenum = {0: 1}
        for sentnum, sent in enumerate(sample):
            linestr = ""
            for wordnum, (word, pos) in enumerate(sent.leaves()):
                self.charnum[sentnum, wordnum] = len(linestr)
                linestr += f"{word}/{pos} "
                self.charnum[sentnum, wordnum + 1] = len(linestr)
            self.devsetbox.insert("end", linestr[:-1] + "\n\n")

        # Highlight chunks in the dev set
        if self.chunker is not None:
            self._highlight_devset()
        self.devsetbox["state"] = "disabled"

        # Update the scrollbar
        first = self.devset_index / self._devset_size.get()
        last = (self.devset_index + 2) / self._devset_size.get()
        self.devset_scroll.set(first, last)

    def _chunks(self, tree):
        """Return the set of (start, end) word spans of the chunks
        (subtrees labeled with self._chunk_label) in ``tree``."""
        chunks = set()
        wordnum = 0
        for child in tree:
            if isinstance(child, Tree):
                if child.label() == self._chunk_label:
                    chunks.add((wordnum, wordnum + len(child)))
                wordnum += len(child)
            else:
                wordnum += 1
        return chunks

    def _syntax_highlight_grammar(self, grammar):
        """Re-apply syntax-highlighting tags (comments, angle brackets,
        braces) to the grammar text box for the given grammar string."""
        if self.top is None:
            return
        self.grammarbox.tag_remove("comment", "1.0", "end")
        self.grammarbox.tag_remove("angle", "1.0", "end")
        self.grammarbox.tag_remove("brace", "1.0", "end")
        self.grammarbox.tag_add("hangindent", "1.0", "end")
        for lineno, line in enumerate(grammar.split("\n")):
            if not line.strip():
                continue
            # Group 2 is the comment part of the line (if any); escaped
            # characters ("\#") don't start a comment.
            m = re.match(r"(\\.|[^#])*(#.*)?", line)
            comment_start = None
            if m.group(2):
                comment_start = m.start(2)
                s = "%d.%d" % (lineno + 1, m.start(2))
                e = "%d.%d" % (lineno + 1, m.end(2))
                self.grammarbox.tag_add("comment", s, e)
            for m in re.finditer("[<>{}]", line):
                # Don't highlight delimiters inside the comment.
                if comment_start is not None and m.start() >= comment_start:
                    break
                s = "%d.%d" % (lineno + 1, m.start())
                e = "%d.%d" % (lineno + 1, m.end())
                if m.group() in "<>":
                    self.grammarbox.tag_add("angle", s, e)
                else:
                    self.grammarbox.tag_add("brace", s, e)

    def _grammarcheck(self, grammar):
        """Mark any grammar line that fails to parse as a
        RegexpChunkRule with the 'error' tag."""
        if self.top is None:
            return
        self.grammarbox.tag_remove("error", "1.0", "end")
        self._grammarcheck_errs = []
        for lineno, line in enumerate(grammar.split("\n")):
            # Strip comments (respecting escaped "#"), then whitespace.
            line = re.sub(r"((\\.|[^#])*)(#.*)?", r"\1", line)
            line = line.strip()
            if line:
                try:
                    RegexpChunkRule.fromstring(line)
                except ValueError as e:
                    self.grammarbox.tag_add(
                        "error", "%s.0" % (lineno + 1), "%s.0 lineend" % (lineno + 1)
                    )
        self.status["text"] = ""

    def update(self, *event):
        """Re-read the grammar from the text box and, if it changed,
        rebuild the chunker, refresh highlighting, and (re)start the
        evaluation demon."""
        # Record when update was called (for grammarcheck)
        if event:
            self._last_keypress = time.time()

        # Read the grammar from the Text box.
        self.grammar = grammar = self.grammarbox.get("1.0", "end")

        # If the grammar hasn't changed, do nothing:
        normalized_grammar = self.normalize_grammar(grammar)
        if normalized_grammar == self.normalized_grammar:
            return
        else:
            self.normalized_grammar = normalized_grammar

        # If the grammar has changed, and we're looking at history,
        # then stop looking at history.
        if self._history_index < len(self._history) - 1:
            self.grammarlabel["text"] = "Grammar:"

        self._syntax_highlight_grammar(grammar)

        # The grammar has changed; try parsing it.  If it doesn't
        # parse, do nothing.  (flag error location?)
        try:
            # Note: the normalized grammar has no blank lines.
            if normalized_grammar:
                rules = [
                    RegexpChunkRule.fromstring(line)
                    for line in normalized_grammar.split("\n")
                ]
            else:
                rules = []
        except ValueError as e:
            # Use the un-normalized grammar for error highlighting.
            self._grammarcheck(grammar)
            self.chunker = None
            return

        self.chunker = RegexpChunkParser(rules)
        self.grammarbox.tag_remove("error", "1.0", "end")
        self.grammar_changed = time.time()
        # Display the results
        if self._showing_trace:
            self.show_trace()
        else:
            self._highlight_devset()
        # Start the eval demon
        if not self._eval_demon_running:
            self._eval_demon()

    def _highlight_devset(self, sample=None):
        """Re-chunk the displayed devset sentences with the current
        grammar and color each chunk span as a true positive, false
        negative, or false positive against the gold annotation.

        :param sample: Gold trees to highlight; defaults to the
            currently-displayed example.
        """
        if sample is None:
            sample = self.devset[self.devset_index : self.devset_index + 1]

        self.devsetbox.tag_remove("true-pos", "1.0", "end")
        self.devsetbox.tag_remove("false-neg", "1.0", "end")
        self.devsetbox.tag_remove("false-pos", "1.0", "end")

        # Run the grammar on the test cases.
        for sentnum, gold_tree in enumerate(sample):
            # Run the chunk parser
            test_tree = self._chunkparse(gold_tree.leaves())
            # Extract gold & test chunks
            gold_chunks = self._chunks(gold_tree)
            test_chunks = self._chunks(test_tree)
            # Compare them.
            for chunk in gold_chunks.intersection(test_chunks):
                self._color_chunk(sentnum, chunk, "true-pos")
            for chunk in gold_chunks - test_chunks:
                self._color_chunk(sentnum, chunk, "false-neg")
            for chunk in test_chunks - gold_chunks:
                self._color_chunk(sentnum, chunk, "false-pos")

    def _chunkparse(self, words):
        """Parse ``words`` with the current chunker; on a grammar error,
        flag the whole grammar box and return the input unchunked."""
        try:
            return self.chunker.parse(words)
        except (ValueError, IndexError) as e:
            # There's an error somewhere in the grammar, but we're not sure
            # exactly where, so just mark the whole grammar as bad.
+ # E.g., this is caused by: "({})" + self.grammarbox.tag_add("error", "1.0", "end") + # Treat it as tagging nothing: + return words + + def _color_chunk(self, sentnum, chunk, tag): + start, end = chunk + self.devsetbox.tag_add( + tag, + f"{self.linenum[sentnum]}.{self.charnum[sentnum, start]}", + f"{self.linenum[sentnum]}.{self.charnum[sentnum, end] - 1}", + ) + + def reset(self): + # Clear various variables + self.chunker = None + self.grammar = None + self.normalized_grammar = None + self.grammar_changed = 0 + self._history = [] + self._history_index = 0 + # Update the on-screen display. + self.grammarbox.delete("1.0", "end") + self.show_devset(0) + self.update() + # self._eval_plot() + + SAVE_GRAMMAR_TEMPLATE = ( + "# Regexp Chunk Parsing Grammar\n" + "# Saved %(date)s\n" + "#\n" + "# Development set: %(devset)s\n" + "# Precision: %(precision)s\n" + "# Recall: %(recall)s\n" + "# F-score: %(fscore)s\n\n" + "%(grammar)s\n" + ) + + def save_grammar(self, filename=None): + if not filename: + ftypes = [("Chunk Gramamr", ".chunk"), ("All files", "*")] + filename = asksaveasfilename(filetypes=ftypes, defaultextension=".chunk") + if not filename: + return + if self._history and self.normalized_grammar == self.normalize_grammar( + self._history[-1][0] + ): + precision, recall, fscore = ( + "%.2f%%" % (100 * v) for v in self._history[-1][1:] + ) + elif self.chunker is None: + precision = recall = fscore = "Grammar not well formed" + else: + precision = recall = fscore = "Not finished evaluation yet" + + with open(filename, "w") as outfile: + outfile.write( + self.SAVE_GRAMMAR_TEMPLATE + % dict( + date=time.ctime(), + devset=self.devset_name, + precision=precision, + recall=recall, + fscore=fscore, + grammar=self.grammar.strip(), + ) + ) + + def load_grammar(self, filename=None): + if not filename: + ftypes = [("Chunk Gramamr", ".chunk"), ("All files", "*")] + filename = askopenfilename(filetypes=ftypes, defaultextension=".chunk") + if not filename: + return + 
self.grammarbox.delete("1.0", "end") + self.update() + with open(filename) as infile: + grammar = infile.read() + grammar = re.sub( + r"^\# Regexp Chunk Parsing Grammar[\s\S]*" "F-score:.*\n", "", grammar + ).lstrip() + self.grammarbox.insert("1.0", grammar) + self.update() + + def save_history(self, filename=None): + if not filename: + ftypes = [("Chunk Gramamr History", ".txt"), ("All files", "*")] + filename = asksaveasfilename(filetypes=ftypes, defaultextension=".txt") + if not filename: + return + + with open(filename, "w") as outfile: + outfile.write("# Regexp Chunk Parsing Grammar History\n") + outfile.write("# Saved %s\n" % time.ctime()) + outfile.write("# Development set: %s\n" % self.devset_name) + for i, (g, p, r, f) in enumerate(self._history): + hdr = ( + "Grammar %d/%d (precision=%.2f%%, recall=%.2f%%, " + "fscore=%.2f%%)" + % (i + 1, len(self._history), p * 100, r * 100, f * 100) + ) + outfile.write("\n%s\n" % hdr) + outfile.write("".join(" %s\n" % line for line in g.strip().split())) + + if not ( + self._history + and self.normalized_grammar + == self.normalize_grammar(self._history[-1][0]) + ): + if self.chunker is None: + outfile.write("\nCurrent Grammar (not well-formed)\n") + else: + outfile.write("\nCurrent Grammar (not evaluated)\n") + outfile.write( + "".join(" %s\n" % line for line in self.grammar.strip().split()) + ) + + def about(self, *e): + ABOUT = "NLTK RegExp Chunk Parser Application\n" + "Written by Edward Loper" + TITLE = "About: Regular Expression Chunk Parser Application" + try: + from tkinter.messagebox import Message + + Message(message=ABOUT, title=TITLE).show() + except: + ShowText(self.top, TITLE, ABOUT) + + def set_devset_size(self, size=None): + if size is not None: + self._devset_size.set(size) + self._devset_size.set(min(len(self.devset), self._devset_size.get())) + self.show_devset(1) + self.show_devset(0) + # what about history? Evaluated at diff dev set sizes! 
+ + def resize(self, size=None): + if size is not None: + self._size.set(size) + size = self._size.get() + self._font.configure(size=-(abs(size))) + self._smallfont.configure(size=min(-10, -(abs(size)) * 14 // 20)) + + def mainloop(self, *args, **kwargs): + """ + Enter the Tkinter mainloop. This function must be called if + this demo is created from a non-interactive program (e.g. + from a secript); otherwise, the demo will close as soon as + the script completes. + """ + if in_idle(): + return + self.top.mainloop(*args, **kwargs) + + +def app(): + RegexpChunkApp().mainloop() + + +if __name__ == "__main__": + app() + +__all__ = ["app"] diff --git a/lib/python3.10/site-packages/nltk/app/collocations_app.py b/lib/python3.10/site-packages/nltk/app/collocations_app.py new file mode 100644 index 0000000000000000000000000000000000000000..19c661368fd9e96d1a4bf1a47ebfbd07a4bb3d80 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/app/collocations_app.py @@ -0,0 +1,438 @@ +# Natural Language Toolkit: Collocations Application +# Much of the GUI code is imported from concordance.py; We intend to merge these tools together +# Copyright (C) 2001-2023 NLTK Project +# Author: Sumukh Ghodke +# URL: +# For license information, see LICENSE.TXT +# + + +import queue as q +import threading +from tkinter import ( + END, + LEFT, + SUNKEN, + Button, + Frame, + IntVar, + Label, + Menu, + OptionMenu, + Scrollbar, + StringVar, + Text, + Tk, +) +from tkinter.font import Font + +from nltk.corpus import ( + alpino, + brown, + cess_cat, + cess_esp, + floresta, + indian, + mac_morpho, + machado, + nps_chat, + sinica_treebank, + treebank, +) +from nltk.probability import FreqDist +from nltk.util import in_idle + +CORPUS_LOADED_EVENT = "<>" +ERROR_LOADING_CORPUS_EVENT = "<>" +POLL_INTERVAL = 100 + +_DEFAULT = "English: Brown Corpus (Humor)" +_CORPORA = { + "Catalan: CESS-CAT Corpus": lambda: cess_cat.words(), + "English: Brown Corpus": lambda: brown.words(), + "English: Brown Corpus (Press)": 
lambda: brown.words( + categories=["news", "editorial", "reviews"] + ), + "English: Brown Corpus (Religion)": lambda: brown.words(categories="religion"), + "English: Brown Corpus (Learned)": lambda: brown.words(categories="learned"), + "English: Brown Corpus (Science Fiction)": lambda: brown.words( + categories="science_fiction" + ), + "English: Brown Corpus (Romance)": lambda: brown.words(categories="romance"), + "English: Brown Corpus (Humor)": lambda: brown.words(categories="humor"), + "English: NPS Chat Corpus": lambda: nps_chat.words(), + "English: Wall Street Journal Corpus": lambda: treebank.words(), + "Chinese: Sinica Corpus": lambda: sinica_treebank.words(), + "Dutch: Alpino Corpus": lambda: alpino.words(), + "Hindi: Indian Languages Corpus": lambda: indian.words(files="hindi.pos"), + "Portuguese: Floresta Corpus (Portugal)": lambda: floresta.words(), + "Portuguese: MAC-MORPHO Corpus (Brazil)": lambda: mac_morpho.words(), + "Portuguese: Machado Corpus (Brazil)": lambda: machado.words(), + "Spanish: CESS-ESP Corpus": lambda: cess_esp.words(), +} + + +class CollocationsView: + _BACKGROUND_COLOUR = "#FFF" # white + + def __init__(self): + self.queue = q.Queue() + self.model = CollocationsModel(self.queue) + self.top = Tk() + self._init_top(self.top) + self._init_menubar() + self._init_widgets(self.top) + self.load_corpus(self.model.DEFAULT_CORPUS) + self.after = self.top.after(POLL_INTERVAL, self._poll) + + def _init_top(self, top): + top.geometry("550x650+50+50") + top.title("NLTK Collocations List") + top.bind("", self.destroy) + top.protocol("WM_DELETE_WINDOW", self.destroy) + top.minsize(550, 650) + + def _init_widgets(self, parent): + self.main_frame = Frame( + parent, dict(background=self._BACKGROUND_COLOUR, padx=1, pady=1, border=1) + ) + self._init_corpus_select(self.main_frame) + self._init_results_box(self.main_frame) + self._init_paging(self.main_frame) + self._init_status(self.main_frame) + self.main_frame.pack(fill="both", expand=True) + + def 
_init_corpus_select(self, parent): + innerframe = Frame(parent, background=self._BACKGROUND_COLOUR) + self.var = StringVar(innerframe) + self.var.set(self.model.DEFAULT_CORPUS) + Label( + innerframe, + justify=LEFT, + text=" Corpus: ", + background=self._BACKGROUND_COLOUR, + padx=2, + pady=1, + border=0, + ).pack(side="left") + + other_corpora = list(self.model.CORPORA.keys()).remove( + self.model.DEFAULT_CORPUS + ) + om = OptionMenu( + innerframe, + self.var, + self.model.DEFAULT_CORPUS, + command=self.corpus_selected, + *self.model.non_default_corpora() + ) + om["borderwidth"] = 0 + om["highlightthickness"] = 1 + om.pack(side="left") + innerframe.pack(side="top", fill="x", anchor="n") + + def _init_status(self, parent): + self.status = Label( + parent, + justify=LEFT, + relief=SUNKEN, + background=self._BACKGROUND_COLOUR, + border=0, + padx=1, + pady=0, + ) + self.status.pack(side="top", anchor="sw") + + def _init_menubar(self): + self._result_size = IntVar(self.top) + menubar = Menu(self.top) + + filemenu = Menu(menubar, tearoff=0, borderwidth=0) + filemenu.add_command( + label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-q" + ) + menubar.add_cascade(label="File", underline=0, menu=filemenu) + + editmenu = Menu(menubar, tearoff=0) + rescntmenu = Menu(editmenu, tearoff=0) + rescntmenu.add_radiobutton( + label="20", + variable=self._result_size, + underline=0, + value=20, + command=self.set_result_size, + ) + rescntmenu.add_radiobutton( + label="50", + variable=self._result_size, + underline=0, + value=50, + command=self.set_result_size, + ) + rescntmenu.add_radiobutton( + label="100", + variable=self._result_size, + underline=0, + value=100, + command=self.set_result_size, + ) + rescntmenu.invoke(1) + editmenu.add_cascade(label="Result Count", underline=0, menu=rescntmenu) + + menubar.add_cascade(label="Edit", underline=0, menu=editmenu) + self.top.config(menu=menubar) + + def set_result_size(self, **kwargs): + self.model.result_count = 
self._result_size.get() + + def _init_results_box(self, parent): + innerframe = Frame(parent) + i1 = Frame(innerframe) + i2 = Frame(innerframe) + vscrollbar = Scrollbar(i1, borderwidth=1) + hscrollbar = Scrollbar(i2, borderwidth=1, orient="horiz") + self.results_box = Text( + i1, + font=Font(family="courier", size="16"), + state="disabled", + borderwidth=1, + yscrollcommand=vscrollbar.set, + xscrollcommand=hscrollbar.set, + wrap="none", + width="40", + height="20", + exportselection=1, + ) + self.results_box.pack(side="left", fill="both", expand=True) + vscrollbar.pack(side="left", fill="y", anchor="e") + vscrollbar.config(command=self.results_box.yview) + hscrollbar.pack(side="left", fill="x", expand=True, anchor="w") + hscrollbar.config(command=self.results_box.xview) + # there is no other way of avoiding the overlap of scrollbars while using pack layout manager!!! + Label(i2, text=" ", background=self._BACKGROUND_COLOUR).pack( + side="left", anchor="e" + ) + i1.pack(side="top", fill="both", expand=True, anchor="n") + i2.pack(side="bottom", fill="x", anchor="s") + innerframe.pack(side="top", fill="both", expand=True) + + def _init_paging(self, parent): + innerframe = Frame(parent, background=self._BACKGROUND_COLOUR) + self.prev = prev = Button( + innerframe, + text="Previous", + command=self.previous, + width="10", + borderwidth=1, + highlightthickness=1, + state="disabled", + ) + prev.pack(side="left", anchor="center") + self.next = next = Button( + innerframe, + text="Next", + command=self.__next__, + width="10", + borderwidth=1, + highlightthickness=1, + state="disabled", + ) + next.pack(side="right", anchor="center") + innerframe.pack(side="top", fill="y") + self.reset_current_page() + + def reset_current_page(self): + self.current_page = -1 + + def _poll(self): + try: + event = self.queue.get(block=False) + except q.Empty: + pass + else: + if event == CORPUS_LOADED_EVENT: + self.handle_corpus_loaded(event) + elif event == ERROR_LOADING_CORPUS_EVENT: + 
self.handle_error_loading_corpus(event) + self.after = self.top.after(POLL_INTERVAL, self._poll) + + def handle_error_loading_corpus(self, event): + self.status["text"] = "Error in loading " + self.var.get() + self.unfreeze_editable() + self.clear_results_box() + self.freeze_editable() + self.reset_current_page() + + def handle_corpus_loaded(self, event): + self.status["text"] = self.var.get() + " is loaded" + self.unfreeze_editable() + self.clear_results_box() + self.reset_current_page() + # self.next() + collocations = self.model.next(self.current_page + 1) + self.write_results(collocations) + self.current_page += 1 + + def corpus_selected(self, *args): + new_selection = self.var.get() + self.load_corpus(new_selection) + + def previous(self): + self.freeze_editable() + collocations = self.model.prev(self.current_page - 1) + self.current_page = self.current_page - 1 + self.clear_results_box() + self.write_results(collocations) + self.unfreeze_editable() + + def __next__(self): + self.freeze_editable() + collocations = self.model.next(self.current_page + 1) + self.clear_results_box() + self.write_results(collocations) + self.current_page += 1 + self.unfreeze_editable() + + def load_corpus(self, selection): + if self.model.selected_corpus != selection: + self.status["text"] = "Loading " + selection + "..." 
            self.freeze_editable()
            self.model.load_corpus(selection)

    def freeze_editable(self):
        # Disable paging while a corpus load / page render is in flight.
        self.prev["state"] = "disabled"
        self.next["state"] = "disabled"

    def clear_results_box(self):
        self.results_box["state"] = "normal"
        self.results_box.delete("1.0", END)
        self.results_box["state"] = "disabled"

    def fire_event(self, event):
        # Firing an event so that rendering of widgets happen in the mainloop thread
        self.top.event_generate(event, when="tail")

    def destroy(self, *e):
        """Close the window, cancelling the pending poll; safe to call
        more than once."""
        if self.top is None:
            return
        self.top.after_cancel(self.after)
        self.top.destroy()
        self.top = None

    def mainloop(self, *args, **kwargs):
        """Enter the Tk mainloop (no-op when running under IDLE)."""
        if in_idle():
            return
        self.top.mainloop(*args, **kwargs)

    def unfreeze_editable(self):
        self.set_paging_button_states()

    def set_paging_button_states(self):
        # Enable prev/next only when there is a page in that direction.
        if self.current_page == -1 or self.current_page == 0:
            self.prev["state"] = "disabled"
        else:
            self.prev["state"] = "normal"
        if self.model.is_last_page(self.current_page):
            self.next["state"] = "disabled"
        else:
            self.next["state"] = "normal"

    def write_results(self, results):
        """Write one (word1, word2) collocation pair per line into the
        (read-only) results box."""
        self.results_box["state"] = "normal"
        row = 1
        for each in results:
            self.results_box.insert(str(row) + ".0", each[0] + " " + each[1] + "\n")
            row += 1
        self.results_box["state"] = "disabled"


class CollocationsModel:
    """Holds the scored collocation list for the selected corpus and
    serves it to the view one page at a time.  Corpus loading and
    scoring happen in a LoadCorpus worker thread, which reports back
    through ``queue``."""

    def __init__(self, queue):
        self.result_count = None
        self.selected_corpus = None
        self.collocations = None
        self.CORPORA = _CORPORA
        self.DEFAULT_CORPUS = _DEFAULT
        self.queue = queue
        self.reset_results()

    def reset_results(self):
        # Pages materialized so far, and how many items they consumed.
        self.result_pages = []
        self.results_returned = 0

    def load_corpus(self, name):
        """Start a background thread that loads and scores ``name``."""
        self.selected_corpus = name
        self.collocations = None
        runner_thread = self.LoadCorpus(name, self)
        runner_thread.start()
        self.reset_results()

    def non_default_corpora(self):
        """Return all corpus labels except the default, sorted."""
        copy = []
        copy.extend(list(self.CORPORA.keys()))
        copy.remove(self.DEFAULT_CORPUS)
        copy.sort()
        return copy

    def is_last_page(self, number):
        if number < len(self.result_pages):
            return False
        # For pages not yet materialized, check whether enough scored
        # collocations remain to fill them.
        return self.results_returned + (
            number - len(self.result_pages)
        ) * self.result_count >= len(self.collocations)

    def next(self, page):
        """Return page ``page``, materializing any intermediate pages
        (``result_count`` items each) on demand."""
        if (len(self.result_pages) - 1) < page:
            for i in range(page - (len(self.result_pages) - 1)):
                self.result_pages.append(
                    self.collocations[
                        self.results_returned : self.results_returned
                        + self.result_count
                    ]
                )
                self.results_returned += self.result_count
        return self.result_pages[page]

    def prev(self, page):
        """Return the already-materialized page ``page`` (empty for -1)."""
        if page == -1:
            return []
        return self.result_pages[page]

    class LoadCorpus(threading.Thread):
        """Worker thread: load the corpus words, score bigram
        collocations, and signal completion/failure via the queue."""

        def __init__(self, name, model):
            threading.Thread.__init__(self)
            self.model, self.name = model, name

        def run(self):
            try:
                words = self.model.CORPORA[self.name]()
                from operator import itemgetter

                # Ignore very short words; count adjacent pairs and unigrams.
                text = [w for w in words if len(w) > 2]
                fd = FreqDist(tuple(text[i : i + 2]) for i in range(len(text) - 1))
                vocab = FreqDist(text)
                # Score pairs by cubed pair frequency over the product of the
                # word frequencies, then rank best-first.
                scored = [
                    ((w1, w2), fd[(w1, w2)] ** 3 / (vocab[w1] * vocab[w2]))
                    for w1, w2 in fd
                ]
                scored.sort(key=itemgetter(1), reverse=True)
                self.model.collocations = list(map(itemgetter(0), scored))
                self.model.queue.put(CORPUS_LOADED_EVENT)
            except Exception as e:
                # Best-effort: report the failure to the GUI thread rather
                # than dying silently inside the worker.
                print(e)
                self.model.queue.put(ERROR_LOADING_CORPUS_EVENT)


# def collocations():
# colloc_strings = [w1 + ' ' + w2 for w1, w2 in self._collocations[:num]]


def app():
    c = CollocationsView()
    c.mainloop()


if __name__ == "__main__":
    app()

__all__ = ["app"]
diff --git a/lib/python3.10/site-packages/nltk/app/concordance_app.py b/lib/python3.10/site-packages/nltk/app/concordance_app.py
new file mode 100644
index 0000000000000000000000000000000000000000..8bd9a991a0a969f87bf03986a915a0af18cd9b5f
--- /dev/null
+++ b/lib/python3.10/site-packages/nltk/app/concordance_app.py
@@ -0,0 +1,709 @@
# Natural Language Toolkit: Concordance Application
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Sumukh Ghodke
# URL:
+# For license information, see LICENSE.TXT + +import queue as q +import re +import threading +from tkinter import ( + END, + LEFT, + SUNKEN, + Button, + Entry, + Frame, + IntVar, + Label, + Menu, + OptionMenu, + Scrollbar, + StringVar, + Text, + Tk, +) +from tkinter.font import Font + +from nltk.corpus import ( + alpino, + brown, + cess_cat, + cess_esp, + floresta, + indian, + mac_morpho, + nps_chat, + sinica_treebank, + treebank, +) +from nltk.draw.util import ShowText +from nltk.util import in_idle + +WORD_OR_TAG = "[^/ ]+" +BOUNDARY = r"\b" + +CORPUS_LOADED_EVENT = "<>" +SEARCH_TERMINATED_EVENT = "<>" +SEARCH_ERROR_EVENT = "<>" +ERROR_LOADING_CORPUS_EVENT = "<>" + +POLL_INTERVAL = 50 + +# NB All corpora must be specified in a lambda expression so as not to be +# loaded when the module is imported. + +_DEFAULT = "English: Brown Corpus (Humor, simplified)" +_CORPORA = { + "Catalan: CESS-CAT Corpus (simplified)": lambda: cess_cat.tagged_sents( + tagset="universal" + ), + "English: Brown Corpus": lambda: brown.tagged_sents(), + "English: Brown Corpus (simplified)": lambda: brown.tagged_sents( + tagset="universal" + ), + "English: Brown Corpus (Press, simplified)": lambda: brown.tagged_sents( + categories=["news", "editorial", "reviews"], tagset="universal" + ), + "English: Brown Corpus (Religion, simplified)": lambda: brown.tagged_sents( + categories="religion", tagset="universal" + ), + "English: Brown Corpus (Learned, simplified)": lambda: brown.tagged_sents( + categories="learned", tagset="universal" + ), + "English: Brown Corpus (Science Fiction, simplified)": lambda: brown.tagged_sents( + categories="science_fiction", tagset="universal" + ), + "English: Brown Corpus (Romance, simplified)": lambda: brown.tagged_sents( + categories="romance", tagset="universal" + ), + "English: Brown Corpus (Humor, simplified)": lambda: brown.tagged_sents( + categories="humor", tagset="universal" + ), + "English: NPS Chat Corpus": lambda: nps_chat.tagged_posts(), + "English: NPS 
Chat Corpus (simplified)": lambda: nps_chat.tagged_posts( + tagset="universal" + ), + "English: Wall Street Journal Corpus": lambda: treebank.tagged_sents(), + "English: Wall Street Journal Corpus (simplified)": lambda: treebank.tagged_sents( + tagset="universal" + ), + "Chinese: Sinica Corpus": lambda: sinica_treebank.tagged_sents(), + "Chinese: Sinica Corpus (simplified)": lambda: sinica_treebank.tagged_sents( + tagset="universal" + ), + "Dutch: Alpino Corpus": lambda: alpino.tagged_sents(), + "Dutch: Alpino Corpus (simplified)": lambda: alpino.tagged_sents( + tagset="universal" + ), + "Hindi: Indian Languages Corpus": lambda: indian.tagged_sents(files="hindi.pos"), + "Hindi: Indian Languages Corpus (simplified)": lambda: indian.tagged_sents( + files="hindi.pos", tagset="universal" + ), + "Portuguese: Floresta Corpus (Portugal)": lambda: floresta.tagged_sents(), + "Portuguese: Floresta Corpus (Portugal, simplified)": lambda: floresta.tagged_sents( + tagset="universal" + ), + "Portuguese: MAC-MORPHO Corpus (Brazil)": lambda: mac_morpho.tagged_sents(), + "Portuguese: MAC-MORPHO Corpus (Brazil, simplified)": lambda: mac_morpho.tagged_sents( + tagset="universal" + ), + "Spanish: CESS-ESP Corpus (simplified)": lambda: cess_esp.tagged_sents( + tagset="universal" + ), +} + + +class ConcordanceSearchView: + _BACKGROUND_COLOUR = "#FFF" # white + + # Colour of highlighted results + _HIGHLIGHT_WORD_COLOUR = "#F00" # red + _HIGHLIGHT_WORD_TAG = "HL_WRD_TAG" + + _HIGHLIGHT_LABEL_COLOUR = "#C0C0C0" # dark grey + _HIGHLIGHT_LABEL_TAG = "HL_LBL_TAG" + + # Percentage of text left of the scrollbar position + _FRACTION_LEFT_TEXT = 0.30 + + def __init__(self): + self.queue = q.Queue() + self.model = ConcordanceSearchModel(self.queue) + self.top = Tk() + self._init_top(self.top) + self._init_menubar() + self._init_widgets(self.top) + self.load_corpus(self.model.DEFAULT_CORPUS) + self.after = self.top.after(POLL_INTERVAL, self._poll) + + def _init_top(self, top): + 
top.geometry("950x680+50+50") + top.title("NLTK Concordance Search") + top.bind("", self.destroy) + top.protocol("WM_DELETE_WINDOW", self.destroy) + top.minsize(950, 680) + + def _init_widgets(self, parent): + self.main_frame = Frame( + parent, dict(background=self._BACKGROUND_COLOUR, padx=1, pady=1, border=1) + ) + self._init_corpus_select(self.main_frame) + self._init_query_box(self.main_frame) + self._init_results_box(self.main_frame) + self._init_paging(self.main_frame) + self._init_status(self.main_frame) + self.main_frame.pack(fill="both", expand=True) + + def _init_menubar(self): + self._result_size = IntVar(self.top) + self._cntx_bf_len = IntVar(self.top) + self._cntx_af_len = IntVar(self.top) + menubar = Menu(self.top) + + filemenu = Menu(menubar, tearoff=0, borderwidth=0) + filemenu.add_command( + label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-q" + ) + menubar.add_cascade(label="File", underline=0, menu=filemenu) + + editmenu = Menu(menubar, tearoff=0) + rescntmenu = Menu(editmenu, tearoff=0) + rescntmenu.add_radiobutton( + label="20", + variable=self._result_size, + underline=0, + value=20, + command=self.set_result_size, + ) + rescntmenu.add_radiobutton( + label="50", + variable=self._result_size, + underline=0, + value=50, + command=self.set_result_size, + ) + rescntmenu.add_radiobutton( + label="100", + variable=self._result_size, + underline=0, + value=100, + command=self.set_result_size, + ) + rescntmenu.invoke(1) + editmenu.add_cascade(label="Result Count", underline=0, menu=rescntmenu) + + cntxmenu = Menu(editmenu, tearoff=0) + cntxbfmenu = Menu(cntxmenu, tearoff=0) + cntxbfmenu.add_radiobutton( + label="60 characters", + variable=self._cntx_bf_len, + underline=0, + value=60, + command=self.set_cntx_bf_len, + ) + cntxbfmenu.add_radiobutton( + label="80 characters", + variable=self._cntx_bf_len, + underline=0, + value=80, + command=self.set_cntx_bf_len, + ) + cntxbfmenu.add_radiobutton( + label="100 characters", + 
variable=self._cntx_bf_len, + underline=0, + value=100, + command=self.set_cntx_bf_len, + ) + cntxbfmenu.invoke(1) + cntxmenu.add_cascade(label="Before", underline=0, menu=cntxbfmenu) + + cntxafmenu = Menu(cntxmenu, tearoff=0) + cntxafmenu.add_radiobutton( + label="70 characters", + variable=self._cntx_af_len, + underline=0, + value=70, + command=self.set_cntx_af_len, + ) + cntxafmenu.add_radiobutton( + label="90 characters", + variable=self._cntx_af_len, + underline=0, + value=90, + command=self.set_cntx_af_len, + ) + cntxafmenu.add_radiobutton( + label="110 characters", + variable=self._cntx_af_len, + underline=0, + value=110, + command=self.set_cntx_af_len, + ) + cntxafmenu.invoke(1) + cntxmenu.add_cascade(label="After", underline=0, menu=cntxafmenu) + + editmenu.add_cascade(label="Context", underline=0, menu=cntxmenu) + + menubar.add_cascade(label="Edit", underline=0, menu=editmenu) + + self.top.config(menu=menubar) + + def set_result_size(self, **kwargs): + self.model.result_count = self._result_size.get() + + def set_cntx_af_len(self, **kwargs): + self._char_after = self._cntx_af_len.get() + + def set_cntx_bf_len(self, **kwargs): + self._char_before = self._cntx_bf_len.get() + + def _init_corpus_select(self, parent): + innerframe = Frame(parent, background=self._BACKGROUND_COLOUR) + self.var = StringVar(innerframe) + self.var.set(self.model.DEFAULT_CORPUS) + Label( + innerframe, + justify=LEFT, + text=" Corpus: ", + background=self._BACKGROUND_COLOUR, + padx=2, + pady=1, + border=0, + ).pack(side="left") + + other_corpora = list(self.model.CORPORA.keys()).remove( + self.model.DEFAULT_CORPUS + ) + om = OptionMenu( + innerframe, + self.var, + self.model.DEFAULT_CORPUS, + command=self.corpus_selected, + *self.model.non_default_corpora() + ) + om["borderwidth"] = 0 + om["highlightthickness"] = 1 + om.pack(side="left") + innerframe.pack(side="top", fill="x", anchor="n") + + def _init_status(self, parent): + self.status = Label( + parent, + justify=LEFT, + 
relief=SUNKEN, + background=self._BACKGROUND_COLOUR, + border=0, + padx=1, + pady=0, + ) + self.status.pack(side="top", anchor="sw") + + def _init_query_box(self, parent): + innerframe = Frame(parent, background=self._BACKGROUND_COLOUR) + another = Frame(innerframe, background=self._BACKGROUND_COLOUR) + self.query_box = Entry(another, width=60) + self.query_box.pack(side="left", fill="x", pady=25, anchor="center") + self.search_button = Button( + another, + text="Search", + command=self.search, + borderwidth=1, + highlightthickness=1, + ) + self.search_button.pack(side="left", fill="x", pady=25, anchor="center") + self.query_box.bind("", self.search_enter_keypress_handler) + another.pack() + innerframe.pack(side="top", fill="x", anchor="n") + + def search_enter_keypress_handler(self, *event): + self.search() + + def _init_results_box(self, parent): + innerframe = Frame(parent) + i1 = Frame(innerframe) + i2 = Frame(innerframe) + vscrollbar = Scrollbar(i1, borderwidth=1) + hscrollbar = Scrollbar(i2, borderwidth=1, orient="horiz") + self.results_box = Text( + i1, + font=Font(family="courier", size="16"), + state="disabled", + borderwidth=1, + yscrollcommand=vscrollbar.set, + xscrollcommand=hscrollbar.set, + wrap="none", + width="40", + height="20", + exportselection=1, + ) + self.results_box.pack(side="left", fill="both", expand=True) + self.results_box.tag_config( + self._HIGHLIGHT_WORD_TAG, foreground=self._HIGHLIGHT_WORD_COLOUR + ) + self.results_box.tag_config( + self._HIGHLIGHT_LABEL_TAG, foreground=self._HIGHLIGHT_LABEL_COLOUR + ) + vscrollbar.pack(side="left", fill="y", anchor="e") + vscrollbar.config(command=self.results_box.yview) + hscrollbar.pack(side="left", fill="x", expand=True, anchor="w") + hscrollbar.config(command=self.results_box.xview) + # there is no other way of avoiding the overlap of scrollbars while using pack layout manager!!! 
+ Label(i2, text=" ", background=self._BACKGROUND_COLOUR).pack( + side="left", anchor="e" + ) + i1.pack(side="top", fill="both", expand=True, anchor="n") + i2.pack(side="bottom", fill="x", anchor="s") + innerframe.pack(side="top", fill="both", expand=True) + + def _init_paging(self, parent): + innerframe = Frame(parent, background=self._BACKGROUND_COLOUR) + self.prev = prev = Button( + innerframe, + text="Previous", + command=self.previous, + width="10", + borderwidth=1, + highlightthickness=1, + state="disabled", + ) + prev.pack(side="left", anchor="center") + self.next = next = Button( + innerframe, + text="Next", + command=self.__next__, + width="10", + borderwidth=1, + highlightthickness=1, + state="disabled", + ) + next.pack(side="right", anchor="center") + innerframe.pack(side="top", fill="y") + self.current_page = 0 + + def previous(self): + self.clear_results_box() + self.freeze_editable() + self.model.prev(self.current_page - 1) + + def __next__(self): + self.clear_results_box() + self.freeze_editable() + self.model.next(self.current_page + 1) + + def about(self, *e): + ABOUT = "NLTK Concordance Search Demo\n" + TITLE = "About: NLTK Concordance Search Demo" + try: + from tkinter.messagebox import Message + + Message(message=ABOUT, title=TITLE, parent=self.main_frame).show() + except: + ShowText(self.top, TITLE, ABOUT) + + def _bind_event_handlers(self): + self.top.bind(CORPUS_LOADED_EVENT, self.handle_corpus_loaded) + self.top.bind(SEARCH_TERMINATED_EVENT, self.handle_search_terminated) + self.top.bind(SEARCH_ERROR_EVENT, self.handle_search_error) + self.top.bind(ERROR_LOADING_CORPUS_EVENT, self.handle_error_loading_corpus) + + def _poll(self): + try: + event = self.queue.get(block=False) + except q.Empty: + pass + else: + if event == CORPUS_LOADED_EVENT: + self.handle_corpus_loaded(event) + elif event == SEARCH_TERMINATED_EVENT: + self.handle_search_terminated(event) + elif event == SEARCH_ERROR_EVENT: + self.handle_search_error(event) + elif event == 
ERROR_LOADING_CORPUS_EVENT: + self.handle_error_loading_corpus(event) + self.after = self.top.after(POLL_INTERVAL, self._poll) + + def handle_error_loading_corpus(self, event): + self.status["text"] = "Error in loading " + self.var.get() + self.unfreeze_editable() + self.clear_all() + self.freeze_editable() + + def handle_corpus_loaded(self, event): + self.status["text"] = self.var.get() + " is loaded" + self.unfreeze_editable() + self.clear_all() + self.query_box.focus_set() + + def handle_search_terminated(self, event): + # todo: refactor the model such that it is less state sensitive + results = self.model.get_results() + self.write_results(results) + self.status["text"] = "" + if len(results) == 0: + self.status["text"] = "No results found for " + self.model.query + else: + self.current_page = self.model.last_requested_page + self.unfreeze_editable() + self.results_box.xview_moveto(self._FRACTION_LEFT_TEXT) + + def handle_search_error(self, event): + self.status["text"] = "Error in query " + self.model.query + self.unfreeze_editable() + + def corpus_selected(self, *args): + new_selection = self.var.get() + self.load_corpus(new_selection) + + def load_corpus(self, selection): + if self.model.selected_corpus != selection: + self.status["text"] = "Loading " + selection + "..." 
+ self.freeze_editable() + self.model.load_corpus(selection) + + def search(self): + self.current_page = 0 + self.clear_results_box() + self.model.reset_results() + query = self.query_box.get() + if len(query.strip()) == 0: + return + self.status["text"] = "Searching for " + query + self.freeze_editable() + self.model.search(query, self.current_page + 1) + + def write_results(self, results): + self.results_box["state"] = "normal" + row = 1 + for each in results: + sent, pos1, pos2 = each[0].strip(), each[1], each[2] + if len(sent) != 0: + if pos1 < self._char_before: + sent, pos1, pos2 = self.pad(sent, pos1, pos2) + sentence = sent[pos1 - self._char_before : pos1 + self._char_after] + if not row == len(results): + sentence += "\n" + self.results_box.insert(str(row) + ".0", sentence) + word_markers, label_markers = self.words_and_labels(sent, pos1, pos2) + for marker in word_markers: + self.results_box.tag_add( + self._HIGHLIGHT_WORD_TAG, + str(row) + "." + str(marker[0]), + str(row) + "." + str(marker[1]), + ) + for marker in label_markers: + self.results_box.tag_add( + self._HIGHLIGHT_LABEL_TAG, + str(row) + "." + str(marker[0]), + str(row) + "." 
+ str(marker[1]), + ) + row += 1 + self.results_box["state"] = "disabled" + + def words_and_labels(self, sentence, pos1, pos2): + search_exp = sentence[pos1:pos2] + words, labels = [], [] + labeled_words = search_exp.split(" ") + index = 0 + for each in labeled_words: + if each == "": + index += 1 + else: + word, label = each.split("/") + words.append( + (self._char_before + index, self._char_before + index + len(word)) + ) + index += len(word) + 1 + labels.append( + (self._char_before + index, self._char_before + index + len(label)) + ) + index += len(label) + index += 1 + return words, labels + + def pad(self, sent, hstart, hend): + if hstart >= self._char_before: + return sent, hstart, hend + d = self._char_before - hstart + sent = "".join([" "] * d) + sent + return sent, hstart + d, hend + d + + def destroy(self, *e): + if self.top is None: + return + self.top.after_cancel(self.after) + self.top.destroy() + self.top = None + + def clear_all(self): + self.query_box.delete(0, END) + self.model.reset_query() + self.clear_results_box() + + def clear_results_box(self): + self.results_box["state"] = "normal" + self.results_box.delete("1.0", END) + self.results_box["state"] = "disabled" + + def freeze_editable(self): + self.query_box["state"] = "disabled" + self.search_button["state"] = "disabled" + self.prev["state"] = "disabled" + self.next["state"] = "disabled" + + def unfreeze_editable(self): + self.query_box["state"] = "normal" + self.search_button["state"] = "normal" + self.set_paging_button_states() + + def set_paging_button_states(self): + if self.current_page == 0 or self.current_page == 1: + self.prev["state"] = "disabled" + else: + self.prev["state"] = "normal" + if self.model.has_more_pages(self.current_page): + self.next["state"] = "normal" + else: + self.next["state"] = "disabled" + + def fire_event(self, event): + # Firing an event so that rendering of widgets happen in the mainloop thread + self.top.event_generate(event, when="tail") + + def 
mainloop(self, *args, **kwargs): + if in_idle(): + return + self.top.mainloop(*args, **kwargs) + + +class ConcordanceSearchModel: + def __init__(self, queue): + self.queue = queue + self.CORPORA = _CORPORA + self.DEFAULT_CORPUS = _DEFAULT + self.selected_corpus = None + self.reset_query() + self.reset_results() + self.result_count = None + self.last_sent_searched = 0 + + def non_default_corpora(self): + copy = [] + copy.extend(list(self.CORPORA.keys())) + copy.remove(self.DEFAULT_CORPUS) + copy.sort() + return copy + + def load_corpus(self, name): + self.selected_corpus = name + self.tagged_sents = [] + runner_thread = self.LoadCorpus(name, self) + runner_thread.start() + + def search(self, query, page): + self.query = query + self.last_requested_page = page + self.SearchCorpus(self, page, self.result_count).start() + + def next(self, page): + self.last_requested_page = page + if len(self.results) < page: + self.search(self.query, page) + else: + self.queue.put(SEARCH_TERMINATED_EVENT) + + def prev(self, page): + self.last_requested_page = page + self.queue.put(SEARCH_TERMINATED_EVENT) + + def reset_results(self): + self.last_sent_searched = 0 + self.results = [] + self.last_page = None + + def reset_query(self): + self.query = None + + def set_results(self, page, resultset): + self.results.insert(page - 1, resultset) + + def get_results(self): + return self.results[self.last_requested_page - 1] + + def has_more_pages(self, page): + if self.results == [] or self.results[0] == []: + return False + if self.last_page is None: + return True + return page < self.last_page + + class LoadCorpus(threading.Thread): + def __init__(self, name, model): + threading.Thread.__init__(self) + self.model, self.name = model, name + + def run(self): + try: + ts = self.model.CORPORA[self.name]() + self.model.tagged_sents = [ + " ".join(w + "/" + t for (w, t) in sent) for sent in ts + ] + self.model.queue.put(CORPUS_LOADED_EVENT) + except Exception as e: + print(e) + 
self.model.queue.put(ERROR_LOADING_CORPUS_EVENT) + + class SearchCorpus(threading.Thread): + def __init__(self, model, page, count): + self.model, self.count, self.page = model, count, page + threading.Thread.__init__(self) + + def run(self): + q = self.processed_query() + sent_pos, i, sent_count = [], 0, 0 + for sent in self.model.tagged_sents[self.model.last_sent_searched :]: + try: + m = re.search(q, sent) + except re.error: + self.model.reset_results() + self.model.queue.put(SEARCH_ERROR_EVENT) + return + if m: + sent_pos.append((sent, m.start(), m.end())) + i += 1 + if i > self.count: + self.model.last_sent_searched += sent_count - 1 + break + sent_count += 1 + if self.count >= len(sent_pos): + self.model.last_sent_searched += sent_count - 1 + self.model.last_page = self.page + self.model.set_results(self.page, sent_pos) + else: + self.model.set_results(self.page, sent_pos[:-1]) + self.model.queue.put(SEARCH_TERMINATED_EVENT) + + def processed_query(self): + new = [] + for term in self.model.query.split(): + term = re.sub(r"\.", r"[^/ ]", term) + if re.match("[A-Z]+$", term): + new.append(BOUNDARY + WORD_OR_TAG + "/" + term + BOUNDARY) + elif "/" in term: + new.append(BOUNDARY + term + BOUNDARY) + else: + new.append(BOUNDARY + term + "/" + WORD_OR_TAG + BOUNDARY) + return " ".join(new) + + +def app(): + d = ConcordanceSearchView() + d.mainloop() + + +if __name__ == "__main__": + app() + +__all__ = ["app"] diff --git a/lib/python3.10/site-packages/nltk/app/nemo_app.py b/lib/python3.10/site-packages/nltk/app/nemo_app.py new file mode 100644 index 0000000000000000000000000000000000000000..df0ceb1be59e40bb48289f4f1411653789ca7a17 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/app/nemo_app.py @@ -0,0 +1,163 @@ +# Finding (and Replacing) Nemo, Version 1.1, Aristide Grange 2006/06/06 +# https://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/496783 + +""" +Finding (and Replacing) Nemo + +Instant Regular Expressions +Created by Aristide Grange +""" +import 
itertools +import re +from tkinter import SEL_FIRST, SEL_LAST, Frame, Label, PhotoImage, Scrollbar, Text, Tk + +windowTitle = "Finding (and Replacing) Nemo" +initialFind = r"n(.*?)e(.*?)m(.*?)o" +initialRepl = r"M\1A\2K\3I" +initialText = """\ +Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. +Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. +Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. +Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. +""" +images = { + "FIND": "R0lGODlhMAAiAPcAMf/////37//35//n1v97Off///f/9/f37/fexvfOvfeEQvd7QvdrQvdrKfdaKfdSMfdSIe/v9+/v7+/v5+/n3u/e1u/Wxu/Gre+1lO+tnO+thO+Ua+97Y+97Oe97Me9rOe9rMe9jOe9jMe9jIe9aMefe5+fe3ufezuece+eEWudzQudaIedSIedKMedKIedCKedCId7e1t7Wzt7Oxt7Gvd69vd69rd61pd6ljN6UjN6Ue96EY95zY95rUt5rQt5jMd5SId5KIdbn59be3tbGztbGvda1rdaEa9Z7a9Z7WtZzQtZzOdZzMdZjMdZaQtZSOdZSMdZKMdZCKdZCGNY5Ic7W1s7Oxs7Gtc69xs69tc69rc6tpc6llM6clM6cjM6Ue86EY85zWs5rSs5SKc5KKc5KGMa1tcatrcalvcalnMaUpcZ7c8ZzMcZrUsZrOcZrMcZaQsZSOcZSMcZKMcZCKcZCGMYxIcYxGL3Gxr21tb21rb2lpb2crb2cjL2UnL2UlL2UhL2Ec717Wr17Ur1zWr1rMb1jUr1KMb1KIb1CIb0xGLWlrbWlpbWcnLWEe7V7c7VzY7VzUrVSKbVKMbVCMbVCIbU5KbUxIbUxEK2lta2lpa2clK2UjK2MnK2MlK2Ea617e61za61rY61rMa1jSq1aUq1aSq1SQq1KKa0xEKWlnKWcnKWUnKWUhKWMjKWEa6Vza6VrWqVjMaVaUqVaKaVSMaVCMaU5KaUxIaUxGJyclJyMe5yElJyEhJx7e5x7c5xrOZxaQpxSOZxKQpw5IZSMhJSEjJR7c5Rre5RrY5RrUpRSQpRSKZRCOZRCKZQxKZQxIYyEhIx7hIxza4xzY4xrc4xjUoxaa4xaUoxSSoxKQoxCMYw5GIR7c4Rzc4Rre4RjY4RjWoRaa4RSWoRSUoRSMYRKQoRCOYQ5KYQxIXtra3taY3taSntKOXtCMXtCKXNCMXM5MXMxIWtSUmtKSmtKQmtCOWs5MWs5KWs5IWNCKWMxIVIxKUIQCDkhGAAAACH+AS4ALAAAAAAwACIAAAj/AAEIHEiwoMGDCBMqXMiwoUOHMqxIeEiRoZVp7cpZ29WrF4WKIAd208dGAQEVbiTVChUjZMU9+pYQmPmBZpxgvVw+nDdKwQICNVcIXQEkTgKdDdUJ+/nggVAXK1xI3TEA6UIr2uJ8iBqka1cXXTlkqGoVYRZ7iLyqBSs0iiEtZQVKiDGxBI1u3NR6lUpGDKg8MSgEQCphU7Z22vhg0dILX
RCpYLuSCcYJT4wqXASBQaBzU7klHxC127OHD7ZDJFpERqRt0x5OnwQpmZmCLEhrbgg4WIHO1RY+nbQ9WRGEDJlmnXwJ+9FBgXMCIzYMVijBBgYMFxIMqJBMSc0Ht7qh/+Gjpte2rnYsYeNlasWIBgQ6yCewIoPCCp/cyP/wgUGbXVu0QcADZNBDnh98gHMLGXYQUw02w61QU3wdbNWDbQVVIIhMMwFF1DaZiPLBAy7E04kafrjSizaK3LFNNc0AAYRQDsAHHQlJ2IDQJ2zE1+EKDjiAijShkECCC8Qgw4cr7ZgyzC2WaHPNLWWoNeNWPiRAw0QFWQFMhz8C+QQ20yAiVSrY+MGOJCsccsst2GCzoHFxxEGGC+8hgs0MB2kyCpgzrUDCbs1Es41UdtATHFFkWELMOtsoQsYcgvRRQw5RSDgGOjZMR1AvPQIq6KCo9AKOJWDd48owQlHR4DXEKP9iyRrK+DNNBTu4RwIPFeTAGUG7hAomkA84gEg1m6ADljy9PBKGGJY4ig0xlsTBRSn98FOFDUC8pwQOPkgHbCGAzhTkA850s0c7j6Hjix9+gBIrMXLeAccWXUCyiRBcBEECdEJ98KtAqtBCYQc/OvDENnl4gYpUxISCIjjzylkGGV9okYUVNogRhAOBuuAEhjG08wOgDYzAgA5bCjIoCe5uwUk80RKTTSppPREGGGCIISOQ9AXBg6cC6WIywvCpoMHAocRBwhP4bHLFLujYkV42xNxBRhAyGrc113EgYtRBerDDDHMoDCyQEL5sE083EkgwQyBhxGFHMM206DUixGxmE0wssbQjCQ4JCaFKFwgQTVAVVhQUwAVPIFJKrHfYYRwi6OCDzzuIJIFhXAD0EccPsYRiSyqKSDpFcWSMIcZRoBMkQyA2BGZDIKSYcggih8TRRg4VxM5QABVYYLxgwiev/PLMCxQQADs=", + "find": "R0lGODlhMAAiAPQAMf////f39+/v7+fn597e3tbW1s7OzsbGxr29vbW1ta2traWlpZycnJSUlIyMjISEhHt7e3Nzc2tra2NjY1paWlJSUkpKSkJCQjk5OSkpKRgYGAAAAAAAAAAAAAAAAAAAACH+AS4ALAAAAAAwACIAAAX/ICCOZGmeaKquY2AGLiuvMCAUBuHWc48Kh0iFInEYCb4kSQCxPBiMxkMigRQEgJiSFVBYHNGG0RiZOHjblWAiiY4fkDhEYoBp06dAWfyAQyKAgAwDaHgnB0RwgYASgQ0IhDuGJDAIFhMRVFSLEX8QCJJ4AQM5AgQHTZqqjBAOCQQEkWkCDRMUFQsICQ4Vm5maEwwHOAsPDTpKMAsUDlO4CssTcb+2DAp8YGCyNFoCEsZwFQ3QDRTTVBRS0g1QbgsCd5QAAwgIBwYFAwStzQ8UEdCKVchky0yVBw7YuXkAKt4IAg74vXHVagqFBRgXSCAyYWAVCH0SNhDTitCJfSL5/4RbAPKPhQYYjVCYYAvCP0BxEDaD8CheAAHNwqh8MMGPSwgLeJWhwHSjqkYI+xg4MMCEgQjtRvZ7UAYCpghMF7CxONOWJkYR+rCpY4JlVpVxKDwYWEactKW9mhYRtqCTgwgWEMArERSK1j5q//6T8KXonFsShpiJkAECgQYVjykooCVA0JGHEWNiYCHThTFeb3UkoiCCBgwGEKQ1kuAJlhFwhA71h5SukwUM5qqeCSGBgicEWkfNiWSERtBad4JNIBaQBaQah1ToyGZBAnsIuIJs1qnqiAIVjIE2gnAB1T5x0icgzXT79ipgMOOEH6HBbREBMJCeGEY08IoLAkzB1YYFwjxwSUGSNULQJnNUwRYlCcyEkALIxECAP9cNMMABYpRhy3ZsSLDaR70oUAiABGCkAxowCGCAAfDYIQACXoElGRsdXWDBdg2Y90IWktDYGYAB9PWHP0PMdFZaF07SQgAFNDAMAQg0QA1UC8xoZQl22JGFPgWkOUCOL1pZQyhjxinnnCWEAAA7", 
+ "REPL": "R0lGODlhMAAjAPcAMf/////3//+lOf+UKf+MEPf///f39/f35/fv7/ecQvecOfecKfeUIfeUGPeUEPeUCPeMAO/37+/v9+/v3u/n3u/n1u+9jO+9c++1hO+ta++tY++tWu+tUu+tSu+lUu+lQu+lMe+UMe+UKe+UGO+UEO+UAO+MCOfv5+fvxufn7+fn5+fnzue9lOe9c+e1jOe1e+e1c+e1a+etWuetUuelQuecOeeUUueUCN7e597e3t7e1t7ezt7evd7Wzt7Oxt7Ovd7Otd7Opd7OnN7Gtd7Gpd69lN61hN6ta96lStbextberdbW3tbWztbWxtbOvdbOrda1hNalUtaECM7W1s7Ozs7Oxs7Otc7Gxs7Gvc69tc69rc69pc61jM6lc8bWlMbOvcbGxsbGpca9tca9pca1nMaMAL3OhL3Gtb21vb21tb2tpb2tnL2tlLW9tbW9pbW9e7W1pbWtjLWcKa21nK2tra2tnK2tlK2lpa2llK2ljK2le6WlnKWljKWUe6WUc6WUY5y1QpyclJycjJychJyUc5yMY5StY5SUe5SMhJSMe5SMc5SMWpSEa5SESoyUe4yMhIyEY4SlKYScWoSMe4SEe4SEa4R7c4R7Y3uMY3uEe3t7e3t7c3tza3tzY3trKXtjIXOcAHOUMXOEY3Nzc3NzWnNrSmulCGuUMWuMGGtzWmtrY2taMWtaGGOUOWOMAGNzUmNjWmNjSmNaUmNaQmNaOWNaIWNSCFqcAFpjUlpSMVpSIVpSEFpKKVKMAFJSUlJSSlJSMVJKMVJKGFJKAFI5CEqUAEqEAEpzQkpKIUpCQkpCGEpCAEo5EEoxAEJjOUJCOUJCAEI5IUIxADl7ADlaITlCOTkxMTkxKTkxEDkhADFzADFrGDE5OTExADEpEClrCCkxKSkpKSkpISkpACkhCCkhACkYACFzACFrACEhCCEYGBhjEBhjABghABgYCBgYABgQEBgQABAQABAIAAhjAAhSAAhKAAgIEAgICABaAABCAAAhAAAQAAAIAAAAAAAAACH+AS4ALAAAAAAwACMAAAj/AAEIHEiwoMGDCBMqXMiwocOHAA4cgEixIIIJO3JMmAjADIqKFU/8MHIkg5EgYXx4iaTkI0iHE6wE2TCggYILQayEAgXIy8uGCKz8sDCAQAMRG3iEcXULlJkJPwli3OFjh9UdYYLE6NBhA04UXHoVA2XoTZgfPKBWlOBDphAWOdfMcfMDLloeO3hIMjbWVCQ5Fn6E2UFxgpsgFjYIEBADrZU6luqEEfqjTqpt54z1uuWqTIcgWAk7PECGzIUQDRosDmxlUrVJkwQJkqVuX71v06YZcyUlROAdbnLAJKPFyAYFAhoMwFlnEh0rWkpz8raPHm7dqKKc/KFFkBUrVn1M/ziBcEIeLUEQI8/AYk0i9Be4sqjsrN66c9/OnbobhpR3HkIUoZ0WVnBE0AGLFKKFD0HAFUQe77HQgQI1hRBDEHMcY0899bBzihZuCPILJD8EccEGGzwAQhFaUHHQH82sUkgeNHISDBk8WCCCcsqFUEQWmOyzjz3sUGNNOO5Y48YOEgowAAQhnBScQV00k82V47jzjy9CXZBcjziFoco//4CDiSOyhPMPLkJZkEBqJmRQxA9uZGEQD8Ncmc044/zzDF2IZQBCCDYE8QMZz/iiCSx0neHGI7BIhhhNn+1gxRpokEcQAp7seWU7/PwTyxqG/iCEEVzQmUombnDRxRExzP9nBR2PCKLFD3UJwcMPa/SRqUGNWJmNOVn+M44ukMRB4KGcWDNLVhuUMEIJAlzwA3DJBHMJIXm4sQYhqyxCRQQGLSIsn1qac2UzysQSyzX/hLMGD0F0IMCODYAQBA9W/PKPOcRiw0wzwxTiokF9dLMnuv/Mo+fCZF7jBr0xbDDCACWEYKgb1vzjDp/jZNOMLX0IZxAKq2TZTjtaOjwOsXyG+s8sZJTIQsUdIGHoJPf8w487QI/TD
St5mGwQFZxc406o8HiDJchk/ltLHpSlJwSvz5DpTjvmuGNOM57koelBOaAhiCaaPBLL0wwbm003peRBnBZqJMJL1ECz/HXYYx/NdAIOOVCxQyLorswymU93o0wuwfAiTDNR/xz0MLXU0XdCE+UwSTRZAq2lsSATu+4wkGvt+TjNzPLrQyegAUku2Hij5cd8LhxyM8QIg4w18HgcdC6BTBFSDmfQqsovttveDcG7lFLHI75cE841sARCxeWsnxC4G9HADPK6ywzDCRqBo0EHHWhMgT1IJzziNci1N7PMKnSYfML96/90AiJKey/0KtbLX1QK0rrNnQ541xugQ7SHhkXBghN0SKACWRc4KlAhBwKcIOYymJCAAAA7", + "repl": "R0lGODlhMAAjAPQAMf////f39+/v7+fn597e3tbW1s7OzsbGxr29vbW1ta2traWlpZycnJSUlIyMjISEhHt7e3Nzc2tra2NjY1paWlJSUkpKSkJCQjk5OTExMSkpKSEhIRgYGBAQEAgICAAAACH+AS4ALAAAAAAwACMAAAX/ICCOZGmeaKqubOu+gCDANBkIQ1EMQhAghFptYEAkEgjEwXBo7ISvweGgWCwUysPjwTgEoCafTySYIhYMxgLBjEQgCULvCw0QdAZdoVhUIJUFChISEAxYeQM1N1OMTAp+UwZ5eA4TEhFbDWYFdC4ECVMJjwl5BwsQa0umEhUVlhESDgqlBp0rAn5nVpBMDxeZDRQbHBgWFBSWDgtLBnFjKwRYCI9VqQsPs0YKEcMXFq0UEalFDWx4BAO2IwPjppAKDkrTWKYUGd7fEJJFEZpM00cOzCgh4EE8SaoWxKNixQooBRMyZMBwAYIRBhUgLDGS4MoBJeoANMhAgQsaCRZm/5lqaCUJhA4cNHjDoKEDBlJUHqkBlYBTiQUZNGjYMMxDhY3VWk6R4MEDBoMUak5AqoYBqANIBo4wcGGDUKIeLlzVZmWJggsVIkwAZaQSA3kdZzlKkIiEAAlDvW5oOkEBs488JTw44oeUIwdvVTFTUK7uiAAPgubt8GFDhQepqETAQCFU1UMGzlqAgFhUsAcCS0AO6lUDhw8xNRSbENGDhgWSHjWUe6ACbKITizmopZoBa6KvOwj9uuHDhwxyj3xekgDDhw5EvWKo0IB4iQLCOCC/njc7ZQ8UeGvza+ABZZgcxJNc4FO1gc0cOsCUrHevc8tdIMTIAhc4F198G2Qwwd8CBIQUAwEINABBBJUwR9R5wElgVRLwWODBBx4cGB8GEzDQIAo33CGJA8gh+JoH/clUgQU0YvDhdfmJdwEFC6Sjgg8yEPAABsPkh2F22cl2AQbn6QdTghTQ5eAJAQyQAAQV0MSBB9gRVZ4GE1mw5JZOAmiAVi1UWcAZDrDyZXYTeaOhA/bIVuIBPtKQ4h7ViYekUPdcEAEbzTzCRp5CADmAAwj+ORGPBcgwAAHo9ABGCYtm0ChwFHShlRiXhmHlkAcCiOeUodqQw5W0oXLAiamy4MOkjOyAaqxUymApDCEAADs=", +} +colors = ["#FF7B39", "#80F121"] +emphColors = ["#DAFC33", "#F42548"] +fieldParams = { + "height": 3, + "width": 70, + "font": ("monaco", 14), + "highlightthickness": 0, + "borderwidth": 0, + "background": "white", +} +textParams = { + "bg": "#F7E0D4", + "fg": "#2321F1", + "highlightthickness": 0, + "width": 1, + "height": 10, + "font": ("verdana", 16), + "wrap": "word", +} + + +class Zone: + def __init__(self, image, initialField, initialText): + 
frm = Frame(root) + frm.config(background="white") + self.image = PhotoImage(format="gif", data=images[image.upper()]) + self.imageDimmed = PhotoImage(format="gif", data=images[image]) + self.img = Label(frm) + self.img.config(borderwidth=0) + self.img.pack(side="left") + self.fld = Text(frm, **fieldParams) + self.initScrollText(frm, self.fld, initialField) + frm = Frame(root) + self.txt = Text(frm, **textParams) + self.initScrollText(frm, self.txt, initialText) + for i in range(2): + self.txt.tag_config(colors[i], background=colors[i]) + self.txt.tag_config("emph" + colors[i], foreground=emphColors[i]) + + def initScrollText(self, frm, txt, contents): + scl = Scrollbar(frm) + scl.config(command=txt.yview) + scl.pack(side="right", fill="y") + txt.pack(side="left", expand=True, fill="x") + txt.config(yscrollcommand=scl.set) + txt.insert("1.0", contents) + frm.pack(fill="x") + Frame(height=2, bd=1, relief="ridge").pack(fill="x") + + def refresh(self): + self.colorCycle = itertools.cycle(colors) + try: + self.substitute() + self.img.config(image=self.image) + except re.error: + self.img.config(image=self.imageDimmed) + + +class FindZone(Zone): + def addTags(self, m): + color = next(self.colorCycle) + self.txt.tag_add(color, "1.0+%sc" % m.start(), "1.0+%sc" % m.end()) + try: + self.txt.tag_add( + "emph" + color, "1.0+%sc" % m.start("emph"), "1.0+%sc" % m.end("emph") + ) + except: + pass + + def substitute(self, *args): + for color in colors: + self.txt.tag_remove(color, "1.0", "end") + self.txt.tag_remove("emph" + color, "1.0", "end") + self.rex = re.compile("") # default value in case of malformed regexp + self.rex = re.compile(self.fld.get("1.0", "end")[:-1], re.MULTILINE) + try: + re.compile("(?P%s)" % self.fld.get(SEL_FIRST, SEL_LAST)) + self.rexSel = re.compile( + "%s(?P%s)%s" + % ( + self.fld.get("1.0", SEL_FIRST), + self.fld.get(SEL_FIRST, SEL_LAST), + self.fld.get(SEL_LAST, "end")[:-1], + ), + re.MULTILINE, + ) + except: + self.rexSel = self.rex + 
self.rexSel.sub(self.addTags, self.txt.get("1.0", "end")) + + +class ReplaceZone(Zone): + def addTags(self, m): + s = sz.rex.sub(self.repl, m.group()) + self.txt.delete( + "1.0+%sc" % (m.start() + self.diff), "1.0+%sc" % (m.end() + self.diff) + ) + self.txt.insert("1.0+%sc" % (m.start() + self.diff), s, next(self.colorCycle)) + self.diff += len(s) - (m.end() - m.start()) + + def substitute(self): + self.txt.delete("1.0", "end") + self.txt.insert("1.0", sz.txt.get("1.0", "end")[:-1]) + self.diff = 0 + self.repl = rex0.sub(r"\\g<\1>", self.fld.get("1.0", "end")[:-1]) + sz.rex.sub(self.addTags, sz.txt.get("1.0", "end")[:-1]) + + +def launchRefresh(_): + sz.fld.after_idle(sz.refresh) + rz.fld.after_idle(rz.refresh) + + +def app(): + global root, sz, rz, rex0 + root = Tk() + root.resizable(height=False, width=True) + root.title(windowTitle) + root.minsize(width=250, height=0) + sz = FindZone("find", initialFind, initialText) + sz.fld.bind("", launchRefresh) + sz.fld.bind("", launchRefresh) + sz.fld.bind("", launchRefresh) + sz.rexSel = re.compile("") + rz = ReplaceZone("repl", initialRepl, "") + rex0 = re.compile(r"(?", launchRefresh) + launchRefresh(None) + root.mainloop() + + +if __name__ == "__main__": + app() + +__all__ = ["app"] diff --git a/lib/python3.10/site-packages/nltk/app/rdparser_app.py b/lib/python3.10/site-packages/nltk/app/rdparser_app.py new file mode 100644 index 0000000000000000000000000000000000000000..16de5a442659171763da4b4d19e9f56ef9db6277 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/app/rdparser_app.py @@ -0,0 +1,1052 @@ +# Natural Language Toolkit: Recursive Descent Parser Application +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +A graphical tool for exploring the recursive descent parser. + +The recursive descent parser maintains a tree, which records the +structure of the portion of the text that has been parsed. 
It uses +CFG productions to expand the fringe of the tree, and matches its +leaves against the text. Initially, the tree contains the start +symbol ("S"). It is shown in the main canvas, to the right of the +list of available expansions. + +The parser builds up a tree structure for the text using three +operations: + + - "expand" uses a CFG production to add children to a node on the + fringe of the tree. + - "match" compares a leaf in the tree to a text token. + - "backtrack" returns the tree to its state before the most recent + expand or match operation. + +The parser maintains a list of tree locations called a "frontier" to +remember which nodes have not yet been expanded and which leaves have +not yet been matched against the text. The leftmost frontier node is +shown in green, and the other frontier nodes are shown in blue. The +parser always performs expand and match operations on the leftmost +element of the frontier. + +You can control the parser's operation by using the "expand," "match," +and "backtrack" buttons; or you can use the "step" button to let the +parser automatically decide which operation to apply. The parser uses +the following rules to decide which operation to apply: + + - If the leftmost frontier element is a token, try matching it. + - If the leftmost frontier element is a node, try expanding it with + the first untried expansion. + - Otherwise, backtrack. + +The "expand" button applies the untried expansion whose CFG production +is listed earliest in the grammar. To manually choose which expansion +to apply, click on a CFG production from the list of available +expansions, on the left side of the main window. + +The "autostep" button will let the parser continue applying +applications to the tree until it reaches a complete parse. You can +cancel an autostep in progress at any time by clicking on the +"autostep" button again. 
+ +Keyboard Shortcuts:: + [Space]\t Perform the next expand, match, or backtrack operation + [a]\t Step through operations until the next complete parse + [e]\t Perform an expand operation + [m]\t Perform a match operation + [b]\t Perform a backtrack operation + [Delete]\t Reset the parser + [g]\t Show/hide available expansions list + [h]\t Help + [Ctrl-p]\t Print + [q]\t Quit +""" + +from tkinter import Button, Frame, IntVar, Label, Listbox, Menu, Scrollbar, Tk +from tkinter.font import Font + +from nltk.draw import CFGEditor, TreeSegmentWidget, tree_to_treesegment +from nltk.draw.util import CanvasFrame, EntryDialog, ShowText, TextWidget +from nltk.parse import SteppingRecursiveDescentParser +from nltk.tree import Tree +from nltk.util import in_idle + + +class RecursiveDescentApp: + """ + A graphical tool for exploring the recursive descent parser. The tool + displays the parser's tree and the remaining text, and allows the + user to control the parser's operation. In particular, the user + can expand subtrees on the frontier, match tokens on the frontier + against the text, and backtrack. A "step" button simply steps + through the parsing process, performing the operations that + ``RecursiveDescentParser`` would use. + """ + + def __init__(self, grammar, sent, trace=0): + self._sent = sent + self._parser = SteppingRecursiveDescentParser(grammar, trace) + + # Set up the main window. + self._top = Tk() + self._top.title("Recursive Descent Parser Application") + + # Set up key bindings. + self._init_bindings() + + # Initialize the fonts. + self._init_fonts(self._top) + + # Animations. animating_lock is a lock to prevent the demo + # from performing new operations while it's animating. + self._animation_frames = IntVar(self._top) + self._animation_frames.set(5) + self._animating_lock = 0 + self._autostep = 0 + + # The user can hide the grammar. + self._show_grammar = IntVar(self._top) + self._show_grammar.set(1) + + # Create the basic frames. 
+ self._init_menubar(self._top) + self._init_buttons(self._top) + self._init_feedback(self._top) + self._init_grammar(self._top) + self._init_canvas(self._top) + + # Initialize the parser. + self._parser.initialize(self._sent) + + # Resize callback + self._canvas.bind("", self._configure) + + ######################################### + ## Initialization Helpers + ######################################### + + def _init_fonts(self, root): + # See: + self._sysfont = Font(font=Button()["font"]) + root.option_add("*Font", self._sysfont) + + # TWhat's our font size (default=same as sysfont) + self._size = IntVar(root) + self._size.set(self._sysfont.cget("size")) + + self._boldfont = Font(family="helvetica", weight="bold", size=self._size.get()) + self._font = Font(family="helvetica", size=self._size.get()) + if self._size.get() < 0: + big = self._size.get() - 2 + else: + big = self._size.get() + 2 + self._bigfont = Font(family="helvetica", weight="bold", size=big) + + def _init_grammar(self, parent): + # Grammar view. + self._prodframe = listframe = Frame(parent) + self._prodframe.pack(fill="both", side="left", padx=2) + self._prodlist_label = Label( + self._prodframe, font=self._boldfont, text="Available Expansions" + ) + self._prodlist_label.pack() + self._prodlist = Listbox( + self._prodframe, + selectmode="single", + relief="groove", + background="white", + foreground="#909090", + font=self._font, + selectforeground="#004040", + selectbackground="#c0f0c0", + ) + + self._prodlist.pack(side="right", fill="both", expand=1) + + self._productions = list(self._parser.grammar().productions()) + for production in self._productions: + self._prodlist.insert("end", (" %s" % production)) + self._prodlist.config(height=min(len(self._productions), 25)) + + # Add a scrollbar if there are more than 25 productions. 
+ if len(self._productions) > 25: + listscroll = Scrollbar(self._prodframe, orient="vertical") + self._prodlist.config(yscrollcommand=listscroll.set) + listscroll.config(command=self._prodlist.yview) + listscroll.pack(side="left", fill="y") + + # If they select a production, apply it. + self._prodlist.bind("<>", self._prodlist_select) + + def _init_bindings(self): + # Key bindings are a good thing. + self._top.bind("", self.destroy) + self._top.bind("", self.destroy) + self._top.bind("", self.destroy) + self._top.bind("e", self.expand) + # self._top.bind('', self.expand) + # self._top.bind('', self.expand) + self._top.bind("m", self.match) + self._top.bind("", self.match) + self._top.bind("", self.match) + self._top.bind("b", self.backtrack) + self._top.bind("", self.backtrack) + self._top.bind("", self.backtrack) + self._top.bind("", self.backtrack) + self._top.bind("", self.backtrack) + self._top.bind("a", self.autostep) + # self._top.bind('', self.autostep) + self._top.bind("", self.autostep) + self._top.bind("", self.cancel_autostep) + self._top.bind("", self.step) + self._top.bind("", self.reset) + self._top.bind("", self.postscript) + # self._top.bind('', self.help) + # self._top.bind('', self.help) + self._top.bind("", self.help) + self._top.bind("", self.help) + # self._top.bind('', self.toggle_grammar) + # self._top.bind('', self.toggle_grammar) + # self._top.bind('', self.toggle_grammar) + self._top.bind("", self.edit_grammar) + self._top.bind("", self.edit_sentence) + + def _init_buttons(self, parent): + # Set up the frames. 
+ self._buttonframe = buttonframe = Frame(parent) + buttonframe.pack(fill="none", side="bottom", padx=3, pady=2) + Button( + buttonframe, + text="Step", + background="#90c0d0", + foreground="black", + command=self.step, + ).pack(side="left") + Button( + buttonframe, + text="Autostep", + background="#90c0d0", + foreground="black", + command=self.autostep, + ).pack(side="left") + Button( + buttonframe, + text="Expand", + underline=0, + background="#90f090", + foreground="black", + command=self.expand, + ).pack(side="left") + Button( + buttonframe, + text="Match", + underline=0, + background="#90f090", + foreground="black", + command=self.match, + ).pack(side="left") + Button( + buttonframe, + text="Backtrack", + underline=0, + background="#f0a0a0", + foreground="black", + command=self.backtrack, + ).pack(side="left") + # Replace autostep... + + # self._autostep_button = Button(buttonframe, text='Autostep', + # underline=0, command=self.autostep) + # self._autostep_button.pack(side='left') + + def _configure(self, event): + self._autostep = 0 + (x1, y1, x2, y2) = self._cframe.scrollregion() + y2 = event.height - 6 + self._canvas["scrollregion"] = "%d %d %d %d" % (x1, y1, x2, y2) + self._redraw() + + def _init_feedback(self, parent): + self._feedbackframe = feedbackframe = Frame(parent) + feedbackframe.pack(fill="x", side="bottom", padx=3, pady=3) + self._lastoper_label = Label( + feedbackframe, text="Last Operation:", font=self._font + ) + self._lastoper_label.pack(side="left") + lastoperframe = Frame(feedbackframe, relief="sunken", border=1) + lastoperframe.pack(fill="x", side="right", expand=1, padx=5) + self._lastoper1 = Label( + lastoperframe, foreground="#007070", background="#f0f0f0", font=self._font + ) + self._lastoper2 = Label( + lastoperframe, + anchor="w", + width=30, + foreground="#004040", + background="#f0f0f0", + font=self._font, + ) + self._lastoper1.pack(side="left") + self._lastoper2.pack(side="left", fill="x", expand=1) + + def _init_canvas(self, 
parent): + self._cframe = CanvasFrame( + parent, + background="white", + # width=525, height=250, + closeenough=10, + border=2, + relief="sunken", + ) + self._cframe.pack(expand=1, fill="both", side="top", pady=2) + canvas = self._canvas = self._cframe.canvas() + + # Initially, there's no tree or text + self._tree = None + self._textwidgets = [] + self._textline = None + + def _init_menubar(self, parent): + menubar = Menu(parent) + + filemenu = Menu(menubar, tearoff=0) + filemenu.add_command( + label="Reset Parser", underline=0, command=self.reset, accelerator="Del" + ) + filemenu.add_command( + label="Print to Postscript", + underline=0, + command=self.postscript, + accelerator="Ctrl-p", + ) + filemenu.add_command( + label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-x" + ) + menubar.add_cascade(label="File", underline=0, menu=filemenu) + + editmenu = Menu(menubar, tearoff=0) + editmenu.add_command( + label="Edit Grammar", + underline=5, + command=self.edit_grammar, + accelerator="Ctrl-g", + ) + editmenu.add_command( + label="Edit Text", + underline=5, + command=self.edit_sentence, + accelerator="Ctrl-t", + ) + menubar.add_cascade(label="Edit", underline=0, menu=editmenu) + + rulemenu = Menu(menubar, tearoff=0) + rulemenu.add_command( + label="Step", underline=1, command=self.step, accelerator="Space" + ) + rulemenu.add_separator() + rulemenu.add_command( + label="Match", underline=0, command=self.match, accelerator="Ctrl-m" + ) + rulemenu.add_command( + label="Expand", underline=0, command=self.expand, accelerator="Ctrl-e" + ) + rulemenu.add_separator() + rulemenu.add_command( + label="Backtrack", underline=0, command=self.backtrack, accelerator="Ctrl-b" + ) + menubar.add_cascade(label="Apply", underline=0, menu=rulemenu) + + viewmenu = Menu(menubar, tearoff=0) + viewmenu.add_checkbutton( + label="Show Grammar", + underline=0, + variable=self._show_grammar, + command=self._toggle_grammar, + ) + viewmenu.add_separator() + viewmenu.add_radiobutton( 
+ label="Tiny", + variable=self._size, + underline=0, + value=10, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Small", + variable=self._size, + underline=0, + value=12, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Medium", + variable=self._size, + underline=0, + value=14, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Large", + variable=self._size, + underline=0, + value=18, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Huge", + variable=self._size, + underline=0, + value=24, + command=self.resize, + ) + menubar.add_cascade(label="View", underline=0, menu=viewmenu) + + animatemenu = Menu(menubar, tearoff=0) + animatemenu.add_radiobutton( + label="No Animation", underline=0, variable=self._animation_frames, value=0 + ) + animatemenu.add_radiobutton( + label="Slow Animation", + underline=0, + variable=self._animation_frames, + value=10, + accelerator="-", + ) + animatemenu.add_radiobutton( + label="Normal Animation", + underline=0, + variable=self._animation_frames, + value=5, + accelerator="=", + ) + animatemenu.add_radiobutton( + label="Fast Animation", + underline=0, + variable=self._animation_frames, + value=2, + accelerator="+", + ) + menubar.add_cascade(label="Animate", underline=1, menu=animatemenu) + + helpmenu = Menu(menubar, tearoff=0) + helpmenu.add_command(label="About", underline=0, command=self.about) + helpmenu.add_command( + label="Instructions", underline=0, command=self.help, accelerator="F1" + ) + menubar.add_cascade(label="Help", underline=0, menu=helpmenu) + + parent.config(menu=menubar) + + ######################################### + ## Helper + ######################################### + + def _get(self, widget, treeloc): + for i in treeloc: + widget = widget.subtrees()[i] + if isinstance(widget, TreeSegmentWidget): + widget = widget.label() + return widget + + ######################################### + ## Main draw procedure + ######################################### + 
+ def _redraw(self): + canvas = self._canvas + + # Delete the old tree, widgets, etc. + if self._tree is not None: + self._cframe.destroy_widget(self._tree) + for twidget in self._textwidgets: + self._cframe.destroy_widget(twidget) + if self._textline is not None: + self._canvas.delete(self._textline) + + # Draw the tree. + helv = ("helvetica", -self._size.get()) + bold = ("helvetica", -self._size.get(), "bold") + attribs = { + "tree_color": "#000000", + "tree_width": 2, + "node_font": bold, + "leaf_font": helv, + } + tree = self._parser.tree() + self._tree = tree_to_treesegment(canvas, tree, **attribs) + self._cframe.add_widget(self._tree, 30, 5) + + # Draw the text. + helv = ("helvetica", -self._size.get()) + bottom = y = self._cframe.scrollregion()[3] + self._textwidgets = [ + TextWidget(canvas, word, font=self._font) for word in self._sent + ] + for twidget in self._textwidgets: + self._cframe.add_widget(twidget, 0, 0) + twidget.move(0, bottom - twidget.bbox()[3] - 5) + y = min(y, twidget.bbox()[1]) + + # Draw a line over the text, to separate it from the tree. + self._textline = canvas.create_line(-5000, y - 5, 5000, y - 5, dash=".") + + # Highlight appropriate nodes. + self._highlight_nodes() + self._highlight_prodlist() + + # Make sure the text lines up. + self._position_text() + + def _redraw_quick(self): + # This should be more-or-less sufficient after an animation. + self._highlight_nodes() + self._highlight_prodlist() + self._position_text() + + def _highlight_nodes(self): + # Highlight the list of nodes to be checked. + bold = ("helvetica", -self._size.get(), "bold") + for treeloc in self._parser.frontier()[:1]: + self._get(self._tree, treeloc)["color"] = "#20a050" + self._get(self._tree, treeloc)["font"] = bold + for treeloc in self._parser.frontier()[1:]: + self._get(self._tree, treeloc)["color"] = "#008080" + + def _highlight_prodlist(self): + # Highlight the productions that can be expanded. 
+ # Boy, too bad tkinter doesn't implement Listbox.itemconfig; + # that would be pretty useful here. + self._prodlist.delete(0, "end") + expandable = self._parser.expandable_productions() + untried = self._parser.untried_expandable_productions() + productions = self._productions + for index in range(len(productions)): + if productions[index] in expandable: + if productions[index] in untried: + self._prodlist.insert(index, " %s" % productions[index]) + else: + self._prodlist.insert(index, " %s (TRIED)" % productions[index]) + self._prodlist.selection_set(index) + else: + self._prodlist.insert(index, " %s" % productions[index]) + + def _position_text(self): + # Line up the text widgets that are matched against the tree + numwords = len(self._sent) + num_matched = numwords - len(self._parser.remaining_text()) + leaves = self._tree_leaves()[:num_matched] + xmax = self._tree.bbox()[0] + for i in range(0, len(leaves)): + widget = self._textwidgets[i] + leaf = leaves[i] + widget["color"] = "#006040" + leaf["color"] = "#006040" + widget.move(leaf.bbox()[0] - widget.bbox()[0], 0) + xmax = widget.bbox()[2] + 10 + + # Line up the text widgets that are not matched against the tree. + for i in range(len(leaves), numwords): + widget = self._textwidgets[i] + widget["color"] = "#a0a0a0" + widget.move(xmax - widget.bbox()[0], 0) + xmax = widget.bbox()[2] + 10 + + # If we have a complete parse, make everything green :) + if self._parser.currently_complete(): + for twidget in self._textwidgets: + twidget["color"] = "#00a000" + + # Move the matched leaves down to the text. 
+ for i in range(0, len(leaves)): + widget = self._textwidgets[i] + leaf = leaves[i] + dy = widget.bbox()[1] - leaf.bbox()[3] - 10.0 + dy = max(dy, leaf.parent().label().bbox()[3] - leaf.bbox()[3] + 10) + leaf.move(0, dy) + + def _tree_leaves(self, tree=None): + if tree is None: + tree = self._tree + if isinstance(tree, TreeSegmentWidget): + leaves = [] + for child in tree.subtrees(): + leaves += self._tree_leaves(child) + return leaves + else: + return [tree] + + ######################################### + ## Button Callbacks + ######################################### + + def destroy(self, *e): + self._autostep = 0 + if self._top is None: + return + self._top.destroy() + self._top = None + + def reset(self, *e): + self._autostep = 0 + self._parser.initialize(self._sent) + self._lastoper1["text"] = "Reset Application" + self._lastoper2["text"] = "" + self._redraw() + + def autostep(self, *e): + if self._animation_frames.get() == 0: + self._animation_frames.set(2) + if self._autostep: + self._autostep = 0 + else: + self._autostep = 1 + self._step() + + def cancel_autostep(self, *e): + # self._autostep_button['text'] = 'Autostep' + self._autostep = 0 + + # Make sure to stop auto-stepping if we get any user input. + def step(self, *e): + self._autostep = 0 + self._step() + + def match(self, *e): + self._autostep = 0 + self._match() + + def expand(self, *e): + self._autostep = 0 + self._expand() + + def backtrack(self, *e): + self._autostep = 0 + self._backtrack() + + def _step(self): + if self._animating_lock: + return + + # Try expanding, matching, and backtracking (in that order) + if self._expand(): + pass + elif self._parser.untried_match() and self._match(): + pass + elif self._backtrack(): + pass + else: + self._lastoper1["text"] = "Finished" + self._lastoper2["text"] = "" + self._autostep = 0 + + # Check if we just completed a parse. 
+ if self._parser.currently_complete(): + self._autostep = 0 + self._lastoper2["text"] += " [COMPLETE PARSE]" + + def _expand(self, *e): + if self._animating_lock: + return + old_frontier = self._parser.frontier() + rv = self._parser.expand() + if rv is not None: + self._lastoper1["text"] = "Expand:" + self._lastoper2["text"] = rv + self._prodlist.selection_clear(0, "end") + index = self._productions.index(rv) + self._prodlist.selection_set(index) + self._animate_expand(old_frontier[0]) + return True + else: + self._lastoper1["text"] = "Expand:" + self._lastoper2["text"] = "(all expansions tried)" + return False + + def _match(self, *e): + if self._animating_lock: + return + old_frontier = self._parser.frontier() + rv = self._parser.match() + if rv is not None: + self._lastoper1["text"] = "Match:" + self._lastoper2["text"] = rv + self._animate_match(old_frontier[0]) + return True + else: + self._lastoper1["text"] = "Match:" + self._lastoper2["text"] = "(failed)" + return False + + def _backtrack(self, *e): + if self._animating_lock: + return + if self._parser.backtrack(): + elt = self._parser.tree() + for i in self._parser.frontier()[0]: + elt = elt[i] + self._lastoper1["text"] = "Backtrack" + self._lastoper2["text"] = "" + if isinstance(elt, Tree): + self._animate_backtrack(self._parser.frontier()[0]) + else: + self._animate_match_backtrack(self._parser.frontier()[0]) + return True + else: + self._autostep = 0 + self._lastoper1["text"] = "Finished" + self._lastoper2["text"] = "" + return False + + def about(self, *e): + ABOUT = ( + "NLTK Recursive Descent Parser Application\n" + "Written by Edward Loper" + ) + TITLE = "About: Recursive Descent Parser Application" + try: + from tkinter.messagebox import Message + + Message(message=ABOUT, title=TITLE).show() + except: + ShowText(self._top, TITLE, ABOUT) + + def help(self, *e): + self._autostep = 0 + # The default font's not very legible; try using 'fixed' instead. 
+ try: + ShowText( + self._top, + "Help: Recursive Descent Parser Application", + (__doc__ or "").strip(), + width=75, + font="fixed", + ) + except: + ShowText( + self._top, + "Help: Recursive Descent Parser Application", + (__doc__ or "").strip(), + width=75, + ) + + def postscript(self, *e): + self._autostep = 0 + self._cframe.print_to_file() + + def mainloop(self, *args, **kwargs): + """ + Enter the Tkinter mainloop. This function must be called if + this demo is created from a non-interactive program (e.g. + from a secript); otherwise, the demo will close as soon as + the script completes. + """ + if in_idle(): + return + self._top.mainloop(*args, **kwargs) + + def resize(self, size=None): + if size is not None: + self._size.set(size) + size = self._size.get() + self._font.configure(size=-(abs(size))) + self._boldfont.configure(size=-(abs(size))) + self._sysfont.configure(size=-(abs(size))) + self._bigfont.configure(size=-(abs(size + 2))) + self._redraw() + + ######################################### + ## Expand Production Selection + ######################################### + + def _toggle_grammar(self, *e): + if self._show_grammar.get(): + self._prodframe.pack( + fill="both", side="left", padx=2, after=self._feedbackframe + ) + self._lastoper1["text"] = "Show Grammar" + else: + self._prodframe.pack_forget() + self._lastoper1["text"] = "Hide Grammar" + self._lastoper2["text"] = "" + + # def toggle_grammar(self, *e): + # self._show_grammar = not self._show_grammar + # if self._show_grammar: + # self._prodframe.pack(fill='both', expand='y', side='left', + # after=self._feedbackframe) + # self._lastoper1['text'] = 'Show Grammar' + # else: + # self._prodframe.pack_forget() + # self._lastoper1['text'] = 'Hide Grammar' + # self._lastoper2['text'] = '' + + def _prodlist_select(self, event): + selection = self._prodlist.curselection() + if len(selection) != 1: + return + index = int(selection[0]) + old_frontier = self._parser.frontier() + production = 
self._parser.expand(self._productions[index]) + + if production: + self._lastoper1["text"] = "Expand:" + self._lastoper2["text"] = production + self._prodlist.selection_clear(0, "end") + self._prodlist.selection_set(index) + self._animate_expand(old_frontier[0]) + else: + # Reset the production selections. + self._prodlist.selection_clear(0, "end") + for prod in self._parser.expandable_productions(): + index = self._productions.index(prod) + self._prodlist.selection_set(index) + + ######################################### + ## Animation + ######################################### + + def _animate_expand(self, treeloc): + oldwidget = self._get(self._tree, treeloc) + oldtree = oldwidget.parent() + top = not isinstance(oldtree.parent(), TreeSegmentWidget) + + tree = self._parser.tree() + for i in treeloc: + tree = tree[i] + + widget = tree_to_treesegment( + self._canvas, + tree, + node_font=self._boldfont, + leaf_color="white", + tree_width=2, + tree_color="white", + node_color="white", + leaf_font=self._font, + ) + widget.label()["color"] = "#20a050" + + (oldx, oldy) = oldtree.label().bbox()[:2] + (newx, newy) = widget.label().bbox()[:2] + widget.move(oldx - newx, oldy - newy) + + if top: + self._cframe.add_widget(widget, 0, 5) + widget.move(30 - widget.label().bbox()[0], 0) + self._tree = widget + else: + oldtree.parent().replace_child(oldtree, widget) + + # Move the children over so they don't overlap. + # Line the children up in a strange way. + if widget.subtrees(): + dx = ( + oldx + + widget.label().width() / 2 + - widget.subtrees()[0].bbox()[0] / 2 + - widget.subtrees()[0].bbox()[2] / 2 + ) + for subtree in widget.subtrees(): + subtree.move(dx, 0) + + self._makeroom(widget) + + if top: + self._cframe.destroy_widget(oldtree) + else: + oldtree.destroy() + + colors = [ + "gray%d" % (10 * int(10 * x / self._animation_frames.get())) + for x in range(self._animation_frames.get(), 0, -1) + ] + + # Move the text string down, if necessary. 
+ dy = widget.bbox()[3] + 30 - self._canvas.coords(self._textline)[1] + if dy > 0: + for twidget in self._textwidgets: + twidget.move(0, dy) + self._canvas.move(self._textline, 0, dy) + + self._animate_expand_frame(widget, colors) + + def _makeroom(self, treeseg): + """ + Make sure that no sibling tree bbox's overlap. + """ + parent = treeseg.parent() + if not isinstance(parent, TreeSegmentWidget): + return + + index = parent.subtrees().index(treeseg) + + # Handle siblings to the right + rsiblings = parent.subtrees()[index + 1 :] + if rsiblings: + dx = treeseg.bbox()[2] - rsiblings[0].bbox()[0] + 10 + for sibling in rsiblings: + sibling.move(dx, 0) + + # Handle siblings to the left + if index > 0: + lsibling = parent.subtrees()[index - 1] + dx = max(0, lsibling.bbox()[2] - treeseg.bbox()[0] + 10) + treeseg.move(dx, 0) + + # Keep working up the tree. + self._makeroom(parent) + + def _animate_expand_frame(self, widget, colors): + if len(colors) > 0: + self._animating_lock = 1 + widget["color"] = colors[0] + for subtree in widget.subtrees(): + if isinstance(subtree, TreeSegmentWidget): + subtree.label()["color"] = colors[0] + else: + subtree["color"] = colors[0] + self._top.after(50, self._animate_expand_frame, widget, colors[1:]) + else: + widget["color"] = "black" + for subtree in widget.subtrees(): + if isinstance(subtree, TreeSegmentWidget): + subtree.label()["color"] = "black" + else: + subtree["color"] = "black" + self._redraw_quick() + widget.label()["color"] = "black" + self._animating_lock = 0 + if self._autostep: + self._step() + + def _animate_backtrack(self, treeloc): + # Flash red first, if we're animating. 
+ if self._animation_frames.get() == 0: + colors = [] + else: + colors = ["#a00000", "#000000", "#a00000"] + colors += [ + "gray%d" % (10 * int(10 * x / (self._animation_frames.get()))) + for x in range(1, self._animation_frames.get() + 1) + ] + + widgets = [self._get(self._tree, treeloc).parent()] + for subtree in widgets[0].subtrees(): + if isinstance(subtree, TreeSegmentWidget): + widgets.append(subtree.label()) + else: + widgets.append(subtree) + + self._animate_backtrack_frame(widgets, colors) + + def _animate_backtrack_frame(self, widgets, colors): + if len(colors) > 0: + self._animating_lock = 1 + for widget in widgets: + widget["color"] = colors[0] + self._top.after(50, self._animate_backtrack_frame, widgets, colors[1:]) + else: + for widget in widgets[0].subtrees(): + widgets[0].remove_child(widget) + widget.destroy() + self._redraw_quick() + self._animating_lock = 0 + if self._autostep: + self._step() + + def _animate_match_backtrack(self, treeloc): + widget = self._get(self._tree, treeloc) + node = widget.parent().label() + dy = (node.bbox()[3] - widget.bbox()[1] + 14) / max( + 1, self._animation_frames.get() + ) + self._animate_match_backtrack_frame(self._animation_frames.get(), widget, dy) + + def _animate_match(self, treeloc): + widget = self._get(self._tree, treeloc) + + dy = (self._textwidgets[0].bbox()[1] - widget.bbox()[3] - 10.0) / max( + 1, self._animation_frames.get() + ) + self._animate_match_frame(self._animation_frames.get(), widget, dy) + + def _animate_match_frame(self, frame, widget, dy): + if frame > 0: + self._animating_lock = 1 + widget.move(0, dy) + self._top.after(10, self._animate_match_frame, frame - 1, widget, dy) + else: + widget["color"] = "#006040" + self._redraw_quick() + self._animating_lock = 0 + if self._autostep: + self._step() + + def _animate_match_backtrack_frame(self, frame, widget, dy): + if frame > 0: + self._animating_lock = 1 + widget.move(0, dy) + self._top.after( + 10, self._animate_match_backtrack_frame, frame - 
1, widget, dy + ) + else: + widget.parent().remove_child(widget) + widget.destroy() + self._animating_lock = 0 + if self._autostep: + self._step() + + def edit_grammar(self, *e): + CFGEditor(self._top, self._parser.grammar(), self.set_grammar) + + def set_grammar(self, grammar): + self._parser.set_grammar(grammar) + self._productions = list(grammar.productions()) + self._prodlist.delete(0, "end") + for production in self._productions: + self._prodlist.insert("end", (" %s" % production)) + + def edit_sentence(self, *e): + sentence = " ".join(self._sent) + title = "Edit Text" + instr = "Enter a new sentence to parse." + EntryDialog(self._top, sentence, instr, self.set_sentence, title) + + def set_sentence(self, sentence): + self._sent = sentence.split() # [XX] use tagged? + self.reset() + + +def app(): + """ + Create a recursive descent parser demo, using a simple grammar and + text. + """ + from nltk.grammar import CFG + + grammar = CFG.fromstring( + """ + # Grammatical productions. + S -> NP VP + NP -> Det N PP | Det N + VP -> V NP PP | V NP | V + PP -> P NP + # Lexical productions. + NP -> 'I' + Det -> 'the' | 'a' + N -> 'man' | 'park' | 'dog' | 'telescope' + V -> 'ate' | 'saw' + P -> 'in' | 'under' | 'with' + """ + ) + + sent = "the dog saw a man in the park".split() + + RecursiveDescentApp(grammar, sent).mainloop() + + +if __name__ == "__main__": + app() + +__all__ = ["app"] diff --git a/lib/python3.10/site-packages/nltk/app/srparser_app.py b/lib/python3.10/site-packages/nltk/app/srparser_app.py new file mode 100644 index 0000000000000000000000000000000000000000..cca5cb2de2149cc573b6d471cd5fef2a57cbbb7d --- /dev/null +++ b/lib/python3.10/site-packages/nltk/app/srparser_app.py @@ -0,0 +1,937 @@ +# Natural Language Toolkit: Shift-Reduce Parser Application +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +A graphical tool for exploring the shift-reduce parser. 
+ +The shift-reduce parser maintains a stack, which records the structure +of the portion of the text that has been parsed. The stack is +initially empty. Its contents are shown on the left side of the main +canvas. + +On the right side of the main canvas is the remaining text. This is +the portion of the text which has not yet been considered by the +parser. + +The parser builds up a tree structure for the text using two +operations: + + - "shift" moves the first token from the remaining text to the top + of the stack. In the demo, the top of the stack is its right-hand + side. + - "reduce" uses a grammar production to combine the rightmost stack + elements into a single tree token. + +You can control the parser's operation by using the "shift" and +"reduce" buttons; or you can use the "step" button to let the parser +automatically decide which operation to apply. The parser uses the +following rules to decide which operation to apply: + + - Only shift if no reductions are available. + - If multiple reductions are available, then apply the reduction + whose CFG production is listed earliest in the grammar. + +The "reduce" button applies the reduction whose CFG production is +listed earliest in the grammar. There are two ways to manually choose +which reduction to apply: + + - Click on a CFG production from the list of available reductions, + on the left side of the main window. The reduction based on that + production will be applied to the top of the stack. + - Click on one of the stack elements. A popup window will appear, + containing all available reductions. Select one, and it will be + applied to the top of the stack. + +Note that reductions can only be applied to the top of the stack. 
+ +Keyboard Shortcuts:: + [Space]\t Perform the next shift or reduce operation + [s]\t Perform a shift operation + [r]\t Perform a reduction operation + [Ctrl-z]\t Undo most recent operation + [Delete]\t Reset the parser + [g]\t Show/hide available production list + [Ctrl-a]\t Toggle animations + [h]\t Help + [Ctrl-p]\t Print + [q]\t Quit + +""" + +from tkinter import Button, Frame, IntVar, Label, Listbox, Menu, Scrollbar, Tk +from tkinter.font import Font + +from nltk.draw import CFGEditor, TreeSegmentWidget, tree_to_treesegment +from nltk.draw.util import CanvasFrame, EntryDialog, ShowText, TextWidget +from nltk.parse import SteppingShiftReduceParser +from nltk.tree import Tree +from nltk.util import in_idle + +""" +Possible future improvements: + - button/window to change and/or select text. Just pop up a window + with an entry, and let them modify the text; and then retokenize + it? Maybe give a warning if it contains tokens whose types are + not in the grammar. + - button/window to change and/or select grammar. Select from + several alternative grammars? Or actually change the grammar? If + the later, then I'd want to define nltk.draw.cfg, which would be + responsible for that. +""" + + +class ShiftReduceApp: + """ + A graphical tool for exploring the shift-reduce parser. The tool + displays the parser's stack and the remaining text, and allows the + user to control the parser's operation. In particular, the user + can shift tokens onto the stack, and can perform reductions on the + top elements of the stack. A "step" button simply steps through + the parsing process, performing the operations that + ``nltk.parse.ShiftReduceParser`` would use. + """ + + def __init__(self, grammar, sent, trace=0): + self._sent = sent + self._parser = SteppingShiftReduceParser(grammar, trace) + + # Set up the main window. + self._top = Tk() + self._top.title("Shift Reduce Parser Application") + + # Animations. 
animating_lock is a lock to prevent the demo + # from performing new operations while it's animating. + self._animating_lock = 0 + self._animate = IntVar(self._top) + self._animate.set(10) # = medium + + # The user can hide the grammar. + self._show_grammar = IntVar(self._top) + self._show_grammar.set(1) + + # Initialize fonts. + self._init_fonts(self._top) + + # Set up key bindings. + self._init_bindings() + + # Create the basic frames. + self._init_menubar(self._top) + self._init_buttons(self._top) + self._init_feedback(self._top) + self._init_grammar(self._top) + self._init_canvas(self._top) + + # A popup menu for reducing. + self._reduce_menu = Menu(self._canvas, tearoff=0) + + # Reset the demo, and set the feedback frame to empty. + self.reset() + self._lastoper1["text"] = "" + + ######################################### + ## Initialization Helpers + ######################################### + + def _init_fonts(self, root): + # See: + self._sysfont = Font(font=Button()["font"]) + root.option_add("*Font", self._sysfont) + + # TWhat's our font size (default=same as sysfont) + self._size = IntVar(root) + self._size.set(self._sysfont.cget("size")) + + self._boldfont = Font(family="helvetica", weight="bold", size=self._size.get()) + self._font = Font(family="helvetica", size=self._size.get()) + + def _init_grammar(self, parent): + # Grammar view. 
+ self._prodframe = listframe = Frame(parent) + self._prodframe.pack(fill="both", side="left", padx=2) + self._prodlist_label = Label( + self._prodframe, font=self._boldfont, text="Available Reductions" + ) + self._prodlist_label.pack() + self._prodlist = Listbox( + self._prodframe, + selectmode="single", + relief="groove", + background="white", + foreground="#909090", + font=self._font, + selectforeground="#004040", + selectbackground="#c0f0c0", + ) + + self._prodlist.pack(side="right", fill="both", expand=1) + + self._productions = list(self._parser.grammar().productions()) + for production in self._productions: + self._prodlist.insert("end", (" %s" % production)) + self._prodlist.config(height=min(len(self._productions), 25)) + + # Add a scrollbar if there are more than 25 productions. + if 1: # len(self._productions) > 25: + listscroll = Scrollbar(self._prodframe, orient="vertical") + self._prodlist.config(yscrollcommand=listscroll.set) + listscroll.config(command=self._prodlist.yview) + listscroll.pack(side="left", fill="y") + + # If they select a production, apply it. + self._prodlist.bind("<>", self._prodlist_select) + + # When they hover over a production, highlight it. 
+ self._hover = -1 + self._prodlist.bind("", self._highlight_hover) + self._prodlist.bind("", self._clear_hover) + + def _init_bindings(self): + # Quit + self._top.bind("", self.destroy) + self._top.bind("", self.destroy) + self._top.bind("", self.destroy) + self._top.bind("", self.destroy) + + # Ops (step, shift, reduce, undo) + self._top.bind("", self.step) + self._top.bind("", self.shift) + self._top.bind("", self.shift) + self._top.bind("", self.shift) + self._top.bind("", self.reduce) + self._top.bind("", self.reduce) + self._top.bind("", self.reduce) + self._top.bind("", self.reset) + self._top.bind("", self.undo) + self._top.bind("", self.undo) + self._top.bind("", self.undo) + self._top.bind("", self.undo) + self._top.bind("", self.undo) + + # Misc + self._top.bind("", self.postscript) + self._top.bind("", self.help) + self._top.bind("", self.help) + self._top.bind("", self.edit_grammar) + self._top.bind("", self.edit_sentence) + + # Animation speed control + self._top.bind("-", lambda e, a=self._animate: a.set(20)) + self._top.bind("=", lambda e, a=self._animate: a.set(10)) + self._top.bind("+", lambda e, a=self._animate: a.set(4)) + + def _init_buttons(self, parent): + # Set up the frames. 
+ self._buttonframe = buttonframe = Frame(parent) + buttonframe.pack(fill="none", side="bottom") + Button( + buttonframe, + text="Step", + background="#90c0d0", + foreground="black", + command=self.step, + ).pack(side="left") + Button( + buttonframe, + text="Shift", + underline=0, + background="#90f090", + foreground="black", + command=self.shift, + ).pack(side="left") + Button( + buttonframe, + text="Reduce", + underline=0, + background="#90f090", + foreground="black", + command=self.reduce, + ).pack(side="left") + Button( + buttonframe, + text="Undo", + underline=0, + background="#f0a0a0", + foreground="black", + command=self.undo, + ).pack(side="left") + + def _init_menubar(self, parent): + menubar = Menu(parent) + + filemenu = Menu(menubar, tearoff=0) + filemenu.add_command( + label="Reset Parser", underline=0, command=self.reset, accelerator="Del" + ) + filemenu.add_command( + label="Print to Postscript", + underline=0, + command=self.postscript, + accelerator="Ctrl-p", + ) + filemenu.add_command( + label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-x" + ) + menubar.add_cascade(label="File", underline=0, menu=filemenu) + + editmenu = Menu(menubar, tearoff=0) + editmenu.add_command( + label="Edit Grammar", + underline=5, + command=self.edit_grammar, + accelerator="Ctrl-g", + ) + editmenu.add_command( + label="Edit Text", + underline=5, + command=self.edit_sentence, + accelerator="Ctrl-t", + ) + menubar.add_cascade(label="Edit", underline=0, menu=editmenu) + + rulemenu = Menu(menubar, tearoff=0) + rulemenu.add_command( + label="Step", underline=1, command=self.step, accelerator="Space" + ) + rulemenu.add_separator() + rulemenu.add_command( + label="Shift", underline=0, command=self.shift, accelerator="Ctrl-s" + ) + rulemenu.add_command( + label="Reduce", underline=0, command=self.reduce, accelerator="Ctrl-r" + ) + rulemenu.add_separator() + rulemenu.add_command( + label="Undo", underline=0, command=self.undo, accelerator="Ctrl-u" + ) + 
menubar.add_cascade(label="Apply", underline=0, menu=rulemenu) + + viewmenu = Menu(menubar, tearoff=0) + viewmenu.add_checkbutton( + label="Show Grammar", + underline=0, + variable=self._show_grammar, + command=self._toggle_grammar, + ) + viewmenu.add_separator() + viewmenu.add_radiobutton( + label="Tiny", + variable=self._size, + underline=0, + value=10, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Small", + variable=self._size, + underline=0, + value=12, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Medium", + variable=self._size, + underline=0, + value=14, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Large", + variable=self._size, + underline=0, + value=18, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Huge", + variable=self._size, + underline=0, + value=24, + command=self.resize, + ) + menubar.add_cascade(label="View", underline=0, menu=viewmenu) + + animatemenu = Menu(menubar, tearoff=0) + animatemenu.add_radiobutton( + label="No Animation", underline=0, variable=self._animate, value=0 + ) + animatemenu.add_radiobutton( + label="Slow Animation", + underline=0, + variable=self._animate, + value=20, + accelerator="-", + ) + animatemenu.add_radiobutton( + label="Normal Animation", + underline=0, + variable=self._animate, + value=10, + accelerator="=", + ) + animatemenu.add_radiobutton( + label="Fast Animation", + underline=0, + variable=self._animate, + value=4, + accelerator="+", + ) + menubar.add_cascade(label="Animate", underline=1, menu=animatemenu) + + helpmenu = Menu(menubar, tearoff=0) + helpmenu.add_command(label="About", underline=0, command=self.about) + helpmenu.add_command( + label="Instructions", underline=0, command=self.help, accelerator="F1" + ) + menubar.add_cascade(label="Help", underline=0, menu=helpmenu) + + parent.config(menu=menubar) + + def _init_feedback(self, parent): + self._feedbackframe = feedbackframe = Frame(parent) + feedbackframe.pack(fill="x", side="bottom", 
padx=3, pady=3) + self._lastoper_label = Label( + feedbackframe, text="Last Operation:", font=self._font + ) + self._lastoper_label.pack(side="left") + lastoperframe = Frame(feedbackframe, relief="sunken", border=1) + lastoperframe.pack(fill="x", side="right", expand=1, padx=5) + self._lastoper1 = Label( + lastoperframe, foreground="#007070", background="#f0f0f0", font=self._font + ) + self._lastoper2 = Label( + lastoperframe, + anchor="w", + width=30, + foreground="#004040", + background="#f0f0f0", + font=self._font, + ) + self._lastoper1.pack(side="left") + self._lastoper2.pack(side="left", fill="x", expand=1) + + def _init_canvas(self, parent): + self._cframe = CanvasFrame( + parent, + background="white", + width=525, + closeenough=10, + border=2, + relief="sunken", + ) + self._cframe.pack(expand=1, fill="both", side="top", pady=2) + canvas = self._canvas = self._cframe.canvas() + + self._stackwidgets = [] + self._rtextwidgets = [] + self._titlebar = canvas.create_rectangle( + 0, 0, 0, 0, fill="#c0f0f0", outline="black" + ) + self._exprline = canvas.create_line(0, 0, 0, 0, dash=".") + self._stacktop = canvas.create_line(0, 0, 0, 0, fill="#408080") + size = self._size.get() + 4 + self._stacklabel = TextWidget( + canvas, "Stack", color="#004040", font=self._boldfont + ) + self._rtextlabel = TextWidget( + canvas, "Remaining Text", color="#004040", font=self._boldfont + ) + self._cframe.add_widget(self._stacklabel) + self._cframe.add_widget(self._rtextlabel) + + ######################################### + ## Main draw procedure + ######################################### + + def _redraw(self): + scrollregion = self._canvas["scrollregion"].split() + (cx1, cy1, cx2, cy2) = (int(c) for c in scrollregion) + + # Delete the old stack & rtext widgets. 
+ for stackwidget in self._stackwidgets: + self._cframe.destroy_widget(stackwidget) + self._stackwidgets = [] + for rtextwidget in self._rtextwidgets: + self._cframe.destroy_widget(rtextwidget) + self._rtextwidgets = [] + + # Position the titlebar & exprline + (x1, y1, x2, y2) = self._stacklabel.bbox() + y = y2 - y1 + 10 + self._canvas.coords(self._titlebar, -5000, 0, 5000, y - 4) + self._canvas.coords(self._exprline, 0, y * 2 - 10, 5000, y * 2 - 10) + + # Position the titlebar labels.. + (x1, y1, x2, y2) = self._stacklabel.bbox() + self._stacklabel.move(5 - x1, 3 - y1) + (x1, y1, x2, y2) = self._rtextlabel.bbox() + self._rtextlabel.move(cx2 - x2 - 5, 3 - y1) + + # Draw the stack. + stackx = 5 + for tok in self._parser.stack(): + if isinstance(tok, Tree): + attribs = { + "tree_color": "#4080a0", + "tree_width": 2, + "node_font": self._boldfont, + "node_color": "#006060", + "leaf_color": "#006060", + "leaf_font": self._font, + } + widget = tree_to_treesegment(self._canvas, tok, **attribs) + widget.label()["color"] = "#000000" + else: + widget = TextWidget(self._canvas, tok, color="#000000", font=self._font) + widget.bind_click(self._popup_reduce) + self._stackwidgets.append(widget) + self._cframe.add_widget(widget, stackx, y) + stackx = widget.bbox()[2] + 10 + + # Draw the remaining text. + rtextwidth = 0 + for tok in self._parser.remaining_text(): + widget = TextWidget(self._canvas, tok, color="#000000", font=self._font) + self._rtextwidgets.append(widget) + self._cframe.add_widget(widget, rtextwidth, y) + rtextwidth = widget.bbox()[2] + 4 + + # Allow enough room to shift the next token (for animations) + if len(self._rtextwidgets) > 0: + stackx += self._rtextwidgets[0].width() + + # Move the remaining text to the correct location (keep it + # right-justified, when possible); and move the remaining text + # label, if necessary. 
+ stackx = max(stackx, self._stacklabel.width() + 25) + rlabelwidth = self._rtextlabel.width() + 10 + if stackx >= cx2 - max(rtextwidth, rlabelwidth): + cx2 = stackx + max(rtextwidth, rlabelwidth) + for rtextwidget in self._rtextwidgets: + rtextwidget.move(4 + cx2 - rtextwidth, 0) + self._rtextlabel.move(cx2 - self._rtextlabel.bbox()[2] - 5, 0) + + midx = (stackx + cx2 - max(rtextwidth, rlabelwidth)) / 2 + self._canvas.coords(self._stacktop, midx, 0, midx, 5000) + (x1, y1, x2, y2) = self._stacklabel.bbox() + + # Set up binding to allow them to shift a token by dragging it. + if len(self._rtextwidgets) > 0: + + def drag_shift(widget, midx=midx, self=self): + if widget.bbox()[0] < midx: + self.shift() + else: + self._redraw() + + self._rtextwidgets[0].bind_drag(drag_shift) + self._rtextwidgets[0].bind_click(self.shift) + + # Draw the stack top. + self._highlight_productions() + + def _draw_stack_top(self, widget): + # hack.. + midx = widget.bbox()[2] + 50 + self._canvas.coords(self._stacktop, midx, 0, midx, 5000) + + def _highlight_productions(self): + # Highlight the productions that can be reduced. 
+ self._prodlist.selection_clear(0, "end") + for prod in self._parser.reducible_productions(): + index = self._productions.index(prod) + self._prodlist.selection_set(index) + + ######################################### + ## Button Callbacks + ######################################### + + def destroy(self, *e): + if self._top is None: + return + self._top.destroy() + self._top = None + + def reset(self, *e): + self._parser.initialize(self._sent) + self._lastoper1["text"] = "Reset App" + self._lastoper2["text"] = "" + self._redraw() + + def step(self, *e): + if self.reduce(): + return True + elif self.shift(): + return True + else: + if list(self._parser.parses()): + self._lastoper1["text"] = "Finished:" + self._lastoper2["text"] = "Success" + else: + self._lastoper1["text"] = "Finished:" + self._lastoper2["text"] = "Failure" + + def shift(self, *e): + if self._animating_lock: + return + if self._parser.shift(): + tok = self._parser.stack()[-1] + self._lastoper1["text"] = "Shift:" + self._lastoper2["text"] = "%r" % tok + if self._animate.get(): + self._animate_shift() + else: + self._redraw() + return True + return False + + def reduce(self, *e): + if self._animating_lock: + return + production = self._parser.reduce() + if production: + self._lastoper1["text"] = "Reduce:" + self._lastoper2["text"] = "%s" % production + if self._animate.get(): + self._animate_reduce() + else: + self._redraw() + return production + + def undo(self, *e): + if self._animating_lock: + return + if self._parser.undo(): + self._redraw() + + def postscript(self, *e): + self._cframe.print_to_file() + + def mainloop(self, *args, **kwargs): + """ + Enter the Tkinter mainloop. This function must be called if + this demo is created from a non-interactive program (e.g. + from a secript); otherwise, the demo will close as soon as + the script completes. 
+ """ + if in_idle(): + return + self._top.mainloop(*args, **kwargs) + + ######################################### + ## Menubar callbacks + ######################################### + + def resize(self, size=None): + if size is not None: + self._size.set(size) + size = self._size.get() + self._font.configure(size=-(abs(size))) + self._boldfont.configure(size=-(abs(size))) + self._sysfont.configure(size=-(abs(size))) + + # self._stacklabel['font'] = ('helvetica', -size-4, 'bold') + # self._rtextlabel['font'] = ('helvetica', -size-4, 'bold') + # self._lastoper_label['font'] = ('helvetica', -size) + # self._lastoper1['font'] = ('helvetica', -size) + # self._lastoper2['font'] = ('helvetica', -size) + # self._prodlist['font'] = ('helvetica', -size) + # self._prodlist_label['font'] = ('helvetica', -size-2, 'bold') + self._redraw() + + def help(self, *e): + # The default font's not very legible; try using 'fixed' instead. + try: + ShowText( + self._top, + "Help: Shift-Reduce Parser Application", + (__doc__ or "").strip(), + width=75, + font="fixed", + ) + except: + ShowText( + self._top, + "Help: Shift-Reduce Parser Application", + (__doc__ or "").strip(), + width=75, + ) + + def about(self, *e): + ABOUT = "NLTK Shift-Reduce Parser Application\n" + "Written by Edward Loper" + TITLE = "About: Shift-Reduce Parser Application" + try: + from tkinter.messagebox import Message + + Message(message=ABOUT, title=TITLE).show() + except: + ShowText(self._top, TITLE, ABOUT) + + def edit_grammar(self, *e): + CFGEditor(self._top, self._parser.grammar(), self.set_grammar) + + def set_grammar(self, grammar): + self._parser.set_grammar(grammar) + self._productions = list(grammar.productions()) + self._prodlist.delete(0, "end") + for production in self._productions: + self._prodlist.insert("end", (" %s" % production)) + + def edit_sentence(self, *e): + sentence = " ".join(self._sent) + title = "Edit Text" + instr = "Enter a new sentence to parse." 
+ EntryDialog(self._top, sentence, instr, self.set_sentence, title) + + def set_sentence(self, sent): + self._sent = sent.split() # [XX] use tagged? + self.reset() + + ######################################### + ## Reduce Production Selection + ######################################### + + def _toggle_grammar(self, *e): + if self._show_grammar.get(): + self._prodframe.pack( + fill="both", side="left", padx=2, after=self._feedbackframe + ) + self._lastoper1["text"] = "Show Grammar" + else: + self._prodframe.pack_forget() + self._lastoper1["text"] = "Hide Grammar" + self._lastoper2["text"] = "" + + def _prodlist_select(self, event): + selection = self._prodlist.curselection() + if len(selection) != 1: + return + index = int(selection[0]) + production = self._parser.reduce(self._productions[index]) + if production: + self._lastoper1["text"] = "Reduce:" + self._lastoper2["text"] = "%s" % production + if self._animate.get(): + self._animate_reduce() + else: + self._redraw() + else: + # Reset the production selections. + self._prodlist.selection_clear(0, "end") + for prod in self._parser.reducible_productions(): + index = self._productions.index(prod) + self._prodlist.selection_set(index) + + def _popup_reduce(self, widget): + # Remove old commands. + productions = self._parser.reducible_productions() + if len(productions) == 0: + return + + self._reduce_menu.delete(0, "end") + for production in productions: + self._reduce_menu.add_command(label=str(production), command=self.reduce) + self._reduce_menu.post( + self._canvas.winfo_pointerx(), self._canvas.winfo_pointery() + ) + + ######################################### + ## Animations + ######################################### + + def _animate_shift(self): + # What widget are we shifting? + widget = self._rtextwidgets[0] + + # Where are we shifting from & to? + right = widget.bbox()[0] + if len(self._stackwidgets) == 0: + left = 5 + else: + left = self._stackwidgets[-1].bbox()[2] + 10 + + # Start animating. 
+ dt = self._animate.get() + dx = (left - right) * 1.0 / dt + self._animate_shift_frame(dt, widget, dx) + + def _animate_shift_frame(self, frame, widget, dx): + if frame > 0: + self._animating_lock = 1 + widget.move(dx, 0) + self._top.after(10, self._animate_shift_frame, frame - 1, widget, dx) + else: + # but: stacktop?? + + # Shift the widget to the stack. + del self._rtextwidgets[0] + self._stackwidgets.append(widget) + self._animating_lock = 0 + + # Display the available productions. + self._draw_stack_top(widget) + self._highlight_productions() + + def _animate_reduce(self): + # What widgets are we shifting? + numwidgets = len(self._parser.stack()[-1]) # number of children + widgets = self._stackwidgets[-numwidgets:] + + # How far are we moving? + if isinstance(widgets[0], TreeSegmentWidget): + ydist = 15 + widgets[0].label().height() + else: + ydist = 15 + widgets[0].height() + + # Start animating. + dt = self._animate.get() + dy = ydist * 2.0 / dt + self._animate_reduce_frame(dt / 2, widgets, dy) + + def _animate_reduce_frame(self, frame, widgets, dy): + if frame > 0: + self._animating_lock = 1 + for widget in widgets: + widget.move(0, dy) + self._top.after(10, self._animate_reduce_frame, frame - 1, widgets, dy) + else: + del self._stackwidgets[-len(widgets) :] + for widget in widgets: + self._cframe.remove_widget(widget) + tok = self._parser.stack()[-1] + if not isinstance(tok, Tree): + raise ValueError() + label = TextWidget( + self._canvas, str(tok.label()), color="#006060", font=self._boldfont + ) + widget = TreeSegmentWidget(self._canvas, label, widgets, width=2) + (x1, y1, x2, y2) = self._stacklabel.bbox() + y = y2 - y1 + 10 + if not self._stackwidgets: + x = 5 + else: + x = self._stackwidgets[-1].bbox()[2] + 10 + self._cframe.add_widget(widget, x, y) + self._stackwidgets.append(widget) + + # Display the available productions. + self._draw_stack_top(widget) + self._highlight_productions() + + # # Delete the old widgets.. 
+ # del self._stackwidgets[-len(widgets):] + # for widget in widgets: + # self._cframe.destroy_widget(widget) + # + # # Make a new one. + # tok = self._parser.stack()[-1] + # if isinstance(tok, Tree): + # attribs = {'tree_color': '#4080a0', 'tree_width': 2, + # 'node_font': bold, 'node_color': '#006060', + # 'leaf_color': '#006060', 'leaf_font':self._font} + # widget = tree_to_treesegment(self._canvas, tok.type(), + # **attribs) + # widget.node()['color'] = '#000000' + # else: + # widget = TextWidget(self._canvas, tok.type(), + # color='#000000', font=self._font) + # widget.bind_click(self._popup_reduce) + # (x1, y1, x2, y2) = self._stacklabel.bbox() + # y = y2-y1+10 + # if not self._stackwidgets: x = 5 + # else: x = self._stackwidgets[-1].bbox()[2] + 10 + # self._cframe.add_widget(widget, x, y) + # self._stackwidgets.append(widget) + + # self._redraw() + self._animating_lock = 0 + + ######################################### + ## Hovering. + ######################################### + + def _highlight_hover(self, event): + # What production are we hovering over? + index = self._prodlist.nearest(event.y) + if self._hover == index: + return + + # Clear any previous hover highlighting. + self._clear_hover() + + # If the production corresponds to an available reduction, + # highlight the stack. + selection = [int(s) for s in self._prodlist.curselection()] + if index in selection: + rhslen = len(self._productions[index].rhs()) + for stackwidget in self._stackwidgets[-rhslen:]: + if isinstance(stackwidget, TreeSegmentWidget): + stackwidget.label()["color"] = "#00a000" + else: + stackwidget["color"] = "#00a000" + + # Remember what production we're hovering over. + self._hover = index + + def _clear_hover(self, *event): + # Clear any previous hover highlighting. 
+ if self._hover == -1: + return + self._hover = -1 + for stackwidget in self._stackwidgets: + if isinstance(stackwidget, TreeSegmentWidget): + stackwidget.label()["color"] = "black" + else: + stackwidget["color"] = "black" + + +def app(): + """ + Create a shift reduce parser app, using a simple grammar and + text. + """ + + from nltk.grammar import CFG, Nonterminal, Production + + nonterminals = "S VP NP PP P N Name V Det" + (S, VP, NP, PP, P, N, Name, V, Det) = (Nonterminal(s) for s in nonterminals.split()) + + productions = ( + # Syntactic Productions + Production(S, [NP, VP]), + Production(NP, [Det, N]), + Production(NP, [NP, PP]), + Production(VP, [VP, PP]), + Production(VP, [V, NP, PP]), + Production(VP, [V, NP]), + Production(PP, [P, NP]), + # Lexical Productions + Production(NP, ["I"]), + Production(Det, ["the"]), + Production(Det, ["a"]), + Production(N, ["man"]), + Production(V, ["saw"]), + Production(P, ["in"]), + Production(P, ["with"]), + Production(N, ["park"]), + Production(N, ["dog"]), + Production(N, ["statue"]), + Production(Det, ["my"]), + ) + + grammar = CFG(S, productions) + + # tokenize the sentence + sent = "my dog saw a man in the park with a statue".split() + + ShiftReduceApp(grammar, sent).mainloop() + + +if __name__ == "__main__": + app() + +__all__ = ["app"] diff --git a/lib/python3.10/site-packages/nltk/app/wordfreq_app.py b/lib/python3.10/site-packages/nltk/app/wordfreq_app.py new file mode 100644 index 0000000000000000000000000000000000000000..2846b31216be4611aeabb539782137f2f0decac7 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/app/wordfreq_app.py @@ -0,0 +1,36 @@ +# Natural Language Toolkit: Wordfreq Application +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Sumukh Ghodke +# URL: +# For license information, see LICENSE.TXT + +from matplotlib import pylab + +from nltk.corpus import gutenberg +from nltk.text import Text + + +def plot_word_freq_dist(text): + fd = text.vocab() + + samples = [item for item, _ in 
fd.most_common(50)] + values = [fd[sample] for sample in samples] + values = [sum(values[: i + 1]) * 100.0 / fd.N() for i in range(len(values))] + pylab.title(text.name) + pylab.xlabel("Samples") + pylab.ylabel("Cumulative Percentage") + pylab.plot(values) + pylab.xticks(range(len(samples)), [str(s) for s in samples], rotation=90) + pylab.show() + + +def app(): + t1 = Text(gutenberg.words("melville-moby_dick.txt")) + plot_word_freq_dist(t1) + + +if __name__ == "__main__": + app() + +__all__ = ["app"] diff --git a/lib/python3.10/site-packages/nltk/app/wordnet_app.py b/lib/python3.10/site-packages/nltk/app/wordnet_app.py new file mode 100644 index 0000000000000000000000000000000000000000..afed38b947d0ec231fe4d6f2f56614358d98c7b2 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/app/wordnet_app.py @@ -0,0 +1,1005 @@ +# Natural Language Toolkit: WordNet Browser Application +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Jussi Salmela +# Paul Bone +# URL: +# For license information, see LICENSE.TXT + +""" +A WordNet Browser application which launches the default browser +(if it is not already running) and opens a new tab with a connection +to http://localhost:port/ . It also starts an HTTP server on the +specified port and begins serving browser requests. The default +port is 8000. (For command-line help, run "python wordnet -h") +This application requires that the user's web browser supports +Javascript. + +BrowServer is a server for browsing the NLTK Wordnet database It first +launches a browser client to be used for browsing and then starts +serving the requests of that and maybe other clients + +Usage:: + + browserver.py -h + browserver.py [-s] [-p ] + +Options:: + + -h or --help + Display this help message. + + -l or --log-file + Logs messages to the given file, If this option is not specified + messages are silently dropped. + + -p or --port + Run the web server on this TCP port, defaults to 8000. 
+ + -s or --server-mode + Do not start a web browser, and do not allow a user to + shutdown the server through the web interface. +""" +# TODO: throughout this package variable names and docstrings need +# modifying to be compliant with NLTK's coding standards. Tests also +# need to be develop to ensure this continues to work in the face of +# changes to other NLTK packages. + +import base64 +import copy +import getopt +import io +import os +import pickle +import sys +import threading +import time +import webbrowser +from collections import defaultdict +from http.server import BaseHTTPRequestHandler, HTTPServer + +# Allow this program to run inside the NLTK source tree. +from sys import argv +from urllib.parse import unquote_plus + +from nltk.corpus import wordnet as wn +from nltk.corpus.reader.wordnet import Lemma, Synset + +firstClient = True + +# True if we're not also running a web browser. The value f server_mode +# gets set by demo(). +server_mode = None + +# If set this is a file object for writing log messages. +logfile = None + + +class MyServerHandler(BaseHTTPRequestHandler): + def do_HEAD(self): + self.send_head() + + def do_GET(self): + global firstClient + sp = self.path[1:] + if unquote_plus(sp) == "SHUTDOWN THE SERVER": + if server_mode: + page = "Server must be killed with SIGTERM." + type = "text/plain" + else: + print("Server shutting down!") + os._exit(0) + + elif sp == "": # First request. + type = "text/html" + if not server_mode and firstClient: + firstClient = False + page = get_static_index_page(True) + else: + page = get_static_index_page(False) + word = "green" + + elif sp.endswith(".html"): # Trying to fetch a HTML file TODO: + type = "text/html" + usp = unquote_plus(sp) + if usp == "NLTK Wordnet Browser Database Info.html": + word = "* Database Info *" + if os.path.isfile(usp): + with open(usp) as infile: + page = infile.read() + else: + page = ( + (html_header % word) + "

The database info file:" + "

" + + usp + + "" + + "

was not found. Run this:" + + "

python dbinfo_html.py" + + "

to produce it." + + html_trailer + ) + else: + # Handle files here. + word = sp + try: + page = get_static_page_by_path(usp) + except FileNotFoundError: + page = "Internal error: Path for static page '%s' is unknown" % usp + # Set type to plain to prevent XSS by printing the path as HTML + type = "text/plain" + elif sp.startswith("search"): + # This doesn't seem to work with MWEs. + type = "text/html" + parts = (sp.split("?")[1]).split("&") + word = [ + p.split("=")[1].replace("+", " ") + for p in parts + if p.startswith("nextWord") + ][0] + page, word = page_from_word(word) + elif sp.startswith("lookup_"): + # TODO add a variation of this that takes a non ecoded word or MWE. + type = "text/html" + sp = sp[len("lookup_") :] + page, word = page_from_href(sp) + elif sp == "start_page": + # if this is the first request we should display help + # information, and possibly set a default word. + type = "text/html" + page, word = page_from_word("wordnet") + else: + type = "text/plain" + page = "Could not parse request: '%s'" % sp + + # Send result. + self.send_head(type) + self.wfile.write(page.encode("utf8")) + + def send_head(self, type=None): + self.send_response(200) + self.send_header("Content-type", type) + self.end_headers() + + def log_message(self, format, *args): + global logfile + + if logfile: + logfile.write( + "%s - - [%s] %s\n" + % (self.address_string(), self.log_date_time_string(), format % args) + ) + + +def get_unique_counter_from_url(sp): + """ + Extract the unique counter from the URL if it has one. Otherwise return + null. + """ + pos = sp.rfind("%23") + if pos != -1: + return int(sp[(pos + 3) :]) + else: + return None + + +def wnb(port=8000, runBrowser=True, logfilename=None): + """ + Run NLTK Wordnet Browser Server. + + :param port: The port number for the server to listen on, defaults to + 8000 + :type port: int + + :param runBrowser: True to start a web browser and point it at the web + server. 
+ :type runBrowser: bool + """ + # The webbrowser module is unpredictable, typically it blocks if it uses + # a console web browser, and doesn't block if it uses a GUI webbrowser, + # so we need to force it to have a clear correct behaviour. + # + # Normally the server should run for as long as the user wants. they + # should idealy be able to control this from the UI by closing the + # window or tab. Second best would be clicking a button to say + # 'Shutdown' that first shutsdown the server and closes the window or + # tab, or exits the text-mode browser. Both of these are unfreasable. + # + # The next best alternative is to start the server, have it close when + # it receives SIGTERM (default), and run the browser as well. The user + # may have to shutdown both programs. + # + # Since webbrowser may block, and the webserver will block, we must run + # them in separate threads. + # + global server_mode, logfile + server_mode = not runBrowser + + # Setup logging. + if logfilename: + try: + logfile = open(logfilename, "a", 1) # 1 means 'line buffering' + except OSError as e: + sys.stderr.write("Couldn't open %s for writing: %s", logfilename, e) + sys.exit(1) + else: + logfile = None + + # Compute URL and start web browser + url = "http://localhost:" + str(port) + + server_ready = None + browser_thread = None + + if runBrowser: + server_ready = threading.Event() + browser_thread = startBrowser(url, server_ready) + + # Start the server. + server = HTTPServer(("", port), MyServerHandler) + if logfile: + logfile.write("NLTK Wordnet browser server running serving: %s\n" % url) + if runBrowser: + server_ready.set() + + try: + server.serve_forever() + except KeyboardInterrupt: + pass + + if runBrowser: + browser_thread.join() + + if logfile: + logfile.close() + + +def startBrowser(url, server_ready): + def run(): + server_ready.wait() + time.sleep(1) # Wait a little bit more, there's still the chance of + # a race condition. 
+ webbrowser.open(url, new=2, autoraise=1) + + t = threading.Thread(target=run) + t.start() + return t + + +##################################################################### +# Utilities +##################################################################### + + +""" +WordNet Browser Utilities. + +This provides a backend to both wxbrowse and browserver.py. +""" + +################################################################################ +# +# Main logic for wordnet browser. +# + +# This is wrapped inside a function since wn is only available if the +# WordNet corpus is installed. +def _pos_tuples(): + return [ + (wn.NOUN, "N", "noun"), + (wn.VERB, "V", "verb"), + (wn.ADJ, "J", "adj"), + (wn.ADV, "R", "adv"), + ] + + +def _pos_match(pos_tuple): + """ + This function returns the complete pos tuple for the partial pos + tuple given to it. It attempts to match it against the first + non-null component of the given pos tuple. + """ + if pos_tuple[0] == "s": + pos_tuple = ("a", pos_tuple[1], pos_tuple[2]) + for n, x in enumerate(pos_tuple): + if x is not None: + break + for pt in _pos_tuples(): + if pt[n] == pos_tuple[n]: + return pt + return None + + +HYPONYM = 0 +HYPERNYM = 1 +CLASS_REGIONAL = 2 +PART_HOLONYM = 3 +PART_MERONYM = 4 +ATTRIBUTE = 5 +SUBSTANCE_HOLONYM = 6 +SUBSTANCE_MERONYM = 7 +MEMBER_HOLONYM = 8 +MEMBER_MERONYM = 9 +VERB_GROUP = 10 +INSTANCE_HYPONYM = 12 +INSTANCE_HYPERNYM = 13 +CAUSE = 14 +ALSO_SEE = 15 +SIMILAR = 16 +ENTAILMENT = 17 +ANTONYM = 18 +FRAMES = 19 +PERTAINYM = 20 + +CLASS_CATEGORY = 21 +CLASS_USAGE = 22 +CLASS_REGIONAL = 23 +CLASS_USAGE = 24 +CLASS_CATEGORY = 11 + +DERIVATIONALLY_RELATED_FORM = 25 + +INDIRECT_HYPERNYMS = 26 + + +def lemma_property(word, synset, func): + def flattern(l): + if l == []: + return [] + else: + return l[0] + flattern(l[1:]) + + return flattern([func(l) for l in synset.lemmas() if l.name == word]) + + +def rebuild_tree(orig_tree): + node = orig_tree[0] + children = orig_tree[1:] + return (node, 
[rebuild_tree(t) for t in children]) + + +def get_relations_data(word, synset): + """ + Get synset relations data for a synset. Note that this doesn't + yet support things such as full hyponym vs direct hyponym. + """ + if synset.pos() == wn.NOUN: + return ( + (HYPONYM, "Hyponyms", synset.hyponyms()), + (INSTANCE_HYPONYM, "Instance hyponyms", synset.instance_hyponyms()), + (HYPERNYM, "Direct hypernyms", synset.hypernyms()), + ( + INDIRECT_HYPERNYMS, + "Indirect hypernyms", + rebuild_tree(synset.tree(lambda x: x.hypernyms()))[1], + ), + # hypernyms', 'Sister terms', + (INSTANCE_HYPERNYM, "Instance hypernyms", synset.instance_hypernyms()), + # (CLASS_REGIONAL, ['domain term region'], ), + (PART_HOLONYM, "Part holonyms", synset.part_holonyms()), + (PART_MERONYM, "Part meronyms", synset.part_meronyms()), + (SUBSTANCE_HOLONYM, "Substance holonyms", synset.substance_holonyms()), + (SUBSTANCE_MERONYM, "Substance meronyms", synset.substance_meronyms()), + (MEMBER_HOLONYM, "Member holonyms", synset.member_holonyms()), + (MEMBER_MERONYM, "Member meronyms", synset.member_meronyms()), + (ATTRIBUTE, "Attributes", synset.attributes()), + (ANTONYM, "Antonyms", lemma_property(word, synset, lambda l: l.antonyms())), + ( + DERIVATIONALLY_RELATED_FORM, + "Derivationally related form", + lemma_property( + word, synset, lambda l: l.derivationally_related_forms() + ), + ), + ) + elif synset.pos() == wn.VERB: + return ( + (ANTONYM, "Antonym", lemma_property(word, synset, lambda l: l.antonyms())), + (HYPONYM, "Hyponym", synset.hyponyms()), + (HYPERNYM, "Direct hypernyms", synset.hypernyms()), + ( + INDIRECT_HYPERNYMS, + "Indirect hypernyms", + rebuild_tree(synset.tree(lambda x: x.hypernyms()))[1], + ), + (ENTAILMENT, "Entailments", synset.entailments()), + (CAUSE, "Causes", synset.causes()), + (ALSO_SEE, "Also see", synset.also_sees()), + (VERB_GROUP, "Verb Groups", synset.verb_groups()), + ( + DERIVATIONALLY_RELATED_FORM, + "Derivationally related form", + lemma_property( + word, synset, 
lambda l: l.derivationally_related_forms() + ), + ), + ) + elif synset.pos() == wn.ADJ or synset.pos == wn.ADJ_SAT: + return ( + (ANTONYM, "Antonym", lemma_property(word, synset, lambda l: l.antonyms())), + (SIMILAR, "Similar to", synset.similar_tos()), + # Participle of verb - not supported by corpus + ( + PERTAINYM, + "Pertainyms", + lemma_property(word, synset, lambda l: l.pertainyms()), + ), + (ATTRIBUTE, "Attributes", synset.attributes()), + (ALSO_SEE, "Also see", synset.also_sees()), + ) + elif synset.pos() == wn.ADV: + # This is weird. adverbs such as 'quick' and 'fast' don't seem + # to have antonyms returned by the corpus.a + return ( + (ANTONYM, "Antonym", lemma_property(word, synset, lambda l: l.antonyms())), + ) + # Derived from adjective - not supported by corpus + else: + raise TypeError("Unhandles synset POS type: " + str(synset.pos())) + + +html_header = """ + + + + + +NLTK Wordnet Browser display of: %s + +""" +html_trailer = """ + + +""" + +explanation = """ +

Search Help

+
  • The display below the line is an example of the output the browser +shows you when you enter a search word. The search word was green.
  • +
  • The search result shows for different parts of speech the synsets +i.e. different meanings for the word.
  • +
  • All underlined texts are hypertext links. There are two types of links: +word links and others. Clicking a word link carries out a search for the word +in the Wordnet database.
  • +
  • Clicking a link of the other type opens a display section of data attached +to that link. Clicking that link a second time closes the section again.
  • +
  • Clicking S: opens a section showing the relations for that synset. +
  • +
  • Clicking on a relation name opens a section that displays the associated +synsets.
  • +
  • Type a search word in the Word field and start the search by the +Enter/Return key or click the Search button.
  • +
+
+""" + +# HTML oriented functions + + +def _bold(txt): + return "%s" % txt + + +def _center(txt): + return "
%s
" % txt + + +def _hlev(n, txt): + return "%s" % (n, txt, n) + + +def _italic(txt): + return "%s" % txt + + +def _li(txt): + return "
  • %s
  • " % txt + + +def pg(word, body): + """ + Return a HTML page of NLTK Browser format constructed from the + word and body + + :param word: The word that the body corresponds to + :type word: str + :param body: The HTML body corresponding to the word + :type body: str + :return: a HTML page for the word-body combination + :rtype: str + """ + return (html_header % word) + body + html_trailer + + +def _ul(txt): + return "
      " + txt + "
    " + + +def _abbc(txt): + """ + abbc = asterisks, breaks, bold, center + """ + return _center(_bold("
    " * 10 + "*" * 10 + " " + txt + " " + "*" * 10)) + + +full_hyponym_cont_text = _ul(_li(_italic("(has full hyponym continuation)"))) + "\n" + + +def _get_synset(synset_key): + """ + The synset key is the unique name of the synset, this can be + retrieved via synset.name() + """ + return wn.synset(synset_key) + + +def _collect_one_synset(word, synset, synset_relations): + """ + Returns the HTML string for one synset or word + + :param word: the current word + :type word: str + :param synset: a synset + :type synset: synset + :param synset_relations: information about which synset relations + to display. + :type synset_relations: dict(synset_key, set(relation_id)) + :return: The HTML string built for this synset + :rtype: str + """ + if isinstance(synset, tuple): # It's a word + raise NotImplementedError("word not supported by _collect_one_synset") + + typ = "S" + pos_tuple = _pos_match((synset.pos(), None, None)) + assert pos_tuple is not None, "pos_tuple is null: synset.pos(): %s" % synset.pos() + descr = pos_tuple[2] + ref = copy.deepcopy(Reference(word, synset_relations)) + ref.toggle_synset(synset) + synset_label = typ + ";" + if synset.name() in synset_relations: + synset_label = _bold(synset_label) + s = f"
  • {make_lookup_link(ref, synset_label)} ({descr}) " + + def format_lemma(w): + w = w.replace("_", " ") + if w.lower() == word: + return _bold(w) + else: + ref = Reference(w) + return make_lookup_link(ref, w) + + s += ", ".join(format_lemma(l.name()) for l in synset.lemmas()) + + gl = " ({}) {} ".format( + synset.definition(), + "; ".join('"%s"' % e for e in synset.examples()), + ) + return s + gl + _synset_relations(word, synset, synset_relations) + "
  • \n" + + +def _collect_all_synsets(word, pos, synset_relations=dict()): + """ + Return a HTML unordered list of synsets for the given word and + part of speech. + """ + return "
      %s\n
    \n" % "".join( + _collect_one_synset(word, synset, synset_relations) + for synset in wn.synsets(word, pos) + ) + + +def _synset_relations(word, synset, synset_relations): + """ + Builds the HTML string for the relations of a synset + + :param word: The current word + :type word: str + :param synset: The synset for which we're building the relations. + :type synset: Synset + :param synset_relations: synset keys and relation types for which to display relations. + :type synset_relations: dict(synset_key, set(relation_type)) + :return: The HTML for a synset's relations + :rtype: str + """ + + if not synset.name() in synset_relations: + return "" + ref = Reference(word, synset_relations) + + def relation_html(r): + if isinstance(r, Synset): + return make_lookup_link(Reference(r.lemma_names()[0]), r.lemma_names()[0]) + elif isinstance(r, Lemma): + return relation_html(r.synset()) + elif isinstance(r, tuple): + # It's probably a tuple containing a Synset and a list of + # similar tuples. This forms a tree of synsets. + return "{}\n
      {}
    \n".format( + relation_html(r[0]), + "".join("
  • %s
  • \n" % relation_html(sr) for sr in r[1]), + ) + else: + raise TypeError( + "r must be a synset, lemma or list, it was: type(r) = %s, r = %s" + % (type(r), r) + ) + + def make_synset_html(db_name, disp_name, rels): + synset_html = "%s\n" % make_lookup_link( + copy.deepcopy(ref).toggle_synset_relation(synset, db_name), + disp_name, + ) + + if db_name in ref.synset_relations[synset.name()]: + synset_html += "
      %s
    \n" % "".join( + "
  • %s
  • \n" % relation_html(r) for r in rels + ) + + return synset_html + + html = ( + "
      " + + "\n".join( + "
    • %s
    • " % make_synset_html(*rel_data) + for rel_data in get_relations_data(word, synset) + if rel_data[2] != [] + ) + + "
    " + ) + + return html + + +class RestrictedUnpickler(pickle.Unpickler): + """ + Unpickler that prevents any class or function from being used during loading. + """ + + def find_class(self, module, name): + # Forbid every function + raise pickle.UnpicklingError(f"global '{module}.{name}' is forbidden") + + +class Reference: + """ + A reference to a page that may be generated by page_word + """ + + def __init__(self, word, synset_relations=dict()): + """ + Build a reference to a new page. + + word is the word or words (separated by commas) for which to + search for synsets of + + synset_relations is a dictionary of synset keys to sets of + synset relation identifaiers to unfold a list of synset + relations for. + """ + self.word = word + self.synset_relations = synset_relations + + def encode(self): + """ + Encode this reference into a string to be used in a URL. + """ + # This uses a tuple rather than an object since the python + # pickle representation is much smaller and there is no need + # to represent the complete object. + string = pickle.dumps((self.word, self.synset_relations), -1) + return base64.urlsafe_b64encode(string).decode() + + @staticmethod + def decode(string): + """ + Decode a reference encoded with Reference.encode + """ + string = base64.urlsafe_b64decode(string.encode()) + word, synset_relations = RestrictedUnpickler(io.BytesIO(string)).load() + return Reference(word, synset_relations) + + def toggle_synset_relation(self, synset, relation): + """ + Toggle the display of the relations for the given synset and + relation type. + + This function will throw a KeyError if the synset is currently + not being displayed. 
+ """ + if relation in self.synset_relations[synset.name()]: + self.synset_relations[synset.name()].remove(relation) + else: + self.synset_relations[synset.name()].add(relation) + + return self + + def toggle_synset(self, synset): + """ + Toggle displaying of the relation types for the given synset + """ + if synset.name() in self.synset_relations: + del self.synset_relations[synset.name()] + else: + self.synset_relations[synset.name()] = set() + + return self + + +def make_lookup_link(ref, label): + return f'{label}' + + +def page_from_word(word): + """ + Return a HTML page for the given word. + + :type word: str + :param word: The currently active word + :return: A tuple (page,word), where page is the new current HTML page + to be sent to the browser and + word is the new current word + :rtype: A tuple (str,str) + """ + return page_from_reference(Reference(word)) + + +def page_from_href(href): + """ + Returns a tuple of the HTML page built and the new current word + + :param href: The hypertext reference to be solved + :type href: str + :return: A tuple (page,word), where page is the new current HTML page + to be sent to the browser and + word is the new current word + :rtype: A tuple (str,str) + """ + return page_from_reference(Reference.decode(href)) + + +def page_from_reference(href): + """ + Returns a tuple of the HTML page built and the new current word + + :param href: The hypertext reference to be solved + :type href: str + :return: A tuple (page,word), where page is the new current HTML page + to be sent to the browser and + word is the new current word + :rtype: A tuple (str,str) + """ + word = href.word + pos_forms = defaultdict(list) + words = word.split(",") + words = [w for w in [w.strip().lower().replace(" ", "_") for w in words] if w != ""] + if len(words) == 0: + # No words were found. + return "", "Please specify a word to search for." + + # This looks up multiple words at once. This is probably not + # necessary and may lead to problems. 
+ for w in words: + for pos in [wn.NOUN, wn.VERB, wn.ADJ, wn.ADV]: + form = wn.morphy(w, pos) + if form and form not in pos_forms[pos]: + pos_forms[pos].append(form) + body = "" + for pos, pos_str, name in _pos_tuples(): + if pos in pos_forms: + body += _hlev(3, name) + "\n" + for w in pos_forms[pos]: + # Not all words of exc files are in the database, skip + # to the next word if a KeyError is raised. + try: + body += _collect_all_synsets(w, pos, href.synset_relations) + except KeyError: + pass + if not body: + body = "The word or words '%s' were not found in the dictionary." % word + return body, word + + +##################################################################### +# Static pages +##################################################################### + + +def get_static_page_by_path(path): + """ + Return a static HTML page from the path given. + """ + if path == "index_2.html": + return get_static_index_page(False) + elif path == "index.html": + return get_static_index_page(True) + elif path == "NLTK Wordnet Browser Database Info.html": + return "Display of Wordnet Database Statistics is not supported" + elif path == "upper_2.html": + return get_static_upper_page(False) + elif path == "upper.html": + return get_static_upper_page(True) + elif path == "web_help.html": + return get_static_web_help_page() + elif path == "wx_help.html": + return get_static_wx_help_page() + raise FileNotFoundError() + + +def get_static_web_help_page(): + """ + Return the static web help page. + """ + return """ + + + + + + NLTK Wordnet Browser display of: * Help * + + +

    NLTK Wordnet Browser Help

    +

    The NLTK Wordnet Browser is a tool to use in browsing the Wordnet database. It tries to behave like the Wordnet project's web browser but the difference is that the NLTK Wordnet Browser uses a local Wordnet database. +

    You are using the Javascript client part of the NLTK Wordnet BrowseServer. We assume your browser is in tab sheets enabled mode.

    +

    For background information on Wordnet, see the Wordnet project home page: https://wordnet.princeton.edu/. For more information on the NLTK project, see the project home: +https://www.nltk.org/. To get an idea of what the Wordnet version used by this browser includes choose Show Database Info from the View submenu.

    +

    Word search

    +

    The word to be searched is typed into the New Word field and the search started with Enter or by clicking the Search button. There is no uppercase/lowercase distinction: the search word is transformed to lowercase before the search.

    +

    In addition, the word does not have to be in base form. The browser tries to find the possible base form(s) by making certain morphological substitutions. Typing fLIeS as an obscure example gives one this. Click the previous link to see what this kind of search looks like and then come back to this page by using the Alt+LeftArrow key combination.

    +

    The result of a search is a display of one or more +synsets for every part of speech in which a form of the +search word was found to occur. A synset is a set of words +having the same sense or meaning. Each word in a synset that is +underlined is a hyperlink which can be clicked to trigger an +automatic search for that word.

    +

    Every synset has a hyperlink S: at the start of its +display line. Clicking that symbol shows you the name of every +relation that this synset is part of. Every relation name is a hyperlink that opens up a display for that relation. Clicking it another time closes the display again. Clicking another relation name on a line that has an opened relation closes the open relation and opens the clicked relation.

    +

    It is also possible to give two or more words or collocations to be searched at the same time separating them with a comma like this cheer up,clear up, for example. Click the previous link to see what this kind of search looks like and then come back to this page by using the Alt+LeftArrow key combination. As you could see the search result includes the synsets found in the same order than the forms were given in the search field.

    +

    +There are also word level (lexical) relations recorded in the Wordnet database. Opening this kind of relation displays lines with a hyperlink W: at their beginning. Clicking this link shows more info on the word in question.

    +

    The Buttons

    +

    The Search and Help buttons need no more explanation.

    +

    The Show Database Info button shows a collection of Wordnet database statistics.

    +

    The Shutdown the Server button is shown for the first client of the BrowServer program i.e. for the client that is automatically launched when the BrowServer is started but not for the succeeding clients in order to protect the server from accidental shutdowns. +

    + +""" + + +def get_static_welcome_message(): + """ + Get the static welcome page. + """ + return """ +

    Search Help

    +
    • The display below the line is an example of the output the browser +shows you when you enter a search word. The search word was green.
    • +
    • The search result shows for different parts of speech the synsets +i.e. different meanings for the word.
    • +
    • All underlined texts are hypertext links. There are two types of links: +word links and others. Clicking a word link carries out a search for the word +in the Wordnet database.
    • +
    • Clicking a link of the other type opens a display section of data attached +to that link. Clicking that link a second time closes the section again.
    • +
    • Clicking S: opens a section showing the relations for that synset.
    • +
    • Clicking on a relation name opens a section that displays the associated +synsets.
    • +
    • Type a search word in the Next Word field and start the search by the +Enter/Return key or click the Search button.
    • +
    +""" + + +def get_static_index_page(with_shutdown): + """ + Get the static index page. + """ + template = """ + + + + + NLTK Wordnet Browser + + + + + + + +""" + if with_shutdown: + upper_link = "upper.html" + else: + upper_link = "upper_2.html" + + return template % upper_link + + +def get_static_upper_page(with_shutdown): + """ + Return the upper frame page, + + If with_shutdown is True then a 'shutdown' button is also provided + to shutdown the server. + """ + template = """ + + + + + + Untitled Document + + +
    + Current Word:  + Next Word:  + +
    + Help + %s + + + +""" + if with_shutdown: + shutdown_link = 'Shutdown' + else: + shutdown_link = "" + + return template % shutdown_link + + +def usage(): + """ + Display the command line help message. + """ + print(__doc__) + + +def app(): + # Parse and interpret options. + (opts, _) = getopt.getopt( + argv[1:], "l:p:sh", ["logfile=", "port=", "server-mode", "help"] + ) + port = 8000 + server_mode = False + help_mode = False + logfilename = None + for (opt, value) in opts: + if (opt == "-l") or (opt == "--logfile"): + logfilename = str(value) + elif (opt == "-p") or (opt == "--port"): + port = int(value) + elif (opt == "-s") or (opt == "--server-mode"): + server_mode = True + elif (opt == "-h") or (opt == "--help"): + help_mode = True + + if help_mode: + usage() + else: + wnb(port, not server_mode, logfilename) + + +if __name__ == "__main__": + app() + +__all__ = ["app"] diff --git a/lib/python3.10/site-packages/nltk/chat/__init__.py b/lib/python3.10/site-packages/nltk/chat/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..462f0b517068657d149662cf990414f203491caf --- /dev/null +++ b/lib/python3.10/site-packages/nltk/chat/__init__.py @@ -0,0 +1,48 @@ +# Natural Language Toolkit: Chatbots +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: Steven Bird +# URL: +# For license information, see LICENSE.TXT + +# Based on an Eliza implementation by Joe Strout , +# Jeff Epler and Jez Higgins . + +""" +A class for simple chatbots. These perform simple pattern matching on sentences +typed by users, and respond with automatically generated sentences. + +These chatbots may not work using the windows command line or the +windows IDLE GUI. 
+""" + +from nltk.chat.eliza import eliza_chat +from nltk.chat.iesha import iesha_chat +from nltk.chat.rude import rude_chat +from nltk.chat.suntsu import suntsu_chat +from nltk.chat.util import Chat +from nltk.chat.zen import zen_chat + +bots = [ + (eliza_chat, "Eliza (psycho-babble)"), + (iesha_chat, "Iesha (teen anime junky)"), + (rude_chat, "Rude (abusive bot)"), + (suntsu_chat, "Suntsu (Chinese sayings)"), + (zen_chat, "Zen (gems of wisdom)"), +] + + +def chatbots(): + print("Which chatbot would you like to talk to?") + botcount = len(bots) + for i in range(botcount): + print(" %d: %s" % (i + 1, bots[i][1])) + while True: + choice = input(f"\nEnter a number in the range 1-{botcount}: ").strip() + if choice.isdigit() and (int(choice) - 1) in range(botcount): + break + else: + print(" Error: bad chatbot number") + + chatbot = bots[int(choice) - 1][0] + chatbot() diff --git a/lib/python3.10/site-packages/nltk/chat/eliza.py b/lib/python3.10/site-packages/nltk/chat/eliza.py new file mode 100644 index 0000000000000000000000000000000000000000..5dfb4a4be2caa084c89a169f4861bd7a4b3eacf3 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/chat/eliza.py @@ -0,0 +1,337 @@ +# Natural Language Toolkit: Eliza +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + +# Based on an Eliza implementation by Joe Strout , +# Jeff Epler and Jez Higgins . + +# a translation table used to convert things you say into things the +# computer says back, e.g. "I am" --> "you are" + +from nltk.chat.util import Chat, reflections + +# a table of response pairs, where each pair consists of a +# regular expression, and a list of possible responses, +# with group-macros labelled as %1, %2. 
+ +pairs = ( + ( + r"I need (.*)", + ( + "Why do you need %1?", + "Would it really help you to get %1?", + "Are you sure you need %1?", + ), + ), + ( + r"Why don\'t you (.*)", + ( + "Do you really think I don't %1?", + "Perhaps eventually I will %1.", + "Do you really want me to %1?", + ), + ), + ( + r"Why can\'t I (.*)", + ( + "Do you think you should be able to %1?", + "If you could %1, what would you do?", + "I don't know -- why can't you %1?", + "Have you really tried?", + ), + ), + ( + r"I can\'t (.*)", + ( + "How do you know you can't %1?", + "Perhaps you could %1 if you tried.", + "What would it take for you to %1?", + ), + ), + ( + r"I am (.*)", + ( + "Did you come to me because you are %1?", + "How long have you been %1?", + "How do you feel about being %1?", + ), + ), + ( + r"I\'m (.*)", + ( + "How does being %1 make you feel?", + "Do you enjoy being %1?", + "Why do you tell me you're %1?", + "Why do you think you're %1?", + ), + ), + ( + r"Are you (.*)", + ( + "Why does it matter whether I am %1?", + "Would you prefer it if I were not %1?", + "Perhaps you believe I am %1.", + "I may be %1 -- what do you think?", + ), + ), + ( + r"What (.*)", + ( + "Why do you ask?", + "How would an answer to that help you?", + "What do you think?", + ), + ), + ( + r"How (.*)", + ( + "How do you suppose?", + "Perhaps you can answer your own question.", + "What is it you're really asking?", + ), + ), + ( + r"Because (.*)", + ( + "Is that the real reason?", + "What other reasons come to mind?", + "Does that reason apply to anything else?", + "If %1, what else must be true?", + ), + ), + ( + r"(.*) sorry (.*)", + ( + "There are many times when no apology is needed.", + "What feelings do you have when you apologize?", + ), + ), + ( + r"Hello(.*)", + ( + "Hello... I'm glad you could drop by today.", + "Hi there... 
how are you today?", + "Hello, how are you feeling today?", + ), + ), + ( + r"I think (.*)", + ("Do you doubt %1?", "Do you really think so?", "But you're not sure %1?"), + ), + ( + r"(.*) friend (.*)", + ( + "Tell me more about your friends.", + "When you think of a friend, what comes to mind?", + "Why don't you tell me about a childhood friend?", + ), + ), + (r"Yes", ("You seem quite sure.", "OK, but can you elaborate a bit?")), + ( + r"(.*) computer(.*)", + ( + "Are you really talking about me?", + "Does it seem strange to talk to a computer?", + "How do computers make you feel?", + "Do you feel threatened by computers?", + ), + ), + ( + r"Is it (.*)", + ( + "Do you think it is %1?", + "Perhaps it's %1 -- what do you think?", + "If it were %1, what would you do?", + "It could well be that %1.", + ), + ), + ( + r"It is (.*)", + ( + "You seem very certain.", + "If I told you that it probably isn't %1, what would you feel?", + ), + ), + ( + r"Can you (.*)", + ( + "What makes you think I can't %1?", + "If I could %1, then what?", + "Why do you ask if I can %1?", + ), + ), + ( + r"Can I (.*)", + ( + "Perhaps you don't want to %1.", + "Do you want to be able to %1?", + "If you could %1, would you?", + ), + ), + ( + r"You are (.*)", + ( + "Why do you think I am %1?", + "Does it please you to think that I'm %1?", + "Perhaps you would like me to be %1.", + "Perhaps you're really talking about yourself?", + ), + ), + ( + r"You\'re (.*)", + ( + "Why do you say I am %1?", + "Why do you think I am %1?", + "Are we talking about you, or me?", + ), + ), + ( + r"I don\'t (.*)", + ("Don't you really %1?", "Why don't you %1?", "Do you want to %1?"), + ), + ( + r"I feel (.*)", + ( + "Good, tell me more about these feelings.", + "Do you often feel %1?", + "When do you usually feel %1?", + "When you feel %1, what do you do?", + ), + ), + ( + r"I have (.*)", + ( + "Why do you tell me that you've %1?", + "Have you really %1?", + "Now that you have %1, what will you do next?", + ), + ), 
+ ( + r"I would (.*)", + ( + "Could you explain why you would %1?", + "Why would you %1?", + "Who else knows that you would %1?", + ), + ), + ( + r"Is there (.*)", + ( + "Do you think there is %1?", + "It's likely that there is %1.", + "Would you like there to be %1?", + ), + ), + ( + r"My (.*)", + ( + "I see, your %1.", + "Why do you say that your %1?", + "When your %1, how do you feel?", + ), + ), + ( + r"You (.*)", + ( + "We should be discussing you, not me.", + "Why do you say that about me?", + "Why do you care whether I %1?", + ), + ), + (r"Why (.*)", ("Why don't you tell me the reason why %1?", "Why do you think %1?")), + ( + r"I want (.*)", + ( + "What would it mean to you if you got %1?", + "Why do you want %1?", + "What would you do if you got %1?", + "If you got %1, then what would you do?", + ), + ), + ( + r"(.*) mother(.*)", + ( + "Tell me more about your mother.", + "What was your relationship with your mother like?", + "How do you feel about your mother?", + "How does this relate to your feelings today?", + "Good family relations are important.", + ), + ), + ( + r"(.*) father(.*)", + ( + "Tell me more about your father.", + "How did your father make you feel?", + "How do you feel about your father?", + "Does your relationship with your father relate to your feelings today?", + "Do you have trouble showing affection with your family?", + ), + ), + ( + r"(.*) child(.*)", + ( + "Did you have close friends as a child?", + "What is your favorite childhood memory?", + "Do you remember any dreams or nightmares from childhood?", + "Did the other children sometimes tease you?", + "How do you think your childhood experiences relate to your feelings today?", + ), + ), + ( + r"(.*)\?", + ( + "Why do you ask that?", + "Please consider whether you can answer your own question.", + "Perhaps the answer lies within yourself?", + "Why don't you tell me?", + ), + ), + ( + r"quit", + ( + "Thank you for talking with me.", + "Good-bye.", + "Thank you, that will be $150. 
Have a good day!", + ), + ), + ( + r"(.*)", + ( + "Please tell me more.", + "Let's change focus a bit... Tell me about your family.", + "Can you elaborate on that?", + "Why do you say that %1?", + "I see.", + "Very interesting.", + "%1.", + "I see. And what does that tell you?", + "How does that make you feel?", + "How do you feel when you say that?", + ), + ), +) + +eliza_chatbot = Chat(pairs, reflections) + + +def eliza_chat(): + print("Therapist\n---------") + print("Talk to the program by typing in plain English, using normal upper-") + print('and lower-case letters and punctuation. Enter "quit" when done.') + print("=" * 72) + print("Hello. How are you feeling today?") + + eliza_chatbot.converse() + + +def demo(): + eliza_chat() + + +if __name__ == "__main__": + demo() diff --git a/lib/python3.10/site-packages/nltk/chat/iesha.py b/lib/python3.10/site-packages/nltk/chat/iesha.py new file mode 100644 index 0000000000000000000000000000000000000000..552870caa30927f30b96c5dbdfd2ccb459cf48a8 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/chat/iesha.py @@ -0,0 +1,160 @@ +# Natural Language Toolkit: Teen Chatbot +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Selina Dennis +# URL: +# For license information, see LICENSE.TXT + +""" +This chatbot is a tongue-in-cheek take on the average teen +anime junky that frequents YahooMessenger or MSNM. +All spelling mistakes and flawed grammar are intentional. +""" + +from nltk.chat.util import Chat + +reflections = { + "am": "r", + "was": "were", + "i": "u", + "i'd": "u'd", + "i've": "u'v", + "ive": "u'v", + "i'll": "u'll", + "my": "ur", + "are": "am", + "you're": "im", + "you've": "ive", + "you'll": "i'll", + "your": "my", + "yours": "mine", + "you": "me", + "u": "me", + "ur": "my", + "urs": "mine", + "me": "u", +} + +# Note: %1/2/etc are used without spaces prior as the chat bot seems +# to add a superfluous space when matching. + +pairs = ( + ( + r"I\'m (.*)", + ( + "ur%1?? that's so cool! 
kekekekeke ^_^ tell me more!", + "ur%1? neat!! kekeke >_<", + ), + ), + ( + r"(.*) don\'t you (.*)", + ( + r"u think I can%2??! really?? kekeke \<_\<", + "what do u mean%2??!", + "i could if i wanted, don't you think!! kekeke", + ), + ), + (r"ye[as] [iI] (.*)", ("u%1? cool!! how?", "how come u%1??", "u%1? so do i!!")), + ( + r"do (you|u) (.*)\??", + ("do i%2? only on tuesdays! kekeke *_*", "i dunno! do u%2??"), + ), + ( + r"(.*)\?", + ( + "man u ask lots of questions!", + "booooring! how old r u??", + "boooooring!! ur not very fun", + ), + ), + ( + r"(cos|because) (.*)", + ("hee! i don't believe u! >_<", "nuh-uh! >_<", "ooooh i agree!"), + ), + ( + r"why can\'t [iI] (.*)", + ( + "i dunno! y u askin me for!", + "try harder, silly! hee! ^_^", + "i dunno! but when i can't%1 i jump up and down!", + ), + ), + ( + r"I can\'t (.*)", + ( + "u can't what??! >_<", + "that's ok! i can't%1 either! kekekekeke ^_^", + "try harder, silly! hee! ^&^", + ), + ), + ( + r"(.*) (like|love|watch) anime", + ( + "omg i love anime!! do u like sailor moon??! ^&^", + "anime yay! anime rocks sooooo much!", + "oooh anime! i love anime more than anything!", + "anime is the bestest evar! evangelion is the best!", + "hee anime is the best! do you have ur fav??", + ), + ), + ( + r"I (like|love|watch|play) (.*)", + ("yay! %2 rocks!", "yay! %2 is neat!", "cool! do u like other stuff?? ^_^"), + ), + ( + r"anime sucks|(.*) (hate|detest) anime", + ( + "ur a liar! i'm not gonna talk to u nemore if u h8 anime *;*", + "no way! anime is the best ever!", + "nuh-uh, anime is the best!", + ), + ), + ( + r"(are|r) (you|u) (.*)", + ("am i%1??! how come u ask that!", "maybe! y shud i tell u?? kekeke >_>"), + ), + ( + r"what (.*)", + ("hee u think im gonna tell u? .v.", "booooooooring! ask me somethin else!"), + ), + (r"how (.*)", ("not tellin!! kekekekekeke ^_^",)), + (r"(hi|hello|hey) (.*)", ("hi!!! how r u!!",)), + ( + r"quit", + ( + "mom says i have to go eat dinner now :,( bye!!", + "awww u have to go?? 
def iesha_chat():
    """Print the Iesha banner and start an interactive chat session."""
    intro = (
        "Iesha the TeenBoT\n---------",
        "Talk to the program by typing in plain English, using normal upper-",
        'and lower-case letters and punctuation. Enter "quit" when done.',
        "=" * 72,
        "hi!! i'm iesha! who r u??!",
    )
    for line in intro:
        print(line)
    iesha_chatbot.converse()


def demo():
    """Demo entry point: run the Iesha chatbot interactively."""
    iesha_chat()
def rude_chat():
    """Print the usage banner and start an interactive session with the rude bot."""
    header = (
        "Talk to the program by typing in plain English, using normal upper-",
        'and lower-case letters and punctuation. Enter "quit" when done.',
        "=" * 72,
        "I suppose I should say hello.",
    )
    for line in header:
        print(line)
    rude_chatbot.converse()


def demo():
    """Demo entry point: run the rude chatbot interactively."""
    rude_chat()
If he is in superior strength, evade him.", + "If the campaign is protracted, the resources of the State will not be equal to the strain.", + "Attack him where he is unprepared, appear where you are not expected.", + "There is no instance of a country having benefited from prolonged warfare.", + ), + ), + ( + r"[D-Fd-f](.*)", + ( + "The skillful soldier does not raise a second levy, neither are his supply-wagons loaded more than twice.", + "Bring war material with you from home, but forage on the enemy.", + "In war, then, let your great object be victory, not lengthy campaigns.", + "To fight and conquer in all your battles is not supreme excellence; supreme excellence consists in breaking the enemy's resistance without fighting.", + ), + ), + ( + r"[G-Ig-i](.*)", + ( + "Heaven signifies night and day, cold and heat, times and seasons.", + "It is the rule in war, if our forces are ten to the enemy's one, to surround him; if five to one, to attack him; if twice as numerous, to divide our army into two.", + "The good fighters of old first put themselves beyond the possibility of defeat, and then waited for an opportunity of defeating the enemy.", + "One may know how to conquer without being able to do it.", + ), + ), + ( + r"[J-Lj-l](.*)", + ( + "There are three ways in which a ruler can bring misfortune upon his army.", + "By commanding the army to advance or to retreat, being ignorant of the fact that it cannot obey. This is called hobbling the army.", + "By attempting to govern an army in the same way as he administers a kingdom, being ignorant of the conditions which obtain in an army. This causes restlessness in the soldier's minds.", + "By employing the officers of his army without discrimination, through ignorance of the military principle of adaptation to circumstances. 
This shakes the confidence of the soldiers.", + "There are five essentials for victory", + "He will win who knows when to fight and when not to fight.", + "He will win who knows how to handle both superior and inferior forces.", + "He will win whose army is animated by the same spirit throughout all its ranks.", + "He will win who, prepared himself, waits to take the enemy unprepared.", + "He will win who has military capacity and is not interfered with by the sovereign.", + ), + ), + ( + r"[M-Om-o](.*)", + ( + "If you know the enemy and know yourself, you need not fear the result of a hundred battles.", + "If you know yourself but not the enemy, for every victory gained you will also suffer a defeat.", + "If you know neither the enemy nor yourself, you will succumb in every battle.", + "The control of a large force is the same principle as the control of a few men: it is merely a question of dividing up their numbers.", + ), + ), + ( + r"[P-Rp-r](.*)", + ( + "Security against defeat implies defensive tactics; ability to defeat the enemy means taking the offensive.", + "Standing on the defensive indicates insufficient strength; attacking, a superabundance of strength.", + "He wins his battles by making no mistakes. 
Making no mistakes is what establishes the certainty of victory, for it means conquering an enemy that is already defeated.", + "A victorious army opposed to a routed one, is as a pound's weight placed in the scale against a single grain.", + "The onrush of a conquering force is like the bursting of pent-up waters into a chasm a thousand fathoms deep.", + ), + ), + ( + r"[S-Us-u](.*)", + ( + "What the ancients called a clever fighter is one who not only wins, but excels in winning with ease.", + "Hence his victories bring him neither reputation for wisdom nor credit for courage.", + "Hence the skillful fighter puts himself into a position which makes defeat impossible, and does not miss the moment for defeating the enemy.", + "In war the victorious strategist only seeks battle after the victory has been won, whereas he who is destined to defeat first fights and afterwards looks for victory.", + "There are not more than five musical notes, yet the combinations of these five give rise to more melodies than can ever be heard.", + "Appear at points which the enemy must hasten to defend; march swiftly to places where you are not expected.", + ), + ), + ( + r"[V-Zv-z](.*)", + ( + "It is a matter of life and death, a road either to safety or to ruin.", + "Hold out baits to entice the enemy. 
class Chat:
    """Pattern/response chatbot engine (Eliza-style).

    User input is matched, in order, against a sequence of regular-expression
    patterns; the first matching pattern yields a randomly chosen response
    template.  ``%N`` markers in a template are replaced by the text of the
    match's group N, with first/second person expressions "reflected"
    (e.g. "I'm" -> "you are") via the supplied mapping.
    """

    def __init__(self, pairs, reflections=None):
        """
        Initialize the chatbot.  Pairs is a list of patterns and responses.  Each
        pattern is a regular expression matching the user's statement or question,
        e.g. r'I like (.*)'.  For each such pattern a list of possible responses
        is given, e.g. ['Why do you like %1', 'Did you ever dislike %1'].  Material
        which is matched by parenthesized sections of the patterns (e.g. .*) is mapped to
        the numbered positions in the responses, e.g. %1.

        :type pairs: list of tuple
        :param pairs: The patterns and responses
        :type reflections: dict
        :param reflections: A mapping between first and second person expressions
        :rtype: None
        """
        # The default used to be a shared mutable ``{}``; ``None`` is used as
        # the sentinel instead to avoid the mutable-default-argument pitfall.
        # Passing an explicit dict behaves exactly as before.
        self._pairs = [(re.compile(x, re.IGNORECASE), y) for (x, y) in pairs]
        self._reflections = reflections if reflections is not None else {}
        # With no reflections, an alternation over zero words would compile to
        # r"\b()\b", which matches the empty string at every word boundary and
        # then crashes substitution with ``KeyError: ''`` -- so only compile
        # the reflection regex when the mapping is non-empty.
        self._regex = self._compile_reflections() if self._reflections else None

    def _compile_reflections(self):
        # Longest keys first so that e.g. "i am" wins over the prefix "i".
        sorted_refl = sorted(self._reflections, key=len, reverse=True)
        return re.compile(
            r"\b({})\b".format("|".join(map(re.escape, sorted_refl))), re.IGNORECASE
        )

    def _substitute(self, str):
        """
        Substitute words in the string, according to the specified reflections,
        e.g. "I'm" -> "you are"

        :type str: str
        :param str: The string to be mapped
        :rtype: str
        """
        # (The parameter name shadows the ``str`` builtin; it is kept for
        # backward compatibility with any existing keyword callers.)
        if self._regex is None:
            # No reflections configured: nothing to substitute.
            return str.lower()
        return self._regex.sub(
            lambda mo: self._reflections[mo.string[mo.start() : mo.end()]], str.lower()
        )

    def _wildcards(self, response, match):
        # Replace each single-digit %N marker in ``response`` with the
        # reflected text of the match's group N.
        pos = response.find("%")
        while pos >= 0:
            num = int(response[pos + 1 : pos + 2])
            response = (
                response[:pos]
                + self._substitute(match.group(num))
                + response[pos + 2 :]
            )
            pos = response.find("%")
        return response

    def respond(self, str):
        """
        Generate a response to the user input.

        :type str: str
        :param str: The string to be mapped
        :rtype: str
        """
        # Check each pattern in order; the first match wins.  Returns ``None``
        # when no pattern matches (typically prevented by a final ``(.*)``).
        for (pattern, response) in self._pairs:
            match = pattern.match(str)

            # did the pattern match?
            if match:
                resp = random.choice(response)  # pick a random response
                resp = self._wildcards(resp, match)  # process wildcards

                # fix munged punctuation at the end
                if resp[-2:] == "?.":
                    resp = resp[:-2] + "."
                if resp[-2:] == "??":
                    resp = resp[:-2] + "?"
                return resp

    # Hold a conversation with a chatbot
    def converse(self, quit="quit"):
        """Read lines from stdin and print responses until ``quit`` is entered."""
        user_input = ""
        while user_input != quit:
            user_input = quit  # on EOF, fall through to quitting the loop
            try:
                user_input = input(">")
            except EOFError:
                print(user_input)
            if user_input:
                # Strip trailing sentence punctuation.  ``rstrip`` (rather than
                # repeatedly indexing the last character) avoids the IndexError
                # the old loop raised when the input consisted only of '!'/'.'.
                user_input = user_input.rstrip("!.")
                print(self.respond(user_input))
Zen Chatbot will usually answer very vaguely, or +respond to a question by asking a different question, in much the same way +as Eliza. +""" + +from nltk.chat.util import Chat, reflections + +# responses are matched top to bottom, so non-specific matches occur later +# for each match, a list of possible responses is provided +responses = ( + # Zen Chatbot opens with the line "Welcome, my child." The usual + # response will be a greeting problem: 'good' matches "good morning", + # "good day" etc, but also "good grief!" and other sentences starting + # with the word 'good' that may not be a greeting + ( + r"(hello(.*))|(good [a-zA-Z]+)", + ( + "The path to enlightenment is often difficult to see.", + "Greetings. I sense your mind is troubled. Tell me of your troubles.", + "Ask the question you have come to ask.", + "Hello. Do you seek englightenment?", + ), + ), + # "I need" and "I want" can be followed by a thing (eg 'help') + # or an action (eg 'to see you') + # + # This is a problem with this style of response - + # person: "I need you" + # chatbot: "me can be achieved by hard work and dedication of the mind" + # i.e. 'you' is not really a thing that can be mapped this way, so this + # interpretation only makes sense for some inputs + # + ( + r"i need (.*)", + ( + "%1 can be achieved by hard work and dedication of the mind.", + "%1 is not a need, but a desire of the mind. Clear your mind of such concerns.", + "Focus your mind on%1, and you will find what you need.", + ), + ), + ( + r"i want (.*)", + ( + "Desires of the heart will distract you from the path to enlightenment.", + "Will%1 help you attain enlightenment?", + "Is%1 a desire of the mind, or of the heart?", + ), + ), + # why questions are separated into three types: + # "why..I" e.g. "why am I here?" "Why do I like cake?" + # "why..you" e.g. "why are you here?" "Why won't you tell me?" + # "why..." e.g. "Why is the sky blue?" + # problems: + # person: "Why can't you tell me?" 
+ # chatbot: "Are you sure I tell you?" + # - this style works for positives (e.g. "why do you like cake?") + # but does not work for negatives (e.g. "why don't you like cake?") + (r"why (.*) i (.*)\?", ("You%1%2?", "Perhaps you only think you%1%2")), + (r"why (.*) you(.*)\?", ("Why%1 you%2?", "%2 I%1", "Are you sure I%2?")), + (r"why (.*)\?", ("I cannot tell you why%1.", "Why do you think %1?")), + # e.g. "are you listening?", "are you a duck" + ( + r"are you (.*)\?", + ("Maybe%1, maybe not%1.", "Whether I am%1 or not is God's business."), + ), + # e.g. "am I a duck?", "am I going to die?" + ( + r"am i (.*)\?", + ("Perhaps%1, perhaps not%1.", "Whether you are%1 or not is not for me to say."), + ), + # what questions, e.g. "what time is it?" + # problems: + # person: "What do you want?" + # chatbot: "Seek truth, not what do me want." + (r"what (.*)\?", ("Seek truth, not what%1.", "What%1 should not concern you.")), + # how questions, e.g. "how do you do?" + ( + r"how (.*)\?", + ( + "How do you suppose?", + "Will an answer to that really help in your search for enlightenment?", + "Ask yourself not how, but why.", + ), + ), + # can questions, e.g. "can you run?", "can you come over here please?" + ( + r"can you (.*)\?", + ( + "I probably can, but I may not.", + "Maybe I can%1, and maybe I cannot.", + "I can do all, and I can do nothing.", + ), + ), + # can questions, e.g. "can I have some cake?", "can I know truth?" + ( + r"can i (.*)\?", + ( + "You can%1 if you believe you can%1, and have a pure spirit.", + "Seek truth and you will know if you can%1.", + ), + ), + # e.g. "It is raining" - implies the speaker is certain of a fact + ( + r"it is (.*)", + ( + "How can you be certain that%1, when you do not even know yourself?", + "Whether it is%1 or not does not change the way the world is.", + ), + ), + # e.g. "is there a doctor in the house?" + ( + r"is there (.*)\?", + ("There is%1 if you believe there is.", "It is possible that there is%1."), + ), + # e.g. 
"is it possible?", "is this true?" + (r"is(.*)\?", ("%1 is not relevant.", "Does this matter?")), + # non-specific question + ( + r"(.*)\?", + ( + "Do you think %1?", + "You seek the truth. Does the truth seek you?", + "If you intentionally pursue the answers to your questions, the answers become hard to see.", + "The answer to your question cannot be told. It must be experienced.", + ), + ), + # expression of hate of form "I hate you" or "Kelly hates cheese" + ( + r"(.*) (hate[s]?)|(dislike[s]?)|(don\'t like)(.*)", + ( + "Perhaps it is not about hating %2, but about hate from within.", + "Weeds only grow when we dislike them", + "Hate is a very strong emotion.", + ), + ), + # statement containing the word 'truth' + ( + r"(.*) truth(.*)", + ( + "Seek truth, and truth will seek you.", + "Remember, it is not the spoon which bends - only yourself.", + "The search for truth is a long journey.", + ), + ), + # desire to do an action + # e.g. "I want to go shopping" + ( + r"i want to (.*)", + ("You may %1 if your heart truly desires to.", "You may have to %1."), + ), + # desire for an object + # e.g. "I want a pony" + ( + r"i want (.*)", + ( + "Does your heart truly desire %1?", + "Is this a desire of the heart, or of the mind?", + ), + ), + # e.g. "I can't wait" or "I can't do this" + ( + r"i can\'t (.*)", + ( + "What we can and can't do is a limitation of the mind.", + "There are limitations of the body, and limitations of the mind.", + "Have you tried to%1 with a clear mind?", + ), + ), + # "I think.." indicates uncertainty. e.g. "I think so." + # problem: exceptions... + # e.g. "I think, therefore I am" + ( + r"i think (.*)", + ( + "Uncertainty in an uncertain world.", + "Indeed, how can we be certain of anything in such uncertain times.", + "Are you not, in fact, certain that%1?", + ), + ), + # "I feel...emotions/sick/light-headed..." + ( + r"i feel (.*)", + ( + "Your body and your emotions are both symptoms of your mind." 
+ "What do you believe is the root of such feelings?", + "Feeling%1 can be a sign of your state-of-mind.", + ), + ), + # exclaimation mark indicating emotion + # e.g. "Wow!" or "No!" + ( + r"(.*)!", + ( + "I sense that you are feeling emotional today.", + "You need to calm your emotions.", + ), + ), + # because [statement] + # e.g. "because I said so" + ( + r"because (.*)", + ( + "Does knowning the reasons behind things help you to understand" + " the things themselves?", + "If%1, what else must be true?", + ), + ), + # yes or no - raise an issue of certainty/correctness + ( + r"(yes)|(no)", + ( + "Is there certainty in an uncertain world?", + "It is better to be right than to be certain.", + ), + ), + # sentence containing word 'love' + ( + r"(.*)love(.*)", + ( + "Think of the trees: they let the birds perch and fly with no intention to call them when they come, and no longing for their return when they fly away. Let your heart be like the trees.", + "Free love!", + ), + ), + # sentence containing word 'understand' - r + ( + r"(.*)understand(.*)", + ( + "If you understand, things are just as they are;" + " if you do not understand, things are just as they are.", + "Imagination is more important than knowledge.", + ), + ), + # 'I', 'me', 'my' - person is talking about themself. + # this breaks down when words contain these - eg 'Thyme', 'Irish' + ( + r"(.*)(me )|( me)|(my)|(mine)|(i)(.*)", + ( + "'I', 'me', 'my'... these are selfish expressions.", + "Have you ever considered that you might be a selfish person?", + "Try to consider others, not just yourself.", + "Think not just of yourself, but of others.", + ), + ), + # 'you' starting a sentence + # e.g. "you stink!" + ( + r"you (.*)", + ("My path is not of concern to you.", "I am but one, and you but one more."), + ), + # say goodbye with some extra Zen wisdom. + ( + r"exit", + ( + "Farewell. The obstacle is the path.", + "Farewell. Life is a journey, not a destination.", + "Good bye. 
def zen_chat():
    """Print the Zen Chatbot banner and start an interactive session."""
    rule = "*" * 75
    banner = [
        rule,
        "Zen Chatbot!".center(75),
        rule,
        '"Look beyond mere words and letters - look into your mind"'.center(75),
        "* Talk your way to truth with Zen Chatbot.",
        "* Type 'quit' when you have had enough.",
        rule,
        "Welcome, my child.",
    ]
    print("\n".join(banner))
    zen_chatbot.converse()


def demo():
    """Demo entry point: run the Zen chatbot interactively."""
    zen_chat()
a/lib/python3.10/site-packages/nltk/draw/cfg.py b/lib/python3.10/site-packages/nltk/draw/cfg.py new file mode 100644 index 0000000000000000000000000000000000000000..650162abf095d439cf7ca2ba3f0f36c81f0ed041 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/draw/cfg.py @@ -0,0 +1,859 @@ +# Natural Language Toolkit: CFG visualization +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +Visualization tools for CFGs. +""" + +# Idea for a nice demo: +# - 3 panes: grammar, treelet, working area +# - grammar is a list of productions +# - when you select a production, the treelet that it licenses appears +# in the treelet area +# - the working area has the text on the bottom, and S at top. When +# you select a production, it shows (ghosted) the locations where +# that production's treelet could be attached to either the text +# or the tree rooted at S. +# - the user can drag the treelet onto one of those (or click on them?) +# - the user can delete pieces of the tree from the working area +# (right click?) +# - connecting top to bottom? drag one NP onto another? +# +# +-------------------------------------------------------------+ +# | S -> NP VP | S | +# |[NP -> Det N ]| / \ | +# | ... | NP VP | +# | N -> 'dog' | | +# | N -> 'cat' | | +# | ... 
class ProductionList(ColorizedList):
    """A colorized list widget that renders CFG productions, coloring
    nonterminals, terminals, and the production arrow differently."""

    ARROW = SymbolWidget.SYMBOLS["rightarrow"]

    def _init_colortags(self, textwidget, options):
        # Register one text tag per token category.
        tag_specs = (
            ("terminal", {"foreground": "#006000"}),
            ("arrow", {"font": "symbol", "underline": "0"}),
            ("nonterminal", {"foreground": "blue", "font": ("helvetica", -12, "bold")}),
        )
        for tag, config in tag_specs:
            textwidget.tag_config(tag, **config)

    def _item_repr(self, item):
        # Render a production as (text, tag) pairs: LHS, arrow, then each
        # RHS element tagged by whether it is a nonterminal or a terminal.
        contents = [("%s\t" % item.lhs(), "nonterminal"), (self.ARROW, "arrow")]
        for elt in item.rhs():
            if isinstance(elt, Nonterminal):
                contents.append((" %s" % elt.symbol(), "nonterminal"))
            else:
                contents.append((" %r" % elt, "terminal"))
        return contents
_CFGEditor_HELP = """

The CFG Editor can be used to create or modify context free grammars.
A context free grammar consists of a start symbol and a list of
productions. The start symbol is specified by the text entry field in
the upper right hand corner of the editor; and the list of productions
are specified in the main text editing box.

Every non-blank line specifies a single production. Each production
has the form "LHS -> RHS," where LHS is a single nonterminal, and RHS
is a list of nonterminals and terminals.

Nonterminals must be a single word, such as S or NP or NP_subj.
Currently, nonterminals must consists of alphanumeric characters and
underscores (_). Nonterminals are colored blue. If you place the
mouse over any nonterminal, then all occurrences of that nonterminal
will be highlighted.

Terminals must be surrounded by single quotes (') or double
quotes(\"). For example, "dog" and "New York" are terminals.
Currently, the string within the quotes must consist of alphanumeric
characters, underscores, and spaces.

To enter a new production, go to a blank line, and type a nonterminal,
followed by an arrow (->), followed by a sequence of terminals and
nonterminals. Note that "->" (dash + greater-than) is automatically
converted to an arrow symbol. When you move your cursor to a
different line, your production will automatically be colorized. If
there are any errors, they will be highlighted in red.

Note that the order of the productions is significant for some
algorithms. To re-order the productions, use cut and paste to move
them.

Use the buttons at the bottom of the window when you are done editing
the CFG:
 - Ok: apply the new CFG, and exit the editor.
 - Apply: apply the new CFG, and do not exit the editor.
 - Reset: revert to the original CFG, and do not exit the editor.
 - Cancel: revert to the original CFG, and exit the editor.

"""


class CFGEditor:
    """
    A dialog window for creating and editing context free grammars.
    ``CFGEditor`` imposes the following restrictions:

    - All nonterminals must be strings consisting of word
      characters.
    - All terminals must be strings consisting of word characters
      and space characters.
    """

    # Regular expressions used by _analyze_line.  Precompile them, so
    # we can process the text faster.
    ARROW = SymbolWidget.SYMBOLS["rightarrow"]
    _LHS_RE = re.compile(r"(^\s*\w+\s*)(->|(" + ARROW + "))")
    _ARROW_RE = re.compile(r"\s*(->|(" + ARROW + r"))\s*")
    _PRODUCTION_RE = re.compile(
        r"(^\s*\w+\s*)"  # LHS
        + "(->|("
        + ARROW
        + r"))\s*"  # arrow
        + r"((\w+|'[\w ]*'|\"[\w ]*\"|\|)\s*)*$"  # RHS
    )
    _TOKEN_RE = re.compile("\\w+|->|'[\\w ]+'|\"[\\w ]+\"|(" + ARROW + ")")
    _BOLD = ("helvetica", -12, "bold")

    def __init__(self, parent, cfg=None, set_cfg_callback=None):
        """
        Create a new CFG editor dialog.

        :param parent: The parent Tk widget for the Toplevel window.
        :param cfg: The grammar to edit; defaults to an empty grammar
            with start symbol ``S``.
        :param set_cfg_callback: Called with the new ``CFG`` whenever
            the user applies (or resets) the grammar.
        """
        self._parent = parent
        if cfg is not None:
            self._cfg = cfg
        else:
            self._cfg = CFG(Nonterminal("S"), [])
        self._set_cfg_callback = set_cfg_callback

        self._highlight_matching_nonterminals = 1

        # Create the top-level window.
        self._top = Toplevel(parent)
        self._init_bindings()

        self._init_startframe()
        self._startframe.pack(side="top", fill="x", expand=0)
        self._init_prodframe()
        self._prodframe.pack(side="top", fill="both", expand=1)
        self._init_buttons()
        self._buttonframe.pack(side="bottom", fill="x", expand=0)

        self._textwidget.focus()

    def _init_startframe(self):
        """Build the top row: start-symbol entry plus labels."""
        frame = self._startframe = Frame(self._top)
        self._start = Entry(frame)
        self._start.pack(side="right")
        Label(frame, text="Start Symbol:").pack(side="right")
        Label(frame, text="Productions:").pack(side="left")
        self._start.insert(0, self._cfg.start().symbol())

    def _init_buttons(self):
        """Build the bottom button row (Ok/Apply/Reset/Cancel/Help)."""
        frame = self._buttonframe = Frame(self._top)
        Button(frame, text="Ok", command=self._ok, underline=0, takefocus=0).pack(
            side="left"
        )
        Button(frame, text="Apply", command=self._apply, underline=0, takefocus=0).pack(
            side="left"
        )
        Button(frame, text="Reset", command=self._reset, underline=0, takefocus=0).pack(
            side="left"
        )
        Button(
            frame, text="Cancel", command=self._cancel, underline=0, takefocus=0
        ).pack(side="left")
        Button(frame, text="Help", command=self._help, underline=0, takefocus=0).pack(
            side="right"
        )

    def _init_bindings(self):
        """Set window title and keyboard shortcuts."""
        # NOTE(review): the event strings below appear truncated in this
        # copy (the angle-bracketed key names are missing, e.g.
        # "<Control-q>"); verify against the upstream NLTK source.
        self._top.title("CFG Editor")
        self._top.bind("", self._cancel)
        self._top.bind("", self._cancel)
        self._top.bind("", self._cancel)
        self._top.bind("", self._cancel)
        self._top.bind("", self._cancel)
        self._top.bind("", self._cancel)

        self._top.bind("", self._ok)
        self._top.bind("", self._ok)
        self._top.bind("", self._apply)
        self._top.bind("", self._apply)
        self._top.bind("", self._reset)
        self._top.bind("", self._reset)
        self._top.bind("", self._help)
        self._top.bind("", self._help)
        self._top.bind("", self._help)

    def _init_prodframe(self):
        """Build the main production-editing text area."""
        self._prodframe = Frame(self._top)

        # Create the basic Text widget & scrollbar.
        self._textwidget = Text(
            self._prodframe, background="#e0e0e0", exportselection=1
        )
        self._textscroll = Scrollbar(self._prodframe, takefocus=0, orient="vertical")
        self._textwidget.config(yscrollcommand=self._textscroll.set)
        self._textscroll.config(command=self._textwidget.yview)
        self._textscroll.pack(side="right", fill="y")
        self._textwidget.pack(expand=1, fill="both", side="left")

        # Initialize the colorization tags.  Each nonterminal gets its
        # own tag, so they aren't listed here.
        self._textwidget.tag_config("terminal", foreground="#006000")
        self._textwidget.tag_config("arrow", font="symbol")
        self._textwidget.tag_config("error", background="red")

        # Keep track of what line they're on.  We use that to remember
        # to re-analyze a line whenever they leave it.
        self._linenum = 0

        # Expand "->" to an arrow.
        self._top.bind(">", self._replace_arrows)

        # Re-colorize lines when appropriate.
        # NOTE(review): these event strings also look truncated; verify.
        self._top.bind("<>", self._analyze)
        self._top.bind("", self._check_analyze)
        self._top.bind("", self._check_analyze)

        # Tab cycles focus. (why doesn't this work??)
        def cycle(e, textwidget=self._textwidget):
            textwidget.tk_focusNext().focus()

        self._textwidget.bind("", cycle)

        # Merge consecutive productions that share a LHS into a single
        # "LHS -> rhs1 | rhs2 | ..." display line; empty RHSs are kept
        # on their own line.  (Debug print() calls removed.)
        prod_tuples = [(p.lhs(), [p.rhs()]) for p in self._cfg.productions()]
        for i in range(len(prod_tuples) - 1, 0, -1):
            if prod_tuples[i][0] == prod_tuples[i - 1][0]:
                if () in prod_tuples[i][1]:
                    continue
                if () in prod_tuples[i - 1][1]:
                    continue
                prod_tuples[i - 1][1].extend(prod_tuples[i][1])
                del prod_tuples[i]

        for lhs, rhss in prod_tuples:
            s = "%s ->" % lhs
            for rhs in rhss:
                for elt in rhs:
                    if isinstance(elt, Nonterminal):
                        s += " %s" % elt
                    else:
                        s += " %r" % elt
                s += " |"
            s = s[:-2] + "\n"
            self._textwidget.insert("end", s)

        self._analyze()

    def _clear_tags(self, linenum):
        """
        Remove all tags (except ``arrow`` and ``sel``) from the given
        line of the text widget used for editing the productions.
        """
        start = "%d.0" % linenum
        end = "%d.end" % linenum
        for tag in self._textwidget.tag_names():
            if tag not in ("arrow", "sel"):
                self._textwidget.tag_remove(tag, start, end)

    def _check_analyze(self, *e):
        """
        Check if we've moved to a new line.  If we have, then remove
        all colorization from the line we moved to, and re-colorize
        the line that we moved from.
        """
        linenum = int(self._textwidget.index("insert").split(".")[0])
        if linenum != self._linenum:
            self._clear_tags(linenum)
            self._analyze_line(self._linenum)
            self._linenum = linenum

    def _replace_arrows(self, *e):
        """
        Replace any ``'->'`` text strings with arrows (char \\256, in
        symbol font).  This searches the whole buffer, but is fast
        enough to be done anytime they press '>'.
        """
        arrow = "1.0"
        while True:
            arrow = self._textwidget.search("->", arrow, "end+1char")
            if arrow == "":
                break
            self._textwidget.delete(arrow, arrow + "+2char")
            self._textwidget.insert(arrow, self.ARROW, "arrow")
            self._textwidget.insert(arrow, "\t")

        arrow = "1.0"
        while True:
            arrow = self._textwidget.search(self.ARROW, arrow + "+1char", "end+1char")
            if arrow == "":
                break
            self._textwidget.tag_add("arrow", arrow, arrow + "+1char")

    def _analyze_token(self, match, linenum):
        """
        Given a line number and a regexp match for a token on that
        line, colorize the token.  Note that the regexp match gives us
        the token's text, start index (on the line), and end index (on
        the line).
        """
        # What type of token is it?
        if match.group()[0] in "'\"":
            tag = "terminal"
        elif match.group() in ("->", self.ARROW):
            tag = "arrow"
        else:
            # If it's a nonterminal, then set up new bindings, so we
            # can highlight all instances of that nonterminal when we
            # put the mouse over it.
            tag = "nonterminal_" + match.group()
            if tag not in self._textwidget.tag_names():
                self._init_nonterminal_tag(tag)

        start = "%d.%d" % (linenum, match.start())
        end = "%d.%d" % (linenum, match.end())
        self._textwidget.tag_add(tag, start, end)

    def _init_nonterminal_tag(self, tag, foreground="blue"):
        """Create a per-nonterminal tag, with hover highlighting."""
        self._textwidget.tag_config(tag, foreground=foreground, font=CFGEditor._BOLD)
        if not self._highlight_matching_nonterminals:
            return

        def enter(e, textwidget=self._textwidget, tag=tag):
            textwidget.tag_config(tag, background="#80ff80")

        def leave(e, textwidget=self._textwidget, tag=tag):
            textwidget.tag_config(tag, background="")

        # NOTE(review): event strings truncated in this copy
        # (presumably "<Enter>"/"<Leave>"); verify against upstream.
        self._textwidget.tag_bind(tag, "", enter)
        self._textwidget.tag_bind(tag, "", leave)

    def _analyze_line(self, linenum):
        """
        Colorize a given line.
        """
        # Get rid of any tags that were previously on the line.
        self._clear_tags(linenum)

        # Get the line's text string.
        line = self._textwidget.get(repr(linenum) + ".0", repr(linenum) + ".end")

        # If it's a valid production, then colorize each token.
        if CFGEditor._PRODUCTION_RE.match(line):
            # It's valid; Use _TOKEN_RE to tokenize the production,
            # and call analyze_token on each token.
            def analyze_token(match, self=self, linenum=linenum):
                self._analyze_token(match, linenum)
                return ""

            CFGEditor._TOKEN_RE.sub(analyze_token, line)
        elif line.strip() != "":
            # It's invalid; show the user where the error is.
            self._mark_error(linenum, line)

    def _mark_error(self, linenum, line):
        """
        Mark the location of an error in a line.
        """
        arrowmatch = CFGEditor._ARROW_RE.search(line)
        if not arrowmatch:
            # If there's no arrow at all, highlight the whole line.
            start = "%d.0" % linenum
            end = "%d.end" % linenum
        elif not CFGEditor._LHS_RE.match(line):
            # Otherwise, if the LHS is bad, highlight it.
            start = "%d.0" % linenum
            end = "%d.%d" % (linenum, arrowmatch.start())
        else:
            # Otherwise, highlight the RHS.
            start = "%d.%d" % (linenum, arrowmatch.end())
            end = "%d.end" % linenum

        # If we're highlighting 0 chars, highlight the whole line.
        if self._textwidget.compare(start, "==", end):
            start = "%d.0" % linenum
            end = "%d.end" % linenum
        self._textwidget.tag_add("error", start, end)

    def _analyze(self, *e):
        """
        Replace ``->`` with arrows, and colorize the entire buffer.
        """
        self._replace_arrows()
        numlines = int(self._textwidget.index("end").split(".")[0])
        for linenum in range(1, numlines + 1):  # line numbers start at 1.
            self._analyze_line(linenum)

    def _parse_productions(self):
        """
        Parse the current contents of the textwidget buffer, to create
        a list of productions.
        """
        productions = []

        # Get the text, normalize it, and split it into lines.
        text = self._textwidget.get("1.0", "end")
        text = re.sub(self.ARROW, "->", text)
        text = re.sub("\t", " ", text)
        lines = text.split("\n")

        # Convert each line to a CFG production.
        for line in lines:
            line = line.strip()
            if line == "":
                continue
            productions += _read_cfg_production(line)

        return productions

    def _destroy(self, *e):
        """Close the editor window (idempotent)."""
        if self._top is None:
            return
        self._top.destroy()
        self._top = None

    def _ok(self, *e):
        """Apply the edited grammar and close the editor."""
        self._apply()
        self._destroy()

    def _apply(self, *e):
        """Build a CFG from the buffer and hand it to the callback."""
        productions = self._parse_productions()
        start = Nonterminal(self._start.get())
        cfg = CFG(start, productions)
        if self._set_cfg_callback is not None:
            self._set_cfg_callback(cfg)

    def _reset(self, *e):
        """Restore the buffer (and callback) to the original grammar."""
        self._textwidget.delete("1.0", "end")
        for production in self._cfg.productions():
            self._textwidget.insert("end", "%s\n" % production)
        self._analyze()
        if self._set_cfg_callback is not None:
            self._set_cfg_callback(self._cfg)

    def _cancel(self, *e):
        """Revert to the original grammar (best-effort) and close."""
        try:
            self._reset()
        except Exception:
            # Best-effort: still close the window even if reset fails
            # (was a bare `except:`; narrowed so KeyboardInterrupt and
            # SystemExit are no longer swallowed).
            pass
        self._destroy()

    def _help(self, *e):
        """Show the help text in a ShowText dialog."""
        # The default font's not very legible; try using 'fixed' instead.
        try:
            ShowText(
                self._parent,
                "Help: Chart Parser Demo",
                (_CFGEditor_HELP).strip(),
                width=75,
                font="fixed",
            )
        except Exception:
            # Fall back to the default font if 'fixed' is unavailable
            # (was a bare `except:`; narrowed).
            ShowText(
                self._parent,
                "Help: Chart Parser Demo",
                (_CFGEditor_HELP).strip(),
                width=75,
            )


######################################################################
# New Demo (built tree based on cfg)
######################################################################


class CFGDemo:
    def __init__(self, grammar, text):
        """
        :param grammar: The ``CFG`` whose productions are displayed.
        :param text: The list of words shown as leaves in the workspace.
        """
        self._grammar = grammar
        self._text = text

        # Set up the main window.
        self._top = Tk()
        self._top.title("Context Free Grammar Demo")

        # Base font size
        self._size = IntVar(self._top)
        self._size.set(12)  # = medium

        # Set up the key bindings
        self._init_bindings(self._top)

        # Create the basic frames
        frame1 = Frame(self._top)
        frame1.pack(side="left", fill="y", expand=0)
        self._init_menubar(self._top)
        self._init_buttons(self._top)
        self._init_grammar(frame1)
        self._init_treelet(frame1)
        self._init_workspace(self._top)

    # //////////////////////////////////////////////////
    # Initialization
    # //////////////////////////////////////////////////

    def _init_bindings(self, top):
        # NOTE(review): event string truncated in this copy; verify.
        top.bind("", self.destroy)

    def _init_menubar(self, parent):
        pass

    def _init_buttons(self, parent):
        pass

    def _init_grammar(self, parent):
        """Create the production list pane and hook up callbacks."""
        self._prodlist = ProductionList(parent, self._grammar, width=20)
        self._prodlist.pack(side="top", fill="both", expand=1)
        self._prodlist.focus()
        self._prodlist.add_callback("select", self._selectprod_cb)
        self._prodlist.add_callback("move", self._selectprod_cb)

    def _init_treelet(self, parent):
        """Create the canvas used to preview the selected production."""
        self._treelet_canvas = Canvas(parent, background="white")
        self._treelet_canvas.pack(side="bottom", fill="x")
        self._treelet = None

    def _init_workspace(self, parent):
        """Create the main workspace canvas and draw the initial tree."""
        self._workspace = CanvasFrame(parent, background="white")
        self._workspace.pack(side="right", fill="both", expand=1)
        self._tree = None
        self.reset_workspace()

+ # ////////////////////////////////////////////////// + # Workspace + # ////////////////////////////////////////////////// + + def reset_workspace(self): + c = self._workspace.canvas() + fontsize = int(self._size.get()) + node_font = ("helvetica", -(fontsize + 4), "bold") + leaf_font = ("helvetica", -(fontsize + 2)) + + # Remove the old tree + if self._tree is not None: + self._workspace.remove_widget(self._tree) + + # The root of the tree. + start = self._grammar.start().symbol() + rootnode = TextWidget(c, start, font=node_font, draggable=1) + + # The leaves of the tree. + leaves = [] + for word in self._text: + leaves.append(TextWidget(c, word, font=leaf_font, draggable=1)) + + # Put it all together into one tree + self._tree = TreeSegmentWidget(c, rootnode, leaves, color="white") + + # Add it to the workspace. + self._workspace.add_widget(self._tree) + + # Move the leaves to the bottom of the workspace. + for leaf in leaves: + leaf.move(0, 100) + + # self._nodes = {start:1} + # self._leaves = dict([(l,1) for l in leaves]) + + def workspace_markprod(self, production): + pass + + def _markproduction(self, prod, tree=None): + if tree is None: + tree = self._tree + for i in range(len(tree.subtrees()) - len(prod.rhs())): + if tree["color", i] == "white": + self._markproduction # FIXME: Is this necessary at all? + + for j, node in enumerate(prod.rhs()): + widget = tree.subtrees()[i + j] + if ( + isinstance(node, Nonterminal) + and isinstance(widget, TreeSegmentWidget) + and node.symbol == widget.label().text() + ): + pass # matching nonterminal + elif ( + isinstance(node, str) + and isinstance(widget, TextWidget) + and node == widget.text() + ): + pass # matching nonterminal + else: + break + else: + # Everything matched! 
+ print("MATCH AT", i) + + # ////////////////////////////////////////////////// + # Grammar + # ////////////////////////////////////////////////// + + def _selectprod_cb(self, production): + canvas = self._treelet_canvas + + self._prodlist.highlight(production) + if self._treelet is not None: + self._treelet.destroy() + + # Convert the production to a tree. + rhs = production.rhs() + for (i, elt) in enumerate(rhs): + if isinstance(elt, Nonterminal): + elt = Tree(elt) + tree = Tree(production.lhs().symbol(), *rhs) + + # Draw the tree in the treelet area. + fontsize = int(self._size.get()) + node_font = ("helvetica", -(fontsize + 4), "bold") + leaf_font = ("helvetica", -(fontsize + 2)) + self._treelet = tree_to_treesegment( + canvas, tree, node_font=node_font, leaf_font=leaf_font + ) + self._treelet["draggable"] = 1 + + # Center the treelet. + (x1, y1, x2, y2) = self._treelet.bbox() + w, h = int(canvas["width"]), int(canvas["height"]) + self._treelet.move((w - x1 - x2) / 2, (h - y1 - y2) / 2) + + # Mark the places where we can add it to the workspace. 
+ self._markproduction(production) + + def destroy(self, *args): + self._top.destroy() + + def mainloop(self, *args, **kwargs): + self._top.mainloop(*args, **kwargs) + + +def demo2(): + from nltk import CFG, Nonterminal, Production + + nonterminals = "S VP NP PP P N Name V Det" + (S, VP, NP, PP, P, N, Name, V, Det) = (Nonterminal(s) for s in nonterminals.split()) + productions = ( + # Syntactic Productions + Production(S, [NP, VP]), + Production(NP, [Det, N]), + Production(NP, [NP, PP]), + Production(VP, [VP, PP]), + Production(VP, [V, NP, PP]), + Production(VP, [V, NP]), + Production(PP, [P, NP]), + Production(PP, []), + Production(PP, ["up", "over", NP]), + # Lexical Productions + Production(NP, ["I"]), + Production(Det, ["the"]), + Production(Det, ["a"]), + Production(N, ["man"]), + Production(V, ["saw"]), + Production(P, ["in"]), + Production(P, ["with"]), + Production(N, ["park"]), + Production(N, ["dog"]), + Production(N, ["statue"]), + Production(Det, ["my"]), + ) + grammar = CFG(S, productions) + + text = "I saw a man in the park".split() + d = CFGDemo(grammar, text) + d.mainloop() + + +###################################################################### +# Old Demo +###################################################################### + + +def demo(): + from nltk import CFG, Nonterminal + + nonterminals = "S VP NP PP P N Name V Det" + (S, VP, NP, PP, P, N, Name, V, Det) = (Nonterminal(s) for s in nonterminals.split()) + + grammar = CFG.fromstring( + """ + S -> NP VP + PP -> P NP + NP -> Det N + NP -> NP PP + VP -> V NP + VP -> VP PP + Det -> 'a' + Det -> 'the' + Det -> 'my' + NP -> 'I' + N -> 'dog' + N -> 'man' + N -> 'park' + N -> 'statue' + V -> 'saw' + P -> 'in' + P -> 'up' + P -> 'over' + P -> 'with' + """ + ) + + def cb(grammar): + print(grammar) + + top = Tk() + editor = CFGEditor(top, grammar, cb) + Label(top, text="\nTesting CFG Editor\n").pack() + Button(top, text="Quit", command=top.destroy).pack() + top.mainloop() + + +def demo3(): + from 
nltk import Production + + (S, VP, NP, PP, P, N, Name, V, Det) = nonterminals( + "S, VP, NP, PP, P, N, Name, V, Det" + ) + + productions = ( + # Syntactic Productions + Production(S, [NP, VP]), + Production(NP, [Det, N]), + Production(NP, [NP, PP]), + Production(VP, [VP, PP]), + Production(VP, [V, NP, PP]), + Production(VP, [V, NP]), + Production(PP, [P, NP]), + Production(PP, []), + Production(PP, ["up", "over", NP]), + # Lexical Productions + Production(NP, ["I"]), + Production(Det, ["the"]), + Production(Det, ["a"]), + Production(N, ["man"]), + Production(V, ["saw"]), + Production(P, ["in"]), + Production(P, ["with"]), + Production(N, ["park"]), + Production(N, ["dog"]), + Production(N, ["statue"]), + Production(Det, ["my"]), + ) + + t = Tk() + + def destroy(e, t=t): + t.destroy() + + t.bind("q", destroy) + p = ProductionList(t, productions) + p.pack(expand=1, fill="both") + p.add_callback("select", p.markonly) + p.add_callback("move", p.markonly) + p.focus() + p.mark(productions[2]) + p.mark(productions[8]) + + +if __name__ == "__main__": + demo() diff --git a/lib/python3.10/site-packages/nltk/draw/dispersion.py b/lib/python3.10/site-packages/nltk/draw/dispersion.py new file mode 100644 index 0000000000000000000000000000000000000000..0991194dc42e1c258b6e62c3e8dfb71d44bb3ce6 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/draw/dispersion.py @@ -0,0 +1,63 @@ +# Natural Language Toolkit: Dispersion Plots +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +A utility for displaying lexical dispersion. +""" + + +def dispersion_plot(text, words, ignore_case=False, title="Lexical Dispersion Plot"): + """ + Generate a lexical dispersion plot. 
+ + :param text: The source text + :type text: list(str) or iter(str) + :param words: The target words + :type words: list of str + :param ignore_case: flag to set if case should be ignored when searching text + :type ignore_case: bool + :return: a matplotlib Axes object that may still be modified before plotting + :rtype: Axes + """ + + try: + import matplotlib.pyplot as plt + except ImportError as e: + raise ImportError( + "The plot function requires matplotlib to be installed. " + "See https://matplotlib.org/" + ) from e + + word2y = { + word.casefold() if ignore_case else word: y + for y, word in enumerate(reversed(words)) + } + xs, ys = [], [] + for x, token in enumerate(text): + token = token.casefold() if ignore_case else token + y = word2y.get(token) + if y is not None: + xs.append(x) + ys.append(y) + + _, ax = plt.subplots() + ax.plot(xs, ys, "|") + ax.set_yticks(list(range(len(words))), words, color="C0") + ax.set_ylim(-1, len(words)) + ax.set_title(title) + ax.set_xlabel("Word Offset") + return ax + + +if __name__ == "__main__": + import matplotlib.pyplot as plt + + from nltk.corpus import gutenberg + + words = ["Elinor", "Marianne", "Edward", "Willoughby"] + dispersion_plot(gutenberg.words("austen-sense.txt"), words) + plt.show() diff --git a/lib/python3.10/site-packages/nltk/draw/table.py b/lib/python3.10/site-packages/nltk/draw/table.py new file mode 100644 index 0000000000000000000000000000000000000000..0d3526d5f1bf223684a1293dd5ff32ef6cbbbf55 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/draw/table.py @@ -0,0 +1,1177 @@ +# Natural Language Toolkit: Table widget +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +Tkinter widgets for displaying multi-column listboxes and tables. 
+""" + +import operator +from tkinter import Frame, Label, Listbox, Scrollbar, Tk + +###################################################################### +# Multi-Column Listbox +###################################################################### + + +class MultiListbox(Frame): + """ + A multi-column listbox, where the current selection applies to an + entire row. Based on the MultiListbox Tkinter widget + recipe from the Python Cookbook (https://code.activestate.com/recipes/52266/) + + For the most part, ``MultiListbox`` methods delegate to its + contained listboxes. For any methods that do not have docstrings, + see ``Tkinter.Listbox`` for a description of what that method does. + """ + + # ///////////////////////////////////////////////////////////////// + # Configuration + # ///////////////////////////////////////////////////////////////// + + #: Default configuration values for the frame. + FRAME_CONFIG = dict(background="#888", takefocus=True, highlightthickness=1) + + #: Default configurations for the column labels. + LABEL_CONFIG = dict( + borderwidth=1, + relief="raised", + font="helvetica -16 bold", + background="#444", + foreground="white", + ) + + #: Default configuration for the column listboxes. + LISTBOX_CONFIG = dict( + borderwidth=1, + selectborderwidth=0, + highlightthickness=0, + exportselection=False, + selectbackground="#888", + activestyle="none", + takefocus=False, + ) + + # ///////////////////////////////////////////////////////////////// + # Constructor + # ///////////////////////////////////////////////////////////////// + + def __init__(self, master, columns, column_weights=None, cnf={}, **kw): + """ + Construct a new multi-column listbox widget. + + :param master: The widget that should contain the new + multi-column listbox. + + :param columns: Specifies what columns should be included in + the new multi-column listbox. If ``columns`` is an integer, + then it is the number of columns to include. 
If it is + a list, then its length indicates the number of columns + to include; and each element of the list will be used as + a label for the corresponding column. + + :param cnf, kw: Configuration parameters for this widget. + Use ``label_*`` to configure all labels; and ``listbox_*`` + to configure all listboxes. E.g.: + >>> root = Tk() # doctest: +SKIP + >>> MultiListbox(root, ["Subject", "Sender", "Date"], label_foreground='red').pack() # doctest: +SKIP + """ + # If columns was specified as an int, convert it to a list. + if isinstance(columns, int): + columns = list(range(columns)) + include_labels = False + else: + include_labels = True + + if len(columns) == 0: + raise ValueError("Expected at least one column") + + # Instance variables + self._column_names = tuple(columns) + self._listboxes = [] + self._labels = [] + + # Pick a default value for column_weights, if none was specified. + if column_weights is None: + column_weights = [1] * len(columns) + elif len(column_weights) != len(columns): + raise ValueError("Expected one column_weight for each column") + self._column_weights = column_weights + + # Configure our widgets. 
+ Frame.__init__(self, master, **self.FRAME_CONFIG) + self.grid_rowconfigure(1, weight=1) + for i, label in enumerate(self._column_names): + self.grid_columnconfigure(i, weight=column_weights[i]) + + # Create a label for the column + if include_labels: + l = Label(self, text=label, **self.LABEL_CONFIG) + self._labels.append(l) + l.grid(column=i, row=0, sticky="news", padx=0, pady=0) + l.column_index = i + + # Create a listbox for the column + lb = Listbox(self, **self.LISTBOX_CONFIG) + self._listboxes.append(lb) + lb.grid(column=i, row=1, sticky="news", padx=0, pady=0) + lb.column_index = i + + # Clicking or dragging selects: + lb.bind("", self._select) + lb.bind("", self._select) + # Scroll wheel scrolls: + lb.bind("", lambda e: self._scroll(-1)) + lb.bind("", lambda e: self._scroll(+1)) + lb.bind("", lambda e: self._scroll(e.delta)) + # Button 2 can be used to scan: + lb.bind("", lambda e: self.scan_mark(e.x, e.y)) + lb.bind("", lambda e: self.scan_dragto(e.x, e.y)) + # Dragging outside the window has no effect (disable + # the default listbox behavior, which scrolls): + lb.bind("", lambda e: "break") + # Columns can be resized by dragging them: + lb.bind("", self._resize_column) + + # Columns can be resized by dragging them. (This binding is + # used if they click on the grid between columns:) + self.bind("", self._resize_column) + + # Set up key bindings for the widget: + self.bind("", lambda e: self.select(delta=-1)) + self.bind("", lambda e: self.select(delta=1)) + self.bind("", lambda e: self.select(delta=-self._pagesize())) + self.bind("", lambda e: self.select(delta=self._pagesize())) + + # Configuration customizations + self.configure(cnf, **kw) + + # ///////////////////////////////////////////////////////////////// + # Column Resizing + # ///////////////////////////////////////////////////////////////// + + def _resize_column(self, event): + """ + Callback used to resize a column of the table. 
Return ``True`` + if the column is actually getting resized (if the user clicked + on the far left or far right 5 pixels of a label); and + ``False`` otherwies. + """ + # If we're already waiting for a button release, then ignore + # the new button press. + if event.widget.bind(""): + return False + + # Decide which column (if any) to resize. + self._resize_column_index = None + if event.widget is self: + for i, lb in enumerate(self._listboxes): + if abs(event.x - (lb.winfo_x() + lb.winfo_width())) < 10: + self._resize_column_index = i + elif event.x > (event.widget.winfo_width() - 5): + self._resize_column_index = event.widget.column_index + elif event.x < 5 and event.widget.column_index != 0: + self._resize_column_index = event.widget.column_index - 1 + + # Bind callbacks that are used to resize it. + if self._resize_column_index is not None: + event.widget.bind("", self._resize_column_motion_cb) + event.widget.bind( + "" % event.num, self._resize_column_buttonrelease_cb + ) + return True + else: + return False + + def _resize_column_motion_cb(self, event): + lb = self._listboxes[self._resize_column_index] + charwidth = lb.winfo_width() / lb["width"] + + x1 = event.x + event.widget.winfo_x() + x2 = lb.winfo_x() + lb.winfo_width() + + lb["width"] = max(3, lb["width"] + (x1 - x2) // charwidth) + + def _resize_column_buttonrelease_cb(self, event): + event.widget.unbind("" % event.num) + event.widget.unbind("") + + # ///////////////////////////////////////////////////////////////// + # Properties + # ///////////////////////////////////////////////////////////////// + + @property + def column_names(self): + """ + A tuple containing the names of the columns used by this + multi-column listbox. + """ + return self._column_names + + @property + def column_labels(self): + """ + A tuple containing the ``Tkinter.Label`` widgets used to + display the label of each column. If this multi-column + listbox was created without labels, then this will be an empty + tuple. 
These widgets will all be augmented with a + ``column_index`` attribute, which can be used to determine + which column they correspond to. This can be convenient, + e.g., when defining callbacks for bound events. + """ + return tuple(self._labels) + + @property + def listboxes(self): + """ + A tuple containing the ``Tkinter.Listbox`` widgets used to + display individual columns. These widgets will all be + augmented with a ``column_index`` attribute, which can be used + to determine which column they correspond to. This can be + convenient, e.g., when defining callbacks for bound events. + """ + return tuple(self._listboxes) + + # ///////////////////////////////////////////////////////////////// + # Mouse & Keyboard Callback Functions + # ///////////////////////////////////////////////////////////////// + + def _select(self, e): + i = e.widget.nearest(e.y) + self.selection_clear(0, "end") + self.selection_set(i) + self.activate(i) + self.focus() + + def _scroll(self, delta): + for lb in self._listboxes: + lb.yview_scroll(delta, "unit") + return "break" + + def _pagesize(self): + """:return: The number of rows that makes up one page""" + return int(self.index("@0,1000000")) - int(self.index("@0,0")) + + # ///////////////////////////////////////////////////////////////// + # Row selection + # ///////////////////////////////////////////////////////////////// + + def select(self, index=None, delta=None, see=True): + """ + Set the selected row. If ``index`` is specified, then select + row ``index``. Otherwise, if ``delta`` is specified, then move + the current selection by ``delta`` (negative numbers for up, + positive numbers for down). This will not move the selection + past the top or the bottom of the list. + + :param see: If true, then call ``self.see()`` with the newly + selected index, to ensure that it is visible. 
+ """ + if (index is not None) and (delta is not None): + raise ValueError("specify index or delta, but not both") + + # If delta was given, then calculate index. + if delta is not None: + if len(self.curselection()) == 0: + index = -1 + delta + else: + index = int(self.curselection()[0]) + delta + + # Clear all selected rows. + self.selection_clear(0, "end") + + # Select the specified index + if index is not None: + index = min(max(index, 0), self.size() - 1) + # self.activate(index) + self.selection_set(index) + if see: + self.see(index) + + # ///////////////////////////////////////////////////////////////// + # Configuration + # ///////////////////////////////////////////////////////////////// + + def configure(self, cnf={}, **kw): + """ + Configure this widget. Use ``label_*`` to configure all + labels; and ``listbox_*`` to configure all listboxes. E.g.: + + >>> master = Tk() # doctest: +SKIP + >>> mlb = MultiListbox(master, 5) # doctest: +SKIP + >>> mlb.configure(label_foreground='red') # doctest: +SKIP + >>> mlb.configure(listbox_foreground='red') # doctest: +SKIP + """ + cnf = dict(list(cnf.items()) + list(kw.items())) + for (key, val) in list(cnf.items()): + if key.startswith("label_") or key.startswith("label-"): + for label in self._labels: + label.configure({key[6:]: val}) + elif key.startswith("listbox_") or key.startswith("listbox-"): + for listbox in self._listboxes: + listbox.configure({key[8:]: val}) + else: + Frame.configure(self, {key: val}) + + def __setitem__(self, key, val): + """ + Configure this widget. This is equivalent to + ``self.configure({key,val``)}. See ``configure()``. + """ + self.configure({key: val}) + + def rowconfigure(self, row_index, cnf={}, **kw): + """ + Configure all table cells in the given row. Valid keyword + arguments are: ``background``, ``bg``, ``foreground``, ``fg``, + ``selectbackground``, ``selectforeground``. 
+ """ + for lb in self._listboxes: + lb.itemconfigure(row_index, cnf, **kw) + + def columnconfigure(self, col_index, cnf={}, **kw): + """ + Configure all table cells in the given column. Valid keyword + arguments are: ``background``, ``bg``, ``foreground``, ``fg``, + ``selectbackground``, ``selectforeground``. + """ + lb = self._listboxes[col_index] + + cnf = dict(list(cnf.items()) + list(kw.items())) + for (key, val) in list(cnf.items()): + if key in ( + "background", + "bg", + "foreground", + "fg", + "selectbackground", + "selectforeground", + ): + for i in range(lb.size()): + lb.itemconfigure(i, {key: val}) + else: + lb.configure({key: val}) + + def itemconfigure(self, row_index, col_index, cnf=None, **kw): + """ + Configure the table cell at the given row and column. Valid + keyword arguments are: ``background``, ``bg``, ``foreground``, + ``fg``, ``selectbackground``, ``selectforeground``. + """ + lb = self._listboxes[col_index] + return lb.itemconfigure(row_index, cnf, **kw) + + # ///////////////////////////////////////////////////////////////// + # Value Access + # ///////////////////////////////////////////////////////////////// + + def insert(self, index, *rows): + """ + Insert the given row or rows into the table, at the given + index. Each row value should be a tuple of cell values, one + for each column in the row. Index may be an integer or any of + the special strings (such as ``'end'``) accepted by + ``Tkinter.Listbox``. + """ + for elt in rows: + if len(elt) != len(self._column_names): + raise ValueError( + "rows should be tuples whose length " + "is equal to the number of columns" + ) + for (lb, elts) in zip(self._listboxes, list(zip(*rows))): + lb.insert(index, *elts) + + def get(self, first, last=None): + """ + Return the value(s) of the specified row(s). If ``last`` is + not specified, then return a single row value; otherwise, + return a list of row values. Each row value is a tuple of + cell values, one for each column in the row. 
+ """ + values = [lb.get(first, last) for lb in self._listboxes] + if last: + return [tuple(row) for row in zip(*values)] + else: + return tuple(values) + + def bbox(self, row, col): + """ + Return the bounding box for the given table cell, relative to + this widget's top-left corner. The bounding box is a tuple + of integers ``(left, top, width, height)``. + """ + dx, dy, _, _ = self.grid_bbox(row=0, column=col) + x, y, w, h = self._listboxes[col].bbox(row) + return int(x) + int(dx), int(y) + int(dy), int(w), int(h) + + # ///////////////////////////////////////////////////////////////// + # Hide/Show Columns + # ///////////////////////////////////////////////////////////////// + + def hide_column(self, col_index): + """ + Hide the given column. The column's state is still + maintained: its values will still be returned by ``get()``, and + you must supply its values when calling ``insert()``. It is + safe to call this on a column that is already hidden. + + :see: ``show_column()`` + """ + if self._labels: + self._labels[col_index].grid_forget() + self.listboxes[col_index].grid_forget() + self.grid_columnconfigure(col_index, weight=0) + + def show_column(self, col_index): + """ + Display a column that has been hidden using ``hide_column()``. + It is safe to call this on a column that is not hidden. 
+ """ + weight = self._column_weights[col_index] + if self._labels: + self._labels[col_index].grid( + column=col_index, row=0, sticky="news", padx=0, pady=0 + ) + self._listboxes[col_index].grid( + column=col_index, row=1, sticky="news", padx=0, pady=0 + ) + self.grid_columnconfigure(col_index, weight=weight) + + # ///////////////////////////////////////////////////////////////// + # Binding Methods + # ///////////////////////////////////////////////////////////////// + + def bind_to_labels(self, sequence=None, func=None, add=None): + """ + Add a binding to each ``Tkinter.Label`` widget in this + mult-column listbox that will call ``func`` in response to the + event sequence. + + :return: A list of the identifiers of replaced binding + functions (if any), allowing for their deletion (to + prevent a memory leak). + """ + return [label.bind(sequence, func, add) for label in self.column_labels] + + def bind_to_listboxes(self, sequence=None, func=None, add=None): + """ + Add a binding to each ``Tkinter.Listbox`` widget in this + mult-column listbox that will call ``func`` in response to the + event sequence. + + :return: A list of the identifiers of replaced binding + functions (if any), allowing for their deletion (to + prevent a memory leak). + """ + for listbox in self.listboxes: + listbox.bind(sequence, func, add) + + def bind_to_columns(self, sequence=None, func=None, add=None): + """ + Add a binding to each ``Tkinter.Label`` and ``Tkinter.Listbox`` + widget in this mult-column listbox that will call ``func`` in + response to the event sequence. + + :return: A list of the identifiers of replaced binding + functions (if any), allowing for their deletion (to + prevent a memory leak). 
+ """ + return self.bind_to_labels(sequence, func, add) + self.bind_to_listboxes( + sequence, func, add + ) + + # ///////////////////////////////////////////////////////////////// + # Simple Delegation + # ///////////////////////////////////////////////////////////////// + + # These methods delegate to the first listbox: + def curselection(self, *args, **kwargs): + return self._listboxes[0].curselection(*args, **kwargs) + + def selection_includes(self, *args, **kwargs): + return self._listboxes[0].selection_includes(*args, **kwargs) + + def itemcget(self, *args, **kwargs): + return self._listboxes[0].itemcget(*args, **kwargs) + + def size(self, *args, **kwargs): + return self._listboxes[0].size(*args, **kwargs) + + def index(self, *args, **kwargs): + return self._listboxes[0].index(*args, **kwargs) + + def nearest(self, *args, **kwargs): + return self._listboxes[0].nearest(*args, **kwargs) + + # These methods delegate to each listbox (and return None): + def activate(self, *args, **kwargs): + for lb in self._listboxes: + lb.activate(*args, **kwargs) + + def delete(self, *args, **kwargs): + for lb in self._listboxes: + lb.delete(*args, **kwargs) + + def scan_mark(self, *args, **kwargs): + for lb in self._listboxes: + lb.scan_mark(*args, **kwargs) + + def scan_dragto(self, *args, **kwargs): + for lb in self._listboxes: + lb.scan_dragto(*args, **kwargs) + + def see(self, *args, **kwargs): + for lb in self._listboxes: + lb.see(*args, **kwargs) + + def selection_anchor(self, *args, **kwargs): + for lb in self._listboxes: + lb.selection_anchor(*args, **kwargs) + + def selection_clear(self, *args, **kwargs): + for lb in self._listboxes: + lb.selection_clear(*args, **kwargs) + + def selection_set(self, *args, **kwargs): + for lb in self._listboxes: + lb.selection_set(*args, **kwargs) + + def yview(self, *args, **kwargs): + for lb in self._listboxes: + v = lb.yview(*args, **kwargs) + return v # if called with no arguments + + def yview_moveto(self, *args, **kwargs): + for 
lb in self._listboxes: + lb.yview_moveto(*args, **kwargs) + + def yview_scroll(self, *args, **kwargs): + for lb in self._listboxes: + lb.yview_scroll(*args, **kwargs) + + # ///////////////////////////////////////////////////////////////// + # Aliases + # ///////////////////////////////////////////////////////////////// + + itemconfig = itemconfigure + rowconfig = rowconfigure + columnconfig = columnconfigure + select_anchor = selection_anchor + select_clear = selection_clear + select_includes = selection_includes + select_set = selection_set + + # ///////////////////////////////////////////////////////////////// + # These listbox methods are not defined for multi-listbox + # ///////////////////////////////////////////////////////////////// + # def xview(self, *what): pass + # def xview_moveto(self, fraction): pass + # def xview_scroll(self, number, what): pass + + +###################################################################### +# Table +###################################################################### + + +class Table: + """ + A display widget for a table of values, based on a ``MultiListbox`` + widget. For many purposes, ``Table`` can be treated as a + list-of-lists. E.g., table[i] is a list of the values for row i; + and table.append(row) adds a new row with the given list of + values. Individual cells can be accessed using table[i,j], which + refers to the j-th column of the i-th row. This can be used to + both read and write values from the table. E.g.: + + >>> table[i,j] = 'hello' # doctest: +SKIP + + The column (j) can be given either as an index number, or as a + column name. E.g., the following prints the value in the 3rd row + for the 'First Name' column: + + >>> print(table[3, 'First Name']) # doctest: +SKIP + John + + You can configure the colors for individual rows, columns, or + cells using ``rowconfig()``, ``columnconfig()``, and ``itemconfig()``. 
+ The color configuration for each row will be preserved if the + table is modified; however, when new rows are added, any color + configurations that have been made for *columns* will not be + applied to the new row. + + Note: Although ``Table`` acts like a widget in some ways (e.g., it + defines ``grid()``, ``pack()``, and ``bind()``), it is not itself a + widget; it just contains one. This is because widgets need to + define ``__getitem__()``, ``__setitem__()``, and ``__nonzero__()`` in + a way that's incompatible with the fact that ``Table`` behaves as a + list-of-lists. + + :ivar _mlb: The multi-column listbox used to display this table's data. + :ivar _rows: A list-of-lists used to hold the cell values of this + table. Each element of _rows is a row value, i.e., a list of + cell values, one for each column in the row. + """ + + def __init__( + self, + master, + column_names, + rows=None, + column_weights=None, + scrollbar=True, + click_to_sort=True, + reprfunc=None, + cnf={}, + **kw + ): + """ + Construct a new Table widget. + + :type master: Tkinter.Widget + :param master: The widget that should contain the new table. + :type column_names: list(str) + :param column_names: A list of names for the columns; these + names will be used to create labels for each column; + and can be used as an index when reading or writing + cell values from the table. + :type rows: list(list) + :param rows: A list of row values used to initialize the table. + Each row value should be a tuple of cell values, one for + each column in the row. + :type scrollbar: bool + :param scrollbar: If true, then create a scrollbar for the + new table widget. + :type click_to_sort: bool + :param click_to_sort: If true, then create bindings that will + sort the table's rows by a given column's values if the + user clicks on that colum's label. + :type reprfunc: function + :param reprfunc: If specified, then use this function to + convert each table cell value to a string suitable for + display. 
``reprfunc`` has the following signature: + reprfunc(row_index, col_index, cell_value) -> str + (Note that the column is specified by index, not by name.) + :param cnf, kw: Configuration parameters for this widget's + contained ``MultiListbox``. See ``MultiListbox.__init__()`` + for details. + """ + self._num_columns = len(column_names) + self._reprfunc = reprfunc + self._frame = Frame(master) + + self._column_name_to_index = {c: i for (i, c) in enumerate(column_names)} + + # Make a copy of the rows & check that it's valid. + if rows is None: + self._rows = [] + else: + self._rows = [[v for v in row] for row in rows] + for row in self._rows: + self._checkrow(row) + + # Create our multi-list box. + self._mlb = MultiListbox(self._frame, column_names, column_weights, cnf, **kw) + self._mlb.pack(side="left", expand=True, fill="both") + + # Optional scrollbar + if scrollbar: + sb = Scrollbar(self._frame, orient="vertical", command=self._mlb.yview) + self._mlb.listboxes[0]["yscrollcommand"] = sb.set + # for listbox in self._mlb.listboxes: + # listbox['yscrollcommand'] = sb.set + sb.pack(side="right", fill="y") + self._scrollbar = sb + + # Set up sorting + self._sortkey = None + if click_to_sort: + for i, l in enumerate(self._mlb.column_labels): + l.bind("", self._sort) + + # Fill in our multi-list box. + self._fill_table() + + # ///////////////////////////////////////////////////////////////// + # { Widget-like Methods + # ///////////////////////////////////////////////////////////////// + # These all just delegate to either our frame or our MLB. + + def pack(self, *args, **kwargs): + """Position this table's main frame widget in its parent + widget. See ``Tkinter.Frame.pack()`` for more info.""" + self._frame.pack(*args, **kwargs) + + def grid(self, *args, **kwargs): + """Position this table's main frame widget in its parent + widget. 
See ``Tkinter.Frame.grid()`` for more info."""
        self._frame.grid(*args, **kwargs)

    def focus(self):
        """Direct (keyboard) input focus to this widget."""
        self._mlb.focus()

    def bind(self, sequence=None, func=None, add=None):
        """Add a binding to this table's main frame that will call
        ``func`` in response to the event sequence."""
        self._mlb.bind(sequence, func, add)

    def rowconfigure(self, row_index, cnf={}, **kw):
        """:see: ``MultiListbox.rowconfigure()``"""
        self._mlb.rowconfigure(row_index, cnf, **kw)

    def columnconfigure(self, col_index, cnf={}, **kw):
        """:see: ``MultiListbox.columnconfigure()``"""
        # Accept either an integer column index or a column name.
        col_index = self.column_index(col_index)
        self._mlb.columnconfigure(col_index, cnf, **kw)

    def itemconfigure(self, row_index, col_index, cnf=None, **kw):
        """:see: ``MultiListbox.itemconfigure()``"""
        # Accept either an integer column index or a column name.
        col_index = self.column_index(col_index)
        return self._mlb.itemconfigure(row_index, col_index, cnf, **kw)

    def bind_to_labels(self, sequence=None, func=None, add=None):
        """:see: ``MultiListbox.bind_to_labels()``"""
        return self._mlb.bind_to_labels(sequence, func, add)

    def bind_to_listboxes(self, sequence=None, func=None, add=None):
        """:see: ``MultiListbox.bind_to_listboxes()``"""
        return self._mlb.bind_to_listboxes(sequence, func, add)

    def bind_to_columns(self, sequence=None, func=None, add=None):
        """:see: ``MultiListbox.bind_to_columns()``"""
        return self._mlb.bind_to_columns(sequence, func, add)

    # Short aliases for the configuration methods.
    rowconfig = rowconfigure
    columnconfig = columnconfigure
    itemconfig = itemconfigure

    # /////////////////////////////////////////////////////////////////
    # { Table as list-of-lists
    # /////////////////////////////////////////////////////////////////

    def insert(self, row_index, rowvalue):
        """
        Insert a new row into the table, so that its row index will be
        ``row_index``.  If the table contains any rows whose row index
        is greater than or equal to ``row_index``, then they will be
        shifted down.
+ + :param rowvalue: A tuple of cell values, one for each column + in the new row. + """ + self._checkrow(rowvalue) + self._rows.insert(row_index, rowvalue) + if self._reprfunc is not None: + rowvalue = [ + self._reprfunc(row_index, j, v) for (j, v) in enumerate(rowvalue) + ] + self._mlb.insert(row_index, rowvalue) + if self._DEBUG: + self._check_table_vs_mlb() + + def extend(self, rowvalues): + """ + Add new rows at the end of the table. + + :param rowvalues: A list of row values used to initialize the + table. Each row value should be a tuple of cell values, + one for each column in the row. + """ + for rowvalue in rowvalues: + self.append(rowvalue) + if self._DEBUG: + self._check_table_vs_mlb() + + def append(self, rowvalue): + """ + Add a new row to the end of the table. + + :param rowvalue: A tuple of cell values, one for each column + in the new row. + """ + self.insert(len(self._rows), rowvalue) + if self._DEBUG: + self._check_table_vs_mlb() + + def clear(self): + """ + Delete all rows in this table. + """ + self._rows = [] + self._mlb.delete(0, "end") + if self._DEBUG: + self._check_table_vs_mlb() + + def __getitem__(self, index): + """ + Return the value of a row or a cell in this table. If + ``index`` is an integer, then the row value for the ``index``th + row. This row value consists of a tuple of cell values, one + for each column in the row. If ``index`` is a tuple of two + integers, ``(i,j)``, then return the value of the cell in the + ``i``th row and the ``j``th column. + """ + if isinstance(index, slice): + raise ValueError("Slicing not supported") + elif isinstance(index, tuple) and len(index) == 2: + return self._rows[index[0]][self.column_index(index[1])] + else: + return tuple(self._rows[index]) + + def __setitem__(self, index, val): + """ + Replace the value of a row or a cell in this table with + ``val``. + + If ``index`` is an integer, then ``val`` should be a row value + (i.e., a tuple of cell values, one for each column). 
In this + case, the values of the ``index``th row of the table will be + replaced with the values in ``val``. + + If ``index`` is a tuple of integers, ``(i,j)``, then replace the + value of the cell in the ``i``th row and ``j``th column with + ``val``. + """ + if isinstance(index, slice): + raise ValueError("Slicing not supported") + + # table[i,j] = val + elif isinstance(index, tuple) and len(index) == 2: + i, j = index[0], self.column_index(index[1]) + config_cookie = self._save_config_info([i]) + self._rows[i][j] = val + if self._reprfunc is not None: + val = self._reprfunc(i, j, val) + self._mlb.listboxes[j].insert(i, val) + self._mlb.listboxes[j].delete(i + 1) + self._restore_config_info(config_cookie) + + # table[i] = val + else: + config_cookie = self._save_config_info([index]) + self._checkrow(val) + self._rows[index] = list(val) + if self._reprfunc is not None: + val = [self._reprfunc(index, j, v) for (j, v) in enumerate(val)] + self._mlb.insert(index, val) + self._mlb.delete(index + 1) + self._restore_config_info(config_cookie) + + def __delitem__(self, row_index): + """ + Delete the ``row_index``th row from this table. + """ + if isinstance(row_index, slice): + raise ValueError("Slicing not supported") + if isinstance(row_index, tuple) and len(row_index) == 2: + raise ValueError("Cannot delete a single cell!") + del self._rows[row_index] + self._mlb.delete(row_index) + if self._DEBUG: + self._check_table_vs_mlb() + + def __len__(self): + """ + :return: the number of rows in this table. + """ + return len(self._rows) + + def _checkrow(self, rowvalue): + """ + Helper function: check that a given row value has the correct + number of elements; and if not, raise an exception. 
+ """ + if len(rowvalue) != self._num_columns: + raise ValueError( + "Row %r has %d columns; expected %d" + % (rowvalue, len(rowvalue), self._num_columns) + ) + + # ///////////////////////////////////////////////////////////////// + # Columns + # ///////////////////////////////////////////////////////////////// + + @property + def column_names(self): + """A list of the names of the columns in this table.""" + return self._mlb.column_names + + def column_index(self, i): + """ + If ``i`` is a valid column index integer, then return it as is. + Otherwise, check if ``i`` is used as the name for any column; + if so, return that column's index. Otherwise, raise a + ``KeyError`` exception. + """ + if isinstance(i, int) and 0 <= i < self._num_columns: + return i + else: + # This raises a key error if the column is not found. + return self._column_name_to_index[i] + + def hide_column(self, column_index): + """:see: ``MultiListbox.hide_column()``""" + self._mlb.hide_column(self.column_index(column_index)) + + def show_column(self, column_index): + """:see: ``MultiListbox.show_column()``""" + self._mlb.show_column(self.column_index(column_index)) + + # ///////////////////////////////////////////////////////////////// + # Selection + # ///////////////////////////////////////////////////////////////// + + def selected_row(self): + """ + Return the index of the currently selected row, or None if + no row is selected. To get the row value itself, use + ``table[table.selected_row()]``. 
+ """ + sel = self._mlb.curselection() + if sel: + return int(sel[0]) + else: + return None + + def select(self, index=None, delta=None, see=True): + """:see: ``MultiListbox.select()``""" + self._mlb.select(index, delta, see) + + # ///////////////////////////////////////////////////////////////// + # Sorting + # ///////////////////////////////////////////////////////////////// + + def sort_by(self, column_index, order="toggle"): + """ + Sort the rows in this table, using the specified column's + values as a sort key. + + :param column_index: Specifies which column to sort, using + either a column index (int) or a column's label name + (str). + + :param order: Specifies whether to sort the values in + ascending or descending order: + + - ``'ascending'``: Sort from least to greatest. + - ``'descending'``: Sort from greatest to least. + - ``'toggle'``: If the most recent call to ``sort_by()`` + sorted the table by the same column (``column_index``), + then reverse the rows; otherwise sort in ascending + order. + """ + if order not in ("ascending", "descending", "toggle"): + raise ValueError( + 'sort_by(): order should be "ascending", ' '"descending", or "toggle".' + ) + column_index = self.column_index(column_index) + config_cookie = self._save_config_info(index_by_id=True) + + # Sort the rows. + if order == "toggle" and column_index == self._sortkey: + self._rows.reverse() + else: + self._rows.sort( + key=operator.itemgetter(column_index), reverse=(order == "descending") + ) + self._sortkey = column_index + + # Redraw the table. + self._fill_table() + self._restore_config_info(config_cookie, index_by_id=True, see=True) + if self._DEBUG: + self._check_table_vs_mlb() + + def _sort(self, event): + """Event handler for clicking on a column label -- sort by + that column.""" + column_index = event.widget.column_index + + # If they click on the far-left of far-right of a column's + # label, then resize rather than sorting. 
+ if self._mlb._resize_column(event): + return "continue" + + # Otherwise, sort. + else: + self.sort_by(column_index) + return "continue" + + # ///////////////////////////////////////////////////////////////// + # { Table Drawing Helpers + # ///////////////////////////////////////////////////////////////// + + def _fill_table(self, save_config=True): + """ + Re-draw the table from scratch, by clearing out the table's + multi-column listbox; and then filling it in with values from + ``self._rows``. Note that any cell-, row-, or column-specific + color configuration that has been done will be lost. The + selection will also be lost -- i.e., no row will be selected + after this call completes. + """ + self._mlb.delete(0, "end") + for i, row in enumerate(self._rows): + if self._reprfunc is not None: + row = [self._reprfunc(i, j, v) for (j, v) in enumerate(row)] + self._mlb.insert("end", row) + + def _get_itemconfig(self, r, c): + return { + k: self._mlb.itemconfig(r, c, k)[-1] + for k in ( + "foreground", + "selectforeground", + "background", + "selectbackground", + ) + } + + def _save_config_info(self, row_indices=None, index_by_id=False): + """ + Return a 'cookie' containing information about which row is + selected, and what color configurations have been applied. + this information can the be re-applied to the table (after + making modifications) using ``_restore_config_info()``. Color + configuration information will be saved for any rows in + ``row_indices``, or in the entire table, if + ``row_indices=None``. If ``index_by_id=True``, the the cookie + will associate rows with their configuration information based + on the rows' python id. This is useful when performing + operations that re-arrange the rows (e.g. ``sort``). If + ``index_by_id=False``, then it is assumed that all rows will be + in the same order when ``_restore_config_info()`` is called. + """ + # Default value for row_indices is all rows. 
+ if row_indices is None: + row_indices = list(range(len(self._rows))) + + # Look up our current selection. + selection = self.selected_row() + if index_by_id and selection is not None: + selection = id(self._rows[selection]) + + # Look up the color configuration info for each row. + if index_by_id: + config = { + id(self._rows[r]): [ + self._get_itemconfig(r, c) for c in range(self._num_columns) + ] + for r in row_indices + } + else: + config = { + r: [self._get_itemconfig(r, c) for c in range(self._num_columns)] + for r in row_indices + } + + return selection, config + + def _restore_config_info(self, cookie, index_by_id=False, see=False): + """ + Restore selection & color configuration information that was + saved using ``_save_config_info``. + """ + selection, config = cookie + + # Clear the selection. + if selection is None: + self._mlb.selection_clear(0, "end") + + # Restore selection & color config + if index_by_id: + for r, row in enumerate(self._rows): + if id(row) in config: + for c in range(self._num_columns): + self._mlb.itemconfigure(r, c, config[id(row)][c]) + if id(row) == selection: + self._mlb.select(r, see=see) + else: + if selection is not None: + self._mlb.select(selection, see=see) + for r in config: + for c in range(self._num_columns): + self._mlb.itemconfigure(r, c, config[r][c]) + + # ///////////////////////////////////////////////////////////////// + # Debugging (Invariant Checker) + # ///////////////////////////////////////////////////////////////// + + _DEBUG = False + """If true, then run ``_check_table_vs_mlb()`` after any operation + that modifies the table.""" + + def _check_table_vs_mlb(self): + """ + Verify that the contents of the table's ``_rows`` variable match + the contents of its multi-listbox (``_mlb``). This is just + included for debugging purposes, to make sure that the + list-modifying operations are working correctly. 
+ """ + for col in self._mlb.listboxes: + assert len(self) == col.size() + for row in self: + assert len(row) == self._num_columns + assert self._num_columns == len(self._mlb.column_names) + # assert self._column_names == self._mlb.column_names + for i, row in enumerate(self): + for j, cell in enumerate(row): + if self._reprfunc is not None: + cell = self._reprfunc(i, j, cell) + assert self._mlb.get(i)[j] == cell + + +###################################################################### +# Demo/Test Function +###################################################################### + +# update this to use new WordNet API +def demo(): + root = Tk() + root.bind("", lambda e: root.destroy()) + + table = Table( + root, + "Word Synset Hypernym Hyponym".split(), + column_weights=[0, 1, 1, 1], + reprfunc=(lambda i, j, s: " %s" % s), + ) + table.pack(expand=True, fill="both") + + from nltk.corpus import brown, wordnet + + for word, pos in sorted(set(brown.tagged_words()[:500])): + if pos[0] != "N": + continue + word = word.lower() + for synset in wordnet.synsets(word): + try: + hyper_def = synset.hypernyms()[0].definition() + except: + hyper_def = "*none*" + try: + hypo_def = synset.hypernyms()[0].definition() + except: + hypo_def = "*none*" + table.append([word, synset.definition(), hyper_def, hypo_def]) + + table.columnconfig("Word", background="#afa") + table.columnconfig("Synset", background="#efe") + table.columnconfig("Hypernym", background="#fee") + table.columnconfig("Hyponym", background="#ffe") + for row in range(len(table)): + for column in ("Hypernym", "Hyponym"): + if table[row, column] == "*none*": + table.itemconfig( + row, column, foreground="#666", selectforeground="#666" + ) + root.mainloop() + + +if __name__ == "__main__": + demo() diff --git a/lib/python3.10/site-packages/nltk/draw/tree.py b/lib/python3.10/site-packages/nltk/draw/tree.py new file mode 100644 index 0000000000000000000000000000000000000000..6a2791428fcab5a47dd6d88561971d6907f74084 --- 
/dev/null +++ b/lib/python3.10/site-packages/nltk/draw/tree.py @@ -0,0 +1,1129 @@ +# Natural Language Toolkit: Graphical Representations for Trees +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +Graphically display a Tree. +""" + +from tkinter import IntVar, Menu, Tk + +from nltk.draw.util import ( + BoxWidget, + CanvasFrame, + CanvasWidget, + OvalWidget, + ParenWidget, + TextWidget, +) +from nltk.tree import Tree +from nltk.util import in_idle + +##////////////////////////////////////////////////////// +## Tree Segment +##////////////////////////////////////////////////////// + + +class TreeSegmentWidget(CanvasWidget): + """ + A canvas widget that displays a single segment of a hierarchical + tree. Each ``TreeSegmentWidget`` connects a single "node widget" + to a sequence of zero or more "subtree widgets". By default, the + bottom of the node is connected to the top of each subtree by a + single line. However, if the ``roof`` attribute is set, then a + single triangular "roof" will connect the node to all of its + children. + + Attributes: + - ``roof``: What sort of connection to draw between the node and + its subtrees. If ``roof`` is true, draw a single triangular + "roof" over the subtrees. If ``roof`` is false, draw a line + between each subtree and the node. Default value is false. + - ``xspace``: The amount of horizontal space to leave between + subtrees when managing this widget. Default value is 10. + - ``yspace``: The amount of space to place between the node and + its children when managing this widget. Default value is 15. + - ``color``: The color of the lines connecting the node to its + subtrees; and of the outline of the triangular roof. Default + value is ``'#006060'``. + - ``fill``: The fill color for the triangular roof. Default + value is ``''`` (no fill). 
+ - ``width``: The width of the lines connecting the node to its + subtrees; and of the outline of the triangular roof. Default + value is 1. + - ``orientation``: Determines whether the tree branches downwards + or rightwards. Possible values are ``'horizontal'`` and + ``'vertical'``. The default value is ``'vertical'`` (i.e., + branch downwards). + - ``draggable``: whether the widget can be dragged by the user. + """ + + def __init__(self, canvas, label, subtrees, **attribs): + """ + :type node: + :type subtrees: list(CanvasWidgetI) + """ + self._label = label + self._subtrees = subtrees + + # Attributes + self._horizontal = 0 + self._roof = 0 + self._xspace = 10 + self._yspace = 15 + self._ordered = False + + # Create canvas objects. + self._lines = [canvas.create_line(0, 0, 0, 0, fill="#006060") for c in subtrees] + self._polygon = canvas.create_polygon( + 0, 0, fill="", state="hidden", outline="#006060" + ) + + # Register child widgets (label + subtrees) + self._add_child_widget(label) + for subtree in subtrees: + self._add_child_widget(subtree) + + # Are we currently managing? + self._managing = False + + CanvasWidget.__init__(self, canvas, **attribs) + + def __setitem__(self, attr, value): + canvas = self.canvas() + if attr == "roof": + self._roof = value + if self._roof: + for l in self._lines: + canvas.itemconfig(l, state="hidden") + canvas.itemconfig(self._polygon, state="normal") + else: + for l in self._lines: + canvas.itemconfig(l, state="normal") + canvas.itemconfig(self._polygon, state="hidden") + elif attr == "orientation": + if value == "horizontal": + self._horizontal = 1 + elif value == "vertical": + self._horizontal = 0 + else: + raise ValueError("orientation must be horizontal or vertical") + elif attr == "color": + for l in self._lines: + canvas.itemconfig(l, fill=value) + canvas.itemconfig(self._polygon, outline=value) + elif isinstance(attr, tuple) and attr[0] == "color": + # Set the color of an individual line. 
+ l = self._lines[int(attr[1])] + canvas.itemconfig(l, fill=value) + elif attr == "fill": + canvas.itemconfig(self._polygon, fill=value) + elif attr == "width": + canvas.itemconfig(self._polygon, {attr: value}) + for l in self._lines: + canvas.itemconfig(l, {attr: value}) + elif attr in ("xspace", "yspace"): + if attr == "xspace": + self._xspace = value + elif attr == "yspace": + self._yspace = value + self.update(self._label) + elif attr == "ordered": + self._ordered = value + else: + CanvasWidget.__setitem__(self, attr, value) + + def __getitem__(self, attr): + if attr == "roof": + return self._roof + elif attr == "width": + return self.canvas().itemcget(self._polygon, attr) + elif attr == "color": + return self.canvas().itemcget(self._polygon, "outline") + elif isinstance(attr, tuple) and attr[0] == "color": + l = self._lines[int(attr[1])] + return self.canvas().itemcget(l, "fill") + elif attr == "xspace": + return self._xspace + elif attr == "yspace": + return self._yspace + elif attr == "orientation": + if self._horizontal: + return "horizontal" + else: + return "vertical" + elif attr == "ordered": + return self._ordered + else: + return CanvasWidget.__getitem__(self, attr) + + def label(self): + return self._label + + def subtrees(self): + return self._subtrees[:] + + def set_label(self, label): + """ + Set the node label to ``label``. + """ + self._remove_child_widget(self._label) + self._add_child_widget(label) + self._label = label + self.update(self._label) + + def replace_child(self, oldchild, newchild): + """ + Replace the child ``oldchild`` with ``newchild``. 
+ """ + index = self._subtrees.index(oldchild) + self._subtrees[index] = newchild + self._remove_child_widget(oldchild) + self._add_child_widget(newchild) + self.update(newchild) + + def remove_child(self, child): + index = self._subtrees.index(child) + del self._subtrees[index] + self._remove_child_widget(child) + self.canvas().delete(self._lines.pop()) + self.update(self._label) + + def insert_child(self, index, child): + canvas = self.canvas() + self._subtrees.insert(index, child) + self._add_child_widget(child) + self._lines.append(canvas.create_line(0, 0, 0, 0, fill="#006060")) + self.update(self._label) + + # but.. lines??? + + def _tags(self): + if self._roof: + return [self._polygon] + else: + return self._lines + + def _subtree_top(self, child): + if isinstance(child, TreeSegmentWidget): + bbox = child.label().bbox() + else: + bbox = child.bbox() + if self._horizontal: + return (bbox[0], (bbox[1] + bbox[3]) / 2.0) + else: + return ((bbox[0] + bbox[2]) / 2.0, bbox[1]) + + def _node_bottom(self): + bbox = self._label.bbox() + if self._horizontal: + return (bbox[2], (bbox[1] + bbox[3]) / 2.0) + else: + return ((bbox[0] + bbox[2]) / 2.0, bbox[3]) + + def _update(self, child): + if len(self._subtrees) == 0: + return + if self._label.bbox() is None: + return # [XX] ??? + + # Which lines need to be redrawn? + if child is self._label: + need_update = self._subtrees + else: + need_update = [child] + + if self._ordered and not self._managing: + need_update = self._maintain_order(child) + + # Update the polygon. 
+ (nodex, nodey) = self._node_bottom() + (xmin, ymin, xmax, ymax) = self._subtrees[0].bbox() + for subtree in self._subtrees[1:]: + bbox = subtree.bbox() + xmin = min(xmin, bbox[0]) + ymin = min(ymin, bbox[1]) + xmax = max(xmax, bbox[2]) + ymax = max(ymax, bbox[3]) + + if self._horizontal: + self.canvas().coords( + self._polygon, nodex, nodey, xmin, ymin, xmin, ymax, nodex, nodey + ) + else: + self.canvas().coords( + self._polygon, nodex, nodey, xmin, ymin, xmax, ymin, nodex, nodey + ) + + # Redraw all lines that need it. + for subtree in need_update: + (nodex, nodey) = self._node_bottom() + line = self._lines[self._subtrees.index(subtree)] + (subtreex, subtreey) = self._subtree_top(subtree) + self.canvas().coords(line, nodex, nodey, subtreex, subtreey) + + def _maintain_order(self, child): + if self._horizontal: + return self._maintain_order_horizontal(child) + else: + return self._maintain_order_vertical(child) + + def _maintain_order_vertical(self, child): + (left, top, right, bot) = child.bbox() + + if child is self._label: + # Check all the leaves + for subtree in self._subtrees: + (x1, y1, x2, y2) = subtree.bbox() + if bot + self._yspace > y1: + subtree.move(0, bot + self._yspace - y1) + + return self._subtrees + else: + moved = [child] + index = self._subtrees.index(child) + + # Check leaves to our right. + x = right + self._xspace + for i in range(index + 1, len(self._subtrees)): + (x1, y1, x2, y2) = self._subtrees[i].bbox() + if x > x1: + self._subtrees[i].move(x - x1, 0) + x += x2 - x1 + self._xspace + moved.append(self._subtrees[i]) + + # Check leaves to our left. 
+ x = left - self._xspace + for i in range(index - 1, -1, -1): + (x1, y1, x2, y2) = self._subtrees[i].bbox() + if x < x2: + self._subtrees[i].move(x - x2, 0) + x -= x2 - x1 + self._xspace + moved.append(self._subtrees[i]) + + # Check the node + (x1, y1, x2, y2) = self._label.bbox() + if y2 > top - self._yspace: + self._label.move(0, top - self._yspace - y2) + moved = self._subtrees + + # Return a list of the nodes we moved + return moved + + def _maintain_order_horizontal(self, child): + (left, top, right, bot) = child.bbox() + + if child is self._label: + # Check all the leaves + for subtree in self._subtrees: + (x1, y1, x2, y2) = subtree.bbox() + if right + self._xspace > x1: + subtree.move(right + self._xspace - x1) + + return self._subtrees + else: + moved = [child] + index = self._subtrees.index(child) + + # Check leaves below us. + y = bot + self._yspace + for i in range(index + 1, len(self._subtrees)): + (x1, y1, x2, y2) = self._subtrees[i].bbox() + if y > y1: + self._subtrees[i].move(0, y - y1) + y += y2 - y1 + self._yspace + moved.append(self._subtrees[i]) + + # Check leaves above us + y = top - self._yspace + for i in range(index - 1, -1, -1): + (x1, y1, x2, y2) = self._subtrees[i].bbox() + if y < y2: + self._subtrees[i].move(0, y - y2) + y -= y2 - y1 + self._yspace + moved.append(self._subtrees[i]) + + # Check the node + (x1, y1, x2, y2) = self._label.bbox() + if x2 > left - self._xspace: + self._label.move(left - self._xspace - x2, 0) + moved = self._subtrees + + # Return a list of the nodes we moved + return moved + + def _manage_horizontal(self): + (nodex, nodey) = self._node_bottom() + + # Put the subtrees in a line. + y = 20 + for subtree in self._subtrees: + subtree_bbox = subtree.bbox() + dx = nodex - subtree_bbox[0] + self._xspace + dy = y - subtree_bbox[1] + subtree.move(dx, dy) + y += subtree_bbox[3] - subtree_bbox[1] + self._yspace + + # Find the center of their tops. 
+ center = 0.0 + for subtree in self._subtrees: + center += self._subtree_top(subtree)[1] + center /= len(self._subtrees) + + # Center the subtrees with the node. + for subtree in self._subtrees: + subtree.move(0, nodey - center) + + def _manage_vertical(self): + (nodex, nodey) = self._node_bottom() + + # Put the subtrees in a line. + x = 0 + for subtree in self._subtrees: + subtree_bbox = subtree.bbox() + dy = nodey - subtree_bbox[1] + self._yspace + dx = x - subtree_bbox[0] + subtree.move(dx, dy) + x += subtree_bbox[2] - subtree_bbox[0] + self._xspace + + # Find the center of their tops. + center = 0.0 + for subtree in self._subtrees: + center += self._subtree_top(subtree)[0] / len(self._subtrees) + + # Center the subtrees with the node. + for subtree in self._subtrees: + subtree.move(nodex - center, 0) + + def _manage(self): + self._managing = True + (nodex, nodey) = self._node_bottom() + if len(self._subtrees) == 0: + return + + if self._horizontal: + self._manage_horizontal() + else: + self._manage_vertical() + + # Update lines to subtrees. + for subtree in self._subtrees: + self._update(subtree) + + self._managing = False + + def __repr__(self): + return f"[TreeSeg {self._label}: {self._subtrees}]" + + +def _tree_to_treeseg( + canvas, + t, + make_node, + make_leaf, + tree_attribs, + node_attribs, + leaf_attribs, + loc_attribs, +): + if isinstance(t, Tree): + label = make_node(canvas, t.label(), **node_attribs) + subtrees = [ + _tree_to_treeseg( + canvas, + child, + make_node, + make_leaf, + tree_attribs, + node_attribs, + leaf_attribs, + loc_attribs, + ) + for child in t + ] + return TreeSegmentWidget(canvas, label, subtrees, **tree_attribs) + else: + return make_leaf(canvas, t, **leaf_attribs) + + +def tree_to_treesegment( + canvas, t, make_node=TextWidget, make_leaf=TextWidget, **attribs +): + """ + Convert a Tree into a ``TreeSegmentWidget``. + + :param make_node: A ``CanvasWidget`` constructor or a function that + creates ``CanvasWidgets``. 
``make_node`` is used to convert + the Tree's nodes into ``CanvasWidgets``. If no constructor + is specified, then ``TextWidget`` will be used. + :param make_leaf: A ``CanvasWidget`` constructor or a function that + creates ``CanvasWidgets``. ``make_leaf`` is used to convert + the Tree's leafs into ``CanvasWidgets``. If no constructor + is specified, then ``TextWidget`` will be used. + :param attribs: Attributes for the canvas widgets that make up the + returned ``TreeSegmentWidget``. Any attribute beginning with + ``'tree_'`` will be passed to all ``TreeSegmentWidgets`` (with + the ``'tree_'`` prefix removed. Any attribute beginning with + ``'node_'`` will be passed to all nodes. Any attribute + beginning with ``'leaf_'`` will be passed to all leaves. And + any attribute beginning with ``'loc_'`` will be passed to all + text locations (for Trees). + """ + # Process attribs. + tree_attribs = {} + node_attribs = {} + leaf_attribs = {} + loc_attribs = {} + + for (key, value) in list(attribs.items()): + if key[:5] == "tree_": + tree_attribs[key[5:]] = value + elif key[:5] == "node_": + node_attribs[key[5:]] = value + elif key[:5] == "leaf_": + leaf_attribs[key[5:]] = value + elif key[:4] == "loc_": + loc_attribs[key[4:]] = value + else: + raise ValueError("Bad attribute: %s" % key) + return _tree_to_treeseg( + canvas, + t, + make_node, + make_leaf, + tree_attribs, + node_attribs, + leaf_attribs, + loc_attribs, + ) + + +##////////////////////////////////////////////////////// +## Tree Widget +##////////////////////////////////////////////////////// + + +class TreeWidget(CanvasWidget): + """ + A canvas widget that displays a single Tree. + ``TreeWidget`` manages a group of ``TreeSegmentWidgets`` that are + used to display a Tree. + + Attributes: + + - ``node_attr``: Sets the attribute ``attr`` on all of the + node widgets for this ``TreeWidget``. + - ``node_attr``: Sets the attribute ``attr`` on all of the + leaf widgets for this ``TreeWidget``. 
+ - ``loc_attr``: Sets the attribute ``attr`` on all of the + location widgets for this ``TreeWidget`` (if it was built from + a Tree). Note that a location widget is a ``TextWidget``. + + - ``xspace``: The amount of horizontal space to leave between + subtrees when managing this widget. Default value is 10. + - ``yspace``: The amount of space to place between the node and + its children when managing this widget. Default value is 15. + + - ``line_color``: The color of the lines connecting each expanded + node to its subtrees. + - ``roof_color``: The color of the outline of the triangular roof + for collapsed trees. + - ``roof_fill``: The fill color for the triangular roof for + collapsed trees. + - ``width`` + + - ``orientation``: Determines whether the tree branches downwards + or rightwards. Possible values are ``'horizontal'`` and + ``'vertical'``. The default value is ``'vertical'`` (i.e., + branch downwards). + + - ``shapeable``: whether the subtrees can be independently + dragged by the user. THIS property simply sets the + ``DRAGGABLE`` property on all of the ``TreeWidget``'s tree + segments. + - ``draggable``: whether the widget can be dragged by the user. + """ + + def __init__( + self, canvas, t, make_node=TextWidget, make_leaf=TextWidget, **attribs + ): + # Node & leaf canvas widget constructors + self._make_node = make_node + self._make_leaf = make_leaf + self._tree = t + + # Attributes. + self._nodeattribs = {} + self._leafattribs = {} + self._locattribs = {"color": "#008000"} + self._line_color = "#008080" + self._line_width = 1 + self._roof_color = "#008080" + self._roof_fill = "#c0c0c0" + self._shapeable = False + self._xspace = 10 + self._yspace = 10 + self._orientation = "vertical" + self._ordered = False + + # Build trees. 
+ self._keys = {} # treeseg -> key + self._expanded_trees = {} + self._collapsed_trees = {} + self._nodes = [] + self._leaves = [] + # self._locs = [] + self._make_collapsed_trees(canvas, t, ()) + self._treeseg = self._make_expanded_tree(canvas, t, ()) + self._add_child_widget(self._treeseg) + + CanvasWidget.__init__(self, canvas, **attribs) + + def expanded_tree(self, *path_to_tree): + """ + Return the ``TreeSegmentWidget`` for the specified subtree. + + :param path_to_tree: A list of indices i1, i2, ..., in, where + the desired widget is the widget corresponding to + ``tree.children()[i1].children()[i2]....children()[in]``. + For the root, the path is ``()``. + """ + return self._expanded_trees[path_to_tree] + + def collapsed_tree(self, *path_to_tree): + """ + Return the ``TreeSegmentWidget`` for the specified subtree. + + :param path_to_tree: A list of indices i1, i2, ..., in, where + the desired widget is the widget corresponding to + ``tree.children()[i1].children()[i2]....children()[in]``. + For the root, the path is ``()``. + """ + return self._collapsed_trees[path_to_tree] + + def bind_click_trees(self, callback, button=1): + """ + Add a binding to all tree segments. + """ + for tseg in list(self._expanded_trees.values()): + tseg.bind_click(callback, button) + for tseg in list(self._collapsed_trees.values()): + tseg.bind_click(callback, button) + + def bind_drag_trees(self, callback, button=1): + """ + Add a binding to all tree segments. + """ + for tseg in list(self._expanded_trees.values()): + tseg.bind_drag(callback, button) + for tseg in list(self._collapsed_trees.values()): + tseg.bind_drag(callback, button) + + def bind_click_leaves(self, callback, button=1): + """ + Add a binding to all leaves. + """ + for leaf in self._leaves: + leaf.bind_click(callback, button) + for leaf in self._leaves: + leaf.bind_click(callback, button) + + def bind_drag_leaves(self, callback, button=1): + """ + Add a binding to all leaves. 
+ """ + for leaf in self._leaves: + leaf.bind_drag(callback, button) + for leaf in self._leaves: + leaf.bind_drag(callback, button) + + def bind_click_nodes(self, callback, button=1): + """ + Add a binding to all nodes. + """ + for node in self._nodes: + node.bind_click(callback, button) + for node in self._nodes: + node.bind_click(callback, button) + + def bind_drag_nodes(self, callback, button=1): + """ + Add a binding to all nodes. + """ + for node in self._nodes: + node.bind_drag(callback, button) + for node in self._nodes: + node.bind_drag(callback, button) + + def _make_collapsed_trees(self, canvas, t, key): + if not isinstance(t, Tree): + return + make_node = self._make_node + make_leaf = self._make_leaf + + node = make_node(canvas, t.label(), **self._nodeattribs) + self._nodes.append(node) + leaves = [make_leaf(canvas, l, **self._leafattribs) for l in t.leaves()] + self._leaves += leaves + treeseg = TreeSegmentWidget( + canvas, + node, + leaves, + roof=1, + color=self._roof_color, + fill=self._roof_fill, + width=self._line_width, + ) + + self._collapsed_trees[key] = treeseg + self._keys[treeseg] = key + # self._add_child_widget(treeseg) + treeseg.hide() + + # Build trees for children. 
+ for i in range(len(t)): + child = t[i] + self._make_collapsed_trees(canvas, child, key + (i,)) + + def _make_expanded_tree(self, canvas, t, key): + make_node = self._make_node + make_leaf = self._make_leaf + + if isinstance(t, Tree): + node = make_node(canvas, t.label(), **self._nodeattribs) + self._nodes.append(node) + children = t + subtrees = [ + self._make_expanded_tree(canvas, children[i], key + (i,)) + for i in range(len(children)) + ] + treeseg = TreeSegmentWidget( + canvas, node, subtrees, color=self._line_color, width=self._line_width + ) + self._expanded_trees[key] = treeseg + self._keys[treeseg] = key + return treeseg + else: + leaf = make_leaf(canvas, t, **self._leafattribs) + self._leaves.append(leaf) + return leaf + + def __setitem__(self, attr, value): + if attr[:5] == "node_": + for node in self._nodes: + node[attr[5:]] = value + elif attr[:5] == "leaf_": + for leaf in self._leaves: + leaf[attr[5:]] = value + elif attr == "line_color": + self._line_color = value + for tseg in list(self._expanded_trees.values()): + tseg["color"] = value + elif attr == "line_width": + self._line_width = value + for tseg in list(self._expanded_trees.values()): + tseg["width"] = value + for tseg in list(self._collapsed_trees.values()): + tseg["width"] = value + elif attr == "roof_color": + self._roof_color = value + for tseg in list(self._collapsed_trees.values()): + tseg["color"] = value + elif attr == "roof_fill": + self._roof_fill = value + for tseg in list(self._collapsed_trees.values()): + tseg["fill"] = value + elif attr == "shapeable": + self._shapeable = value + for tseg in list(self._expanded_trees.values()): + tseg["draggable"] = value + for tseg in list(self._collapsed_trees.values()): + tseg["draggable"] = value + for leaf in self._leaves: + leaf["draggable"] = value + elif attr == "xspace": + self._xspace = value + for tseg in list(self._expanded_trees.values()): + tseg["xspace"] = value + for tseg in list(self._collapsed_trees.values()): + tseg["xspace"] 
= value + self.manage() + elif attr == "yspace": + self._yspace = value + for tseg in list(self._expanded_trees.values()): + tseg["yspace"] = value + for tseg in list(self._collapsed_trees.values()): + tseg["yspace"] = value + self.manage() + elif attr == "orientation": + self._orientation = value + for tseg in list(self._expanded_trees.values()): + tseg["orientation"] = value + for tseg in list(self._collapsed_trees.values()): + tseg["orientation"] = value + self.manage() + elif attr == "ordered": + self._ordered = value + for tseg in list(self._expanded_trees.values()): + tseg["ordered"] = value + for tseg in list(self._collapsed_trees.values()): + tseg["ordered"] = value + else: + CanvasWidget.__setitem__(self, attr, value) + + def __getitem__(self, attr): + if attr[:5] == "node_": + return self._nodeattribs.get(attr[5:], None) + elif attr[:5] == "leaf_": + return self._leafattribs.get(attr[5:], None) + elif attr[:4] == "loc_": + return self._locattribs.get(attr[4:], None) + elif attr == "line_color": + return self._line_color + elif attr == "line_width": + return self._line_width + elif attr == "roof_color": + return self._roof_color + elif attr == "roof_fill": + return self._roof_fill + elif attr == "shapeable": + return self._shapeable + elif attr == "xspace": + return self._xspace + elif attr == "yspace": + return self._yspace + elif attr == "orientation": + return self._orientation + else: + return CanvasWidget.__getitem__(self, attr) + + def _tags(self): + return [] + + def _manage(self): + segs = list(self._expanded_trees.values()) + list( + self._collapsed_trees.values() + ) + for tseg in segs: + if tseg.hidden(): + tseg.show() + tseg.manage() + tseg.hide() + + def toggle_collapsed(self, treeseg): + """ + Collapse/expand a tree. 
+ """ + old_treeseg = treeseg + if old_treeseg["roof"]: + new_treeseg = self._expanded_trees[self._keys[old_treeseg]] + else: + new_treeseg = self._collapsed_trees[self._keys[old_treeseg]] + + # Replace the old tree with the new tree. + if old_treeseg.parent() is self: + self._remove_child_widget(old_treeseg) + self._add_child_widget(new_treeseg) + self._treeseg = new_treeseg + else: + old_treeseg.parent().replace_child(old_treeseg, new_treeseg) + + # Move the new tree to where the old tree was. Show it first, + # so we can find its bounding box. + new_treeseg.show() + (newx, newy) = new_treeseg.label().bbox()[:2] + (oldx, oldy) = old_treeseg.label().bbox()[:2] + new_treeseg.move(oldx - newx, oldy - newy) + + # Hide the old tree + old_treeseg.hide() + + # We could do parent.manage() here instead, if we wanted. + new_treeseg.parent().update(new_treeseg) + + +##////////////////////////////////////////////////////// +## draw_trees +##////////////////////////////////////////////////////// + + +class TreeView: + def __init__(self, *trees): + from math import ceil, sqrt + + self._trees = trees + + self._top = Tk() + self._top.title("NLTK") + self._top.bind("", self.destroy) + self._top.bind("", self.destroy) + + cf = self._cframe = CanvasFrame(self._top) + self._top.bind("", self._cframe.print_to_file) + + # Size is variable. + self._size = IntVar(self._top) + self._size.set(12) + bold = ("helvetica", -self._size.get(), "bold") + helv = ("helvetica", -self._size.get()) + + # Lay the trees out in a square. 
+ self._width = int(ceil(sqrt(len(trees)))) + self._widgets = [] + for i in range(len(trees)): + widget = TreeWidget( + cf.canvas(), + trees[i], + node_font=bold, + leaf_color="#008040", + node_color="#004080", + roof_color="#004040", + roof_fill="white", + line_color="#004040", + draggable=1, + leaf_font=helv, + ) + widget.bind_click_trees(widget.toggle_collapsed) + self._widgets.append(widget) + cf.add_widget(widget, 0, 0) + + self._layout() + self._cframe.pack(expand=1, fill="both") + self._init_menubar() + + def _layout(self): + i = x = y = ymax = 0 + width = self._width + for i in range(len(self._widgets)): + widget = self._widgets[i] + (oldx, oldy) = widget.bbox()[:2] + if i % width == 0: + y = ymax + x = 0 + widget.move(x - oldx, y - oldy) + x = widget.bbox()[2] + 10 + ymax = max(ymax, widget.bbox()[3] + 10) + + def _init_menubar(self): + menubar = Menu(self._top) + + filemenu = Menu(menubar, tearoff=0) + filemenu.add_command( + label="Print to Postscript", + underline=0, + command=self._cframe.print_to_file, + accelerator="Ctrl-p", + ) + filemenu.add_command( + label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-x" + ) + menubar.add_cascade(label="File", underline=0, menu=filemenu) + + zoommenu = Menu(menubar, tearoff=0) + zoommenu.add_radiobutton( + label="Tiny", + variable=self._size, + underline=0, + value=10, + command=self.resize, + ) + zoommenu.add_radiobutton( + label="Small", + variable=self._size, + underline=0, + value=12, + command=self.resize, + ) + zoommenu.add_radiobutton( + label="Medium", + variable=self._size, + underline=0, + value=14, + command=self.resize, + ) + zoommenu.add_radiobutton( + label="Large", + variable=self._size, + underline=0, + value=28, + command=self.resize, + ) + zoommenu.add_radiobutton( + label="Huge", + variable=self._size, + underline=0, + value=50, + command=self.resize, + ) + menubar.add_cascade(label="Zoom", underline=0, menu=zoommenu) + + self._top.config(menu=menubar) + + def resize(self, *e): + 
bold = ("helvetica", -self._size.get(), "bold") + helv = ("helvetica", -self._size.get()) + xspace = self._size.get() + yspace = self._size.get() + for widget in self._widgets: + widget["node_font"] = bold + widget["leaf_font"] = helv + widget["xspace"] = xspace + widget["yspace"] = yspace + if self._size.get() < 20: + widget["line_width"] = 1 + elif self._size.get() < 30: + widget["line_width"] = 2 + else: + widget["line_width"] = 3 + self._layout() + + def destroy(self, *e): + if self._top is None: + return + self._top.destroy() + self._top = None + + def mainloop(self, *args, **kwargs): + """ + Enter the Tkinter mainloop. This function must be called if + this demo is created from a non-interactive program (e.g. + from a secript); otherwise, the demo will close as soon as + the script completes. + """ + if in_idle(): + return + self._top.mainloop(*args, **kwargs) + + +def draw_trees(*trees): + """ + Open a new window containing a graphical diagram of the given + trees. + + :rtype: None + """ + TreeView(*trees).mainloop() + return + + +##////////////////////////////////////////////////////// +## Demo Code +##////////////////////////////////////////////////////// + + +def demo(): + import random + + def fill(cw): + cw["fill"] = "#%06d" % random.randint(0, 999999) + + cf = CanvasFrame(width=550, height=450, closeenough=2) + + t = Tree.fromstring( + """ + (S (NP the very big cat) + (VP (Adv sorta) (V saw) (NP (Det the) (N dog))))""" + ) + + tc = TreeWidget( + cf.canvas(), + t, + draggable=1, + node_font=("helvetica", -14, "bold"), + leaf_font=("helvetica", -12, "italic"), + roof_fill="white", + roof_color="black", + leaf_color="green4", + node_color="blue2", + ) + cf.add_widget(tc, 10, 10) + + def boxit(canvas, text): + big = ("helvetica", -16, "bold") + return BoxWidget(canvas, TextWidget(canvas, text, font=big), fill="green") + + def ovalit(canvas, text): + return OvalWidget(canvas, TextWidget(canvas, text), fill="cyan") + + treetok = Tree.fromstring("(S (NP this 
tree) (VP (V is) (AdjP shapeable)))") + tc2 = TreeWidget(cf.canvas(), treetok, boxit, ovalit, shapeable=1) + + def color(node): + node["color"] = "#%04d00" % random.randint(0, 9999) + + def color2(treeseg): + treeseg.label()["fill"] = "#%06d" % random.randint(0, 9999) + treeseg.label().child()["color"] = "white" + + tc.bind_click_trees(tc.toggle_collapsed) + tc2.bind_click_trees(tc2.toggle_collapsed) + tc.bind_click_nodes(color, 3) + tc2.expanded_tree(1).bind_click(color2, 3) + tc2.expanded_tree().bind_click(color2, 3) + + paren = ParenWidget(cf.canvas(), tc2) + cf.add_widget(paren, tc.bbox()[2] + 10, 10) + + tree3 = Tree.fromstring( + """ + (S (NP this tree) (AUX was) + (VP (V built) (PP (P with) (NP (N tree_to_treesegment)))))""" + ) + tc3 = tree_to_treesegment( + cf.canvas(), tree3, tree_color="green4", tree_xspace=2, tree_width=2 + ) + tc3["draggable"] = 1 + cf.add_widget(tc3, 10, tc.bbox()[3] + 10) + + def orientswitch(treewidget): + if treewidget["orientation"] == "horizontal": + treewidget.expanded_tree(1, 1).subtrees()[0].set_text("vertical") + treewidget.collapsed_tree(1, 1).subtrees()[0].set_text("vertical") + treewidget.collapsed_tree(1).subtrees()[1].set_text("vertical") + treewidget.collapsed_tree().subtrees()[3].set_text("vertical") + treewidget["orientation"] = "vertical" + else: + treewidget.expanded_tree(1, 1).subtrees()[0].set_text("horizontal") + treewidget.collapsed_tree(1, 1).subtrees()[0].set_text("horizontal") + treewidget.collapsed_tree(1).subtrees()[1].set_text("horizontal") + treewidget.collapsed_tree().subtrees()[3].set_text("horizontal") + treewidget["orientation"] = "horizontal" + + text = """ +Try clicking, right clicking, and dragging +different elements of each of the trees. +The top-left tree is a TreeWidget built from +a Tree. The top-right is a TreeWidget built +from a Tree, using non-default widget +constructors for the nodes & leaves (BoxWidget +and OvalWidget). 
The bottom-left tree is +built from tree_to_treesegment.""" + twidget = TextWidget(cf.canvas(), text.strip()) + textbox = BoxWidget(cf.canvas(), twidget, fill="white", draggable=1) + cf.add_widget(textbox, tc3.bbox()[2] + 10, tc2.bbox()[3] + 10) + + tree4 = Tree.fromstring("(S (NP this tree) (VP (V is) (Adj horizontal)))") + tc4 = TreeWidget( + cf.canvas(), + tree4, + draggable=1, + line_color="brown2", + roof_color="brown2", + node_font=("helvetica", -12, "bold"), + node_color="brown4", + orientation="horizontal", + ) + tc4.manage() + cf.add_widget(tc4, tc3.bbox()[2] + 10, textbox.bbox()[3] + 10) + tc4.bind_click(orientswitch) + tc4.bind_click_trees(tc4.toggle_collapsed, 3) + + # Run mainloop + cf.mainloop() + + +if __name__ == "__main__": + demo() diff --git a/lib/python3.10/site-packages/nltk/draw/util.py b/lib/python3.10/site-packages/nltk/draw/util.py new file mode 100644 index 0000000000000000000000000000000000000000..31ae442099a892a6e84a0dbf3ff284d7aa184b3f --- /dev/null +++ b/lib/python3.10/site-packages/nltk/draw/util.py @@ -0,0 +1,2575 @@ +# Natural Language Toolkit: Drawing utilities +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +Tools for graphically displaying and interacting with the objects and +processing classes defined by the Toolkit. These tools are primarily +intended to help students visualize the objects that they create. + +The graphical tools are typically built using "canvas widgets", each +of which encapsulates the graphical elements and bindings used to +display a complex object on a Tkinter ``Canvas``. For example, NLTK +defines canvas widgets for displaying trees and directed graphs, as +well as a number of simpler widgets. These canvas widgets make it +easier to build new graphical tools and demos. See the class +documentation for ``CanvasWidget`` for more information. 
+ +The ``nltk.draw`` module defines the abstract ``CanvasWidget`` base +class, and a number of simple canvas widgets. The remaining canvas +widgets are defined by submodules, such as ``nltk.draw.tree``. + +The ``nltk.draw`` module also defines ``CanvasFrame``, which +encapsulates a ``Canvas`` and its scrollbars. It uses a +``ScrollWatcherWidget`` to ensure that all canvas widgets contained on +its canvas are within the scroll region. + +Acknowledgements: Many of the ideas behind the canvas widget system +are derived from ``CLIG``, a Tk-based grapher for linguistic data +structures. For more information, see the CLIG +homepage (http://www.ags.uni-sb.de/~konrad/clig.html). + +""" +from abc import ABCMeta, abstractmethod +from tkinter import ( + RAISED, + Button, + Canvas, + Entry, + Frame, + Label, + Menu, + Menubutton, + Scrollbar, + StringVar, + Text, + Tk, + Toplevel, + Widget, +) +from tkinter.filedialog import asksaveasfilename + +from nltk.util import in_idle + +##////////////////////////////////////////////////////// +## CanvasWidget +##////////////////////////////////////////////////////// + + +class CanvasWidget(metaclass=ABCMeta): + """ + A collection of graphical elements and bindings used to display a + complex object on a Tkinter ``Canvas``. A canvas widget is + responsible for managing the ``Canvas`` tags and callback bindings + necessary to display and interact with the object. Canvas widgets + are often organized into hierarchies, where parent canvas widgets + control aspects of their child widgets. + + Each canvas widget is bound to a single ``Canvas``. This ``Canvas`` + is specified as the first argument to the ``CanvasWidget``'s + constructor. + + Attributes. Each canvas widget can support a variety of + "attributes", which control how the canvas widget is displayed. + Some typical examples attributes are ``color``, ``font``, and + ``radius``. Each attribute has a default value. 
This default + value can be overridden in the constructor, using keyword + arguments of the form ``attribute=value``: + + >>> from nltk.draw.util import TextWidget + >>> cn = TextWidget(Canvas(), 'test', color='red') # doctest: +SKIP + + Attribute values can also be changed after a canvas widget has + been constructed, using the ``__setitem__`` operator: + + >>> cn['font'] = 'times' # doctest: +SKIP + + The current value of an attribute value can be queried using the + ``__getitem__`` operator: + + >>> cn['color'] # doctest: +SKIP + 'red' + + For a list of the attributes supported by a type of canvas widget, + see its class documentation. + + Interaction. The attribute ``'draggable'`` controls whether the + user can drag a canvas widget around the canvas. By default, + canvas widgets are not draggable. + + ``CanvasWidget`` provides callback support for two types of user + interaction: clicking and dragging. The method ``bind_click`` + registers a callback function that is called whenever the canvas + widget is clicked. The method ``bind_drag`` registers a callback + function that is called after the canvas widget is dragged. If + the user clicks or drags a canvas widget with no registered + callback function, then the interaction event will propagate to + its parent. For each canvas widget, only one callback function + may be registered for an interaction event. Callback functions + can be deregistered with the ``unbind_click`` and ``unbind_drag`` + methods. + + Subclassing. ``CanvasWidget`` is an abstract class. Subclasses + are required to implement the following methods: + + - ``__init__``: Builds a new canvas widget. It must perform the + following three tasks (in order): + + - Create any new graphical elements. + - Call ``_add_child_widget`` on each child widget. + - Call the ``CanvasWidget`` constructor. 
+ - ``_tags``: Returns a list of the canvas tags for all graphical + elements managed by this canvas widget, not including + graphical elements managed by its child widgets. + - ``_manage``: Arranges the child widgets of this canvas widget. + This is typically only called when the canvas widget is + created. + - ``_update``: Update this canvas widget in response to a + change in a single child. + + For a ``CanvasWidget`` with no child widgets, the default + definitions for ``_manage`` and ``_update`` may be used. + + If a subclass defines any attributes, then it should implement + ``__getitem__`` and ``__setitem__``. If either of these methods is + called with an unknown attribute, then they should propagate the + request to ``CanvasWidget``. + + Most subclasses implement a number of additional methods that + modify the ``CanvasWidget`` in some way. These methods must call + ``parent.update(self)`` after making any changes to the canvas + widget's graphical elements. The canvas widget must also call + ``parent.update(self)`` after changing any attribute value that + affects the shape or position of the canvas widget's graphical + elements. + + :type __canvas: Tkinter.Canvas + :ivar __canvas: This ``CanvasWidget``'s canvas. + + :type __parent: CanvasWidget or None + :ivar __parent: This ``CanvasWidget``'s hierarchical parent widget. + :type __children: list(CanvasWidget) + :ivar __children: This ``CanvasWidget``'s hierarchical child widgets. + + :type __updating: bool + :ivar __updating: Is this canvas widget currently performing an + update? If it is, then it will ignore any new update requests + from child widgets. + + :type __draggable: bool + :ivar __draggable: Is this canvas widget draggable? + :type __press: event + :ivar __press: The ButtonPress event that we're currently handling. 
+ :type __drag_x: int + :ivar __drag_x: Where it's been moved to (to find dx) + :type __drag_y: int + :ivar __drag_y: Where it's been moved to (to find dy) + :type __callbacks: dictionary + :ivar __callbacks: Registered callbacks. Currently, four keys are + used: ``1``, ``2``, ``3``, and ``'drag'``. The values are + callback functions. Each callback function takes a single + argument, which is the ``CanvasWidget`` that triggered the + callback. + """ + + def __init__(self, canvas, parent=None, **attribs): + """ + Create a new canvas widget. This constructor should only be + called by subclass constructors; and it should be called only + "after" the subclass has constructed all graphical canvas + objects and registered all child widgets. + + :param canvas: This canvas widget's canvas. + :type canvas: Tkinter.Canvas + :param parent: This canvas widget's hierarchical parent. + :type parent: CanvasWidget + :param attribs: The new canvas widget's attributes. + """ + if self.__class__ == CanvasWidget: + raise TypeError("CanvasWidget is an abstract base class") + + if not isinstance(canvas, Canvas): + raise TypeError("Expected a canvas!") + + self.__canvas = canvas + self.__parent = parent + + # If the subclass constructor called _add_child_widget, then + # self.__children will already exist. + if not hasattr(self, "_CanvasWidget__children"): + self.__children = [] + + # Is this widget hidden? + self.__hidden = 0 + + # Update control (prevents infinite loops) + self.__updating = 0 + + # Button-press and drag callback handling. + self.__press = None + self.__drag_x = self.__drag_y = 0 + self.__callbacks = {} + self.__draggable = 0 + + # Set up attributes. 
+ for (attr, value) in list(attribs.items()): + self[attr] = value + + # Manage this canvas widget + self._manage() + + # Register any new bindings + for tag in self._tags(): + self.__canvas.tag_bind(tag, "", self.__press_cb) + self.__canvas.tag_bind(tag, "", self.__press_cb) + self.__canvas.tag_bind(tag, "", self.__press_cb) + + ##////////////////////////////////////////////////////// + ## Inherited methods. + ##////////////////////////////////////////////////////// + + def bbox(self): + """ + :return: A bounding box for this ``CanvasWidget``. The bounding + box is a tuple of four coordinates, *(xmin, ymin, xmax, ymax)*, + for a rectangle which encloses all of the canvas + widget's graphical elements. Bounding box coordinates are + specified with respect to the coordinate space of the ``Canvas``. + :rtype: tuple(int, int, int, int) + """ + if self.__hidden: + return (0, 0, 0, 0) + if len(self.tags()) == 0: + raise ValueError("No tags") + return self.__canvas.bbox(*self.tags()) + + def width(self): + """ + :return: The width of this canvas widget's bounding box, in + its ``Canvas``'s coordinate space. + :rtype: int + """ + if len(self.tags()) == 0: + raise ValueError("No tags") + bbox = self.__canvas.bbox(*self.tags()) + return bbox[2] - bbox[0] + + def height(self): + """ + :return: The height of this canvas widget's bounding box, in + its ``Canvas``'s coordinate space. + :rtype: int + """ + if len(self.tags()) == 0: + raise ValueError("No tags") + bbox = self.__canvas.bbox(*self.tags()) + return bbox[3] - bbox[1] + + def parent(self): + """ + :return: The hierarchical parent of this canvas widget. + ``self`` is considered a subpart of its parent for + purposes of user interaction. + :rtype: CanvasWidget or None + """ + return self.__parent + + def child_widgets(self): + """ + :return: A list of the hierarchical children of this canvas + widget. These children are considered part of ``self`` + for purposes of user interaction. 
+ :rtype: list of CanvasWidget + """ + return self.__children + + def canvas(self): + """ + :return: The canvas that this canvas widget is bound to. + :rtype: Tkinter.Canvas + """ + return self.__canvas + + def move(self, dx, dy): + """ + Move this canvas widget by a given distance. In particular, + shift the canvas widget right by ``dx`` pixels, and down by + ``dy`` pixels. Both ``dx`` and ``dy`` may be negative, resulting + in leftward or upward movement. + + :type dx: int + :param dx: The number of pixels to move this canvas widget + rightwards. + :type dy: int + :param dy: The number of pixels to move this canvas widget + downwards. + :rtype: None + """ + if dx == dy == 0: + return + for tag in self.tags(): + self.__canvas.move(tag, dx, dy) + if self.__parent: + self.__parent.update(self) + + def moveto(self, x, y, anchor="NW"): + """ + Move this canvas widget to the given location. In particular, + shift the canvas widget such that the corner or side of the + bounding box specified by ``anchor`` is at location (``x``, + ``y``). + + :param x,y: The location that the canvas widget should be moved + to. + :param anchor: The corner or side of the canvas widget that + should be moved to the specified location. ``'N'`` + specifies the top center; ``'NE'`` specifies the top right + corner; etc. + """ + x1, y1, x2, y2 = self.bbox() + if anchor == "NW": + self.move(x - x1, y - y1) + if anchor == "N": + self.move(x - x1 / 2 - x2 / 2, y - y1) + if anchor == "NE": + self.move(x - x2, y - y1) + if anchor == "E": + self.move(x - x2, y - y1 / 2 - y2 / 2) + if anchor == "SE": + self.move(x - x2, y - y2) + if anchor == "S": + self.move(x - x1 / 2 - x2 / 2, y - y2) + if anchor == "SW": + self.move(x - x1, y - y2) + if anchor == "W": + self.move(x - x1, y - y1 / 2 - y2 / 2) + + def destroy(self): + """ + Remove this ``CanvasWidget`` from its ``Canvas``. After a + ``CanvasWidget`` has been destroyed, it should not be accessed. 
+ + Note that you only need to destroy a top-level + ``CanvasWidget``; its child widgets will be destroyed + automatically. If you destroy a non-top-level + ``CanvasWidget``, then the entire top-level widget will be + destroyed. + + :raise ValueError: if this ``CanvasWidget`` has a parent. + :rtype: None + """ + if self.__parent is not None: + self.__parent.destroy() + return + + for tag in self.tags(): + self.__canvas.tag_unbind(tag, "") + self.__canvas.tag_unbind(tag, "") + self.__canvas.tag_unbind(tag, "") + self.__canvas.delete(*self.tags()) + self.__canvas = None + + def update(self, child): + """ + Update the graphical display of this canvas widget, and all of + its ancestors, in response to a change in one of this canvas + widget's children. + + :param child: The child widget that changed. + :type child: CanvasWidget + """ + if self.__hidden or child.__hidden: + return + # If we're already updating, then do nothing. This prevents + # infinite loops when _update modifies its children. + if self.__updating: + return + self.__updating = 1 + + # Update this CanvasWidget. + self._update(child) + + # Propagate update request to the parent. + if self.__parent: + self.__parent.update(self) + + # We're done updating. + self.__updating = 0 + + def manage(self): + """ + Arrange this canvas widget and all of its descendants. + + :rtype: None + """ + if self.__hidden: + return + for child in self.__children: + child.manage() + self._manage() + + def tags(self): + """ + :return: a list of the canvas tags for all graphical + elements managed by this canvas widget, including + graphical elements managed by its child widgets. + :rtype: list of int + """ + if self.__canvas is None: + raise ValueError("Attempt to access a destroyed canvas widget") + tags = [] + tags += self._tags() + for child in self.__children: + tags += child.tags() + return tags + + def __setitem__(self, attr, value): + """ + Set the value of the attribute ``attr`` to ``value``. 
See the + class documentation for a list of attributes supported by this + canvas widget. + + :rtype: None + """ + if attr == "draggable": + self.__draggable = value + else: + raise ValueError("Unknown attribute %r" % attr) + + def __getitem__(self, attr): + """ + :return: the value of the attribute ``attr``. See the class + documentation for a list of attributes supported by this + canvas widget. + :rtype: (any) + """ + if attr == "draggable": + return self.__draggable + else: + raise ValueError("Unknown attribute %r" % attr) + + def __repr__(self): + """ + :return: a string representation of this canvas widget. + :rtype: str + """ + return "<%s>" % self.__class__.__name__ + + def hide(self): + """ + Temporarily hide this canvas widget. + + :rtype: None + """ + self.__hidden = 1 + for tag in self.tags(): + self.__canvas.itemconfig(tag, state="hidden") + + def show(self): + """ + Show a hidden canvas widget. + + :rtype: None + """ + self.__hidden = 0 + for tag in self.tags(): + self.__canvas.itemconfig(tag, state="normal") + + def hidden(self): + """ + :return: True if this canvas widget is hidden. + :rtype: bool + """ + return self.__hidden + + ##////////////////////////////////////////////////////// + ## Callback interface + ##////////////////////////////////////////////////////// + + def bind_click(self, callback, button=1): + """ + Register a new callback that will be called whenever this + ``CanvasWidget`` is clicked on. + + :type callback: function + :param callback: The callback function that will be called + whenever this ``CanvasWidget`` is clicked. This function + will be called with this ``CanvasWidget`` as its argument. + :type button: int + :param button: Which button the user should use to click on + this ``CanvasWidget``. Typically, this should be 1 (left + button), 3 (right button), or 2 (middle button). 
+ """ + self.__callbacks[button] = callback + + def bind_drag(self, callback): + """ + Register a new callback that will be called after this + ``CanvasWidget`` is dragged. This implicitly makes this + ``CanvasWidget`` draggable. + + :type callback: function + :param callback: The callback function that will be called + whenever this ``CanvasWidget`` is clicked. This function + will be called with this ``CanvasWidget`` as its argument. + """ + self.__draggable = 1 + self.__callbacks["drag"] = callback + + def unbind_click(self, button=1): + """ + Remove a callback that was registered with ``bind_click``. + + :type button: int + :param button: Which button the user should use to click on + this ``CanvasWidget``. Typically, this should be 1 (left + button), 3 (right button), or 2 (middle button). + """ + try: + del self.__callbacks[button] + except: + pass + + def unbind_drag(self): + """ + Remove a callback that was registered with ``bind_drag``. + """ + try: + del self.__callbacks["drag"] + except: + pass + + ##////////////////////////////////////////////////////// + ## Callback internals + ##////////////////////////////////////////////////////// + + def __press_cb(self, event): + """ + Handle a button-press event: + - record the button press event in ``self.__press`` + - register a button-release callback. + - if this CanvasWidget or any of its ancestors are + draggable, then register the appropriate motion callback. + """ + # If we're already waiting for a button release, then ignore + # this new button press. + if ( + self.__canvas.bind("") + or self.__canvas.bind("") + or self.__canvas.bind("") + ): + return + + # Unbind motion (just in case; this shouldn't be necessary) + self.__canvas.unbind("") + + # Record the button press event. + self.__press = event + + # If any ancestor is draggable, set up a motion callback. 
+ # (Only if they pressed button number 1) + if event.num == 1: + widget = self + while widget is not None: + if widget["draggable"]: + widget.__start_drag(event) + break + widget = widget.parent() + + # Set up the button release callback. + self.__canvas.bind("" % event.num, self.__release_cb) + + def __start_drag(self, event): + """ + Begin dragging this object: + - register a motion callback + - record the drag coordinates + """ + self.__canvas.bind("", self.__motion_cb) + self.__drag_x = event.x + self.__drag_y = event.y + + def __motion_cb(self, event): + """ + Handle a motion event: + - move this object to the new location + - record the new drag coordinates + """ + self.move(event.x - self.__drag_x, event.y - self.__drag_y) + self.__drag_x = event.x + self.__drag_y = event.y + + def __release_cb(self, event): + """ + Handle a release callback: + - unregister motion & button release callbacks. + - decide whether they clicked, dragged, or cancelled + - call the appropriate handler. + """ + # Unbind the button release & motion callbacks. + self.__canvas.unbind("" % event.num) + self.__canvas.unbind("") + + # Is it a click or a drag? + if ( + event.time - self.__press.time < 100 + and abs(event.x - self.__press.x) + abs(event.y - self.__press.y) < 5 + ): + # Move it back, if we were dragging. + if self.__draggable and event.num == 1: + self.move( + self.__press.x - self.__drag_x, self.__press.y - self.__drag_y + ) + self.__click(event.num) + elif event.num == 1: + self.__drag() + + self.__press = None + + def __drag(self): + """ + If this ``CanvasWidget`` has a drag callback, then call it; + otherwise, find the closest ancestor with a drag callback, and + call it. If no ancestors have a drag callback, do nothing. 
+ """ + if self.__draggable: + if "drag" in self.__callbacks: + cb = self.__callbacks["drag"] + try: + cb(self) + except: + print("Error in drag callback for %r" % self) + elif self.__parent is not None: + self.__parent.__drag() + + def __click(self, button): + """ + If this ``CanvasWidget`` has a drag callback, then call it; + otherwise, find the closest ancestor with a click callback, and + call it. If no ancestors have a click callback, do nothing. + """ + if button in self.__callbacks: + cb = self.__callbacks[button] + # try: + cb(self) + # except: + # print('Error in click callback for %r' % self) + # raise + elif self.__parent is not None: + self.__parent.__click(button) + + ##////////////////////////////////////////////////////// + ## Child/parent Handling + ##////////////////////////////////////////////////////// + + def _add_child_widget(self, child): + """ + Register a hierarchical child widget. The child will be + considered part of this canvas widget for purposes of user + interaction. ``_add_child_widget`` has two direct effects: + - It sets ``child``'s parent to this canvas widget. + - It adds ``child`` to the list of canvas widgets returned by + the ``child_widgets`` member function. + + :param child: The new child widget. ``child`` must not already + have a parent. + :type child: CanvasWidget + """ + if not hasattr(self, "_CanvasWidget__children"): + self.__children = [] + if child.__parent is not None: + raise ValueError(f"{child} already has a parent") + child.__parent = self + self.__children.append(child) + + def _remove_child_widget(self, child): + """ + Remove a hierarchical child widget. This child will no longer + be considered part of this canvas widget for purposes of user + interaction. ``_add_child_widget`` has two direct effects: + - It sets ``child``'s parent to None. + - It removes ``child`` from the list of canvas widgets + returned by the ``child_widgets`` member function. + + :param child: The child widget to remove. 
``child`` must be a + child of this canvas widget. + :type child: CanvasWidget + """ + self.__children.remove(child) + child.__parent = None + + ##////////////////////////////////////////////////////// + ## Defined by subclass + ##////////////////////////////////////////////////////// + + @abstractmethod + def _tags(self): + """ + :return: a list of canvas tags for all graphical elements + managed by this canvas widget, not including graphical + elements managed by its child widgets. + :rtype: list of int + """ + + def _manage(self): + """ + Arrange the child widgets of this canvas widget. This method + is called when the canvas widget is initially created. It is + also called if the user calls the ``manage`` method on this + canvas widget or any of its ancestors. + + :rtype: None + """ + + def _update(self, child): + """ + Update this canvas widget in response to a change in one of + its children. + + :param child: The child that changed. + :type child: CanvasWidget + :rtype: None + """ + + +##////////////////////////////////////////////////////// +## Basic widgets. +##////////////////////////////////////////////////////// + + +class TextWidget(CanvasWidget): + """ + A canvas widget that displays a single string of text. + + Attributes: + - ``color``: the color of the text. + - ``font``: the font used to display the text. + - ``justify``: justification for multi-line texts. Valid values + are ``left``, ``center``, and ``right``. + - ``width``: the width of the text. If the text is wider than + this width, it will be line-wrapped at whitespace. + - ``draggable``: whether the text can be dragged by the user. + """ + + def __init__(self, canvas, text, **attribs): + """ + Create a new text widget. + + :type canvas: Tkinter.Canvas + :param canvas: This canvas widget's canvas. + :type text: str + :param text: The string of text to display. + :param attribs: The new canvas widget's attributes. 
+ """ + self._text = text + self._tag = canvas.create_text(1, 1, text=text) + CanvasWidget.__init__(self, canvas, **attribs) + + def __setitem__(self, attr, value): + if attr in ("color", "font", "justify", "width"): + if attr == "color": + attr = "fill" + self.canvas().itemconfig(self._tag, {attr: value}) + else: + CanvasWidget.__setitem__(self, attr, value) + + def __getitem__(self, attr): + if attr == "width": + return int(self.canvas().itemcget(self._tag, attr)) + elif attr in ("color", "font", "justify"): + if attr == "color": + attr = "fill" + return self.canvas().itemcget(self._tag, attr) + else: + return CanvasWidget.__getitem__(self, attr) + + def _tags(self): + return [self._tag] + + def text(self): + """ + :return: The text displayed by this text widget. + :rtype: str + """ + return self.canvas().itemcget(self._tag, "TEXT") + + def set_text(self, text): + """ + Change the text that is displayed by this text widget. + + :type text: str + :param text: The string of text to display. + :rtype: None + """ + self.canvas().itemconfig(self._tag, text=text) + if self.parent() is not None: + self.parent().update(self) + + def __repr__(self): + return "[Text: %r]" % self._text + + +class SymbolWidget(TextWidget): + """ + A canvas widget that displays special symbols, such as the + negation sign and the exists operator. Symbols are specified by + name. Currently, the following symbol names are defined: ``neg``, + ``disj``, ``conj``, ``lambda``, ``merge``, ``forall``, ``exists``, + ``subseteq``, ``subset``, ``notsubset``, ``emptyset``, ``imp``, + ``rightarrow``, ``equal``, ``notequal``, ``epsilon``. + + Attributes: + + - ``color``: the color of the text. + - ``draggable``: whether the text can be dragged by the user. + + :cvar SYMBOLS: A dictionary mapping from symbols to the character + in the ``symbol`` font used to render them. 
+ """ + + SYMBOLS = { + "neg": "\330", + "disj": "\332", + "conj": "\331", + "lambda": "\154", + "merge": "\304", + "forall": "\042", + "exists": "\044", + "subseteq": "\315", + "subset": "\314", + "notsubset": "\313", + "emptyset": "\306", + "imp": "\336", + "rightarrow": chr(222), #'\256', + "equal": "\75", + "notequal": "\271", + "intersection": "\307", + "union": "\310", + "epsilon": "e", + } + + def __init__(self, canvas, symbol, **attribs): + """ + Create a new symbol widget. + + :type canvas: Tkinter.Canvas + :param canvas: This canvas widget's canvas. + :type symbol: str + :param symbol: The name of the symbol to display. + :param attribs: The new canvas widget's attributes. + """ + attribs["font"] = "symbol" + TextWidget.__init__(self, canvas, "", **attribs) + self.set_symbol(symbol) + + def symbol(self): + """ + :return: the name of the symbol that is displayed by this + symbol widget. + :rtype: str + """ + return self._symbol + + def set_symbol(self, symbol): + """ + Change the symbol that is displayed by this symbol widget. + + :type symbol: str + :param symbol: The name of the symbol to display. + """ + if symbol not in SymbolWidget.SYMBOLS: + raise ValueError("Unknown symbol: %s" % symbol) + self._symbol = symbol + self.set_text(SymbolWidget.SYMBOLS[symbol]) + + def __repr__(self): + return "[Symbol: %r]" % self._symbol + + @staticmethod + def symbolsheet(size=20): + """ + Open a new Tkinter window that displays the entire alphabet + for the symbol font. This is useful for constructing the + ``SymbolWidget.SYMBOLS`` dictionary. 
+ """ + top = Tk() + + def destroy(e, top=top): + top.destroy() + + top.bind("q", destroy) + Button(top, text="Quit", command=top.destroy).pack(side="bottom") + text = Text(top, font=("helvetica", -size), width=20, height=30) + text.pack(side="left") + sb = Scrollbar(top, command=text.yview) + text["yscrollcommand"] = sb.set + sb.pack(side="right", fill="y") + text.tag_config("symbol", font=("symbol", -size)) + for i in range(256): + if i in (0, 10): + continue # null and newline + for k, v in list(SymbolWidget.SYMBOLS.items()): + if v == chr(i): + text.insert("end", "%-10s\t" % k) + break + else: + text.insert("end", "%-10d \t" % i) + text.insert("end", "[%s]\n" % chr(i), "symbol") + top.mainloop() + + +class AbstractContainerWidget(CanvasWidget): + """ + An abstract class for canvas widgets that contain a single child, + such as ``BoxWidget`` and ``OvalWidget``. Subclasses must define + a constructor, which should create any new graphical elements and + then call the ``AbstractCanvasContainer`` constructor. Subclasses + must also define the ``_update`` method and the ``_tags`` method; + and any subclasses that define attributes should define + ``__setitem__`` and ``__getitem__``. + """ + + def __init__(self, canvas, child, **attribs): + """ + Create a new container widget. This constructor should only + be called by subclass constructors. + + :type canvas: Tkinter.Canvas + :param canvas: This canvas widget's canvas. + :param child: The container's child widget. ``child`` must not + have a parent. + :type child: CanvasWidget + :param attribs: The new canvas widget's attributes. + """ + self._child = child + self._add_child_widget(child) + CanvasWidget.__init__(self, canvas, **attribs) + + def _manage(self): + self._update(self._child) + + def child(self): + """ + :return: The child widget contained by this container widget. 
+ :rtype: CanvasWidget + """ + return self._child + + def set_child(self, child): + """ + Change the child widget contained by this container widget. + + :param child: The new child widget. ``child`` must not have a + parent. + :type child: CanvasWidget + :rtype: None + """ + self._remove_child_widget(self._child) + self._add_child_widget(child) + self._child = child + self.update(child) + + def __repr__(self): + name = self.__class__.__name__ + if name[-6:] == "Widget": + name = name[:-6] + return f"[{name}: {self._child!r}]" + + +class BoxWidget(AbstractContainerWidget): + """ + A canvas widget that places a box around a child widget. + + Attributes: + - ``fill``: The color used to fill the interior of the box. + - ``outline``: The color used to draw the outline of the box. + - ``width``: The width of the outline of the box. + - ``margin``: The number of pixels space left between the child + and the box. + - ``draggable``: whether the text can be dragged by the user. + """ + + def __init__(self, canvas, child, **attribs): + """ + Create a new box widget. + + :type canvas: Tkinter.Canvas + :param canvas: This canvas widget's canvas. + :param child: The child widget. ``child`` must not have a + parent. + :type child: CanvasWidget + :param attribs: The new canvas widget's attributes. 
+ """ + self._child = child + self._margin = 1 + self._box = canvas.create_rectangle(1, 1, 1, 1) + canvas.tag_lower(self._box) + AbstractContainerWidget.__init__(self, canvas, child, **attribs) + + def __setitem__(self, attr, value): + if attr == "margin": + self._margin = value + elif attr in ("outline", "fill", "width"): + self.canvas().itemconfig(self._box, {attr: value}) + else: + CanvasWidget.__setitem__(self, attr, value) + + def __getitem__(self, attr): + if attr == "margin": + return self._margin + elif attr == "width": + return float(self.canvas().itemcget(self._box, attr)) + elif attr in ("outline", "fill", "width"): + return self.canvas().itemcget(self._box, attr) + else: + return CanvasWidget.__getitem__(self, attr) + + def _update(self, child): + (x1, y1, x2, y2) = child.bbox() + margin = self._margin + self["width"] / 2 + self.canvas().coords( + self._box, x1 - margin, y1 - margin, x2 + margin, y2 + margin + ) + + def _tags(self): + return [self._box] + + +class OvalWidget(AbstractContainerWidget): + """ + A canvas widget that places a oval around a child widget. + + Attributes: + - ``fill``: The color used to fill the interior of the oval. + - ``outline``: The color used to draw the outline of the oval. + - ``width``: The width of the outline of the oval. + - ``margin``: The number of pixels space left between the child + and the oval. + - ``draggable``: whether the text can be dragged by the user. + - ``double``: If true, then a double-oval is drawn. + """ + + def __init__(self, canvas, child, **attribs): + """ + Create a new oval widget. + + :type canvas: Tkinter.Canvas + :param canvas: This canvas widget's canvas. + :param child: The child widget. ``child`` must not have a + parent. + :type child: CanvasWidget + :param attribs: The new canvas widget's attributes. 
+ """ + self._child = child + self._margin = 1 + self._oval = canvas.create_oval(1, 1, 1, 1) + self._circle = attribs.pop("circle", False) + self._double = attribs.pop("double", False) + if self._double: + self._oval2 = canvas.create_oval(1, 1, 1, 1) + else: + self._oval2 = None + canvas.tag_lower(self._oval) + AbstractContainerWidget.__init__(self, canvas, child, **attribs) + + def __setitem__(self, attr, value): + c = self.canvas() + if attr == "margin": + self._margin = value + elif attr == "double": + if value == True and self._oval2 is None: + # Copy attributes & position from self._oval. + x1, y1, x2, y2 = c.bbox(self._oval) + w = self["width"] * 2 + self._oval2 = c.create_oval( + x1 - w, + y1 - w, + x2 + w, + y2 + w, + outline=c.itemcget(self._oval, "outline"), + width=c.itemcget(self._oval, "width"), + ) + c.tag_lower(self._oval2) + if value == False and self._oval2 is not None: + c.delete(self._oval2) + self._oval2 = None + elif attr in ("outline", "fill", "width"): + c.itemconfig(self._oval, {attr: value}) + if self._oval2 is not None and attr != "fill": + c.itemconfig(self._oval2, {attr: value}) + if self._oval2 is not None and attr != "fill": + self.canvas().itemconfig(self._oval2, {attr: value}) + else: + CanvasWidget.__setitem__(self, attr, value) + + def __getitem__(self, attr): + if attr == "margin": + return self._margin + elif attr == "double": + return self._double is not None + elif attr == "width": + return float(self.canvas().itemcget(self._oval, attr)) + elif attr in ("outline", "fill", "width"): + return self.canvas().itemcget(self._oval, attr) + else: + return CanvasWidget.__getitem__(self, attr) + + # The ratio between inscribed & circumscribed ovals + RATIO = 1.4142135623730949 + + def _update(self, child): + R = OvalWidget.RATIO + (x1, y1, x2, y2) = child.bbox() + margin = self._margin + + # If we're a circle, pretend our contents are square. 
+ if self._circle: + dx, dy = abs(x1 - x2), abs(y1 - y2) + if dx > dy: + y = (y1 + y2) / 2 + y1, y2 = y - dx / 2, y + dx / 2 + elif dy > dx: + x = (x1 + x2) / 2 + x1, x2 = x - dy / 2, x + dy / 2 + + # Find the four corners. + left = int((x1 * (1 + R) + x2 * (1 - R)) / 2) + right = left + int((x2 - x1) * R) + top = int((y1 * (1 + R) + y2 * (1 - R)) / 2) + bot = top + int((y2 - y1) * R) + self.canvas().coords( + self._oval, left - margin, top - margin, right + margin, bot + margin + ) + if self._oval2 is not None: + self.canvas().coords( + self._oval2, + left - margin + 2, + top - margin + 2, + right + margin - 2, + bot + margin - 2, + ) + + def _tags(self): + if self._oval2 is None: + return [self._oval] + else: + return [self._oval, self._oval2] + + +class ParenWidget(AbstractContainerWidget): + """ + A canvas widget that places a pair of parenthases around a child + widget. + + Attributes: + - ``color``: The color used to draw the parenthases. + - ``width``: The width of the parenthases. + - ``draggable``: whether the text can be dragged by the user. + """ + + def __init__(self, canvas, child, **attribs): + """ + Create a new parenthasis widget. + + :type canvas: Tkinter.Canvas + :param canvas: This canvas widget's canvas. + :param child: The child widget. ``child`` must not have a + parent. + :type child: CanvasWidget + :param attribs: The new canvas widget's attributes. 
+ """ + self._child = child + self._oparen = canvas.create_arc(1, 1, 1, 1, style="arc", start=90, extent=180) + self._cparen = canvas.create_arc(1, 1, 1, 1, style="arc", start=-90, extent=180) + AbstractContainerWidget.__init__(self, canvas, child, **attribs) + + def __setitem__(self, attr, value): + if attr == "color": + self.canvas().itemconfig(self._oparen, outline=value) + self.canvas().itemconfig(self._cparen, outline=value) + elif attr == "width": + self.canvas().itemconfig(self._oparen, width=value) + self.canvas().itemconfig(self._cparen, width=value) + else: + CanvasWidget.__setitem__(self, attr, value) + + def __getitem__(self, attr): + if attr == "color": + return self.canvas().itemcget(self._oparen, "outline") + elif attr == "width": + return self.canvas().itemcget(self._oparen, "width") + else: + return CanvasWidget.__getitem__(self, attr) + + def _update(self, child): + (x1, y1, x2, y2) = child.bbox() + width = max((y2 - y1) / 6, 4) + self.canvas().coords(self._oparen, x1 - width, y1, x1 + width, y2) + self.canvas().coords(self._cparen, x2 - width, y1, x2 + width, y2) + + def _tags(self): + return [self._oparen, self._cparen] + + +class BracketWidget(AbstractContainerWidget): + """ + A canvas widget that places a pair of brackets around a child + widget. + + Attributes: + - ``color``: The color used to draw the brackets. + - ``width``: The width of the brackets. + - ``draggable``: whether the text can be dragged by the user. + """ + + def __init__(self, canvas, child, **attribs): + """ + Create a new bracket widget. + + :type canvas: Tkinter.Canvas + :param canvas: This canvas widget's canvas. + :param child: The child widget. ``child`` must not have a + parent. + :type child: CanvasWidget + :param attribs: The new canvas widget's attributes. 
+ """ + self._child = child + self._obrack = canvas.create_line(1, 1, 1, 1, 1, 1, 1, 1) + self._cbrack = canvas.create_line(1, 1, 1, 1, 1, 1, 1, 1) + AbstractContainerWidget.__init__(self, canvas, child, **attribs) + + def __setitem__(self, attr, value): + if attr == "color": + self.canvas().itemconfig(self._obrack, fill=value) + self.canvas().itemconfig(self._cbrack, fill=value) + elif attr == "width": + self.canvas().itemconfig(self._obrack, width=value) + self.canvas().itemconfig(self._cbrack, width=value) + else: + CanvasWidget.__setitem__(self, attr, value) + + def __getitem__(self, attr): + if attr == "color": + return self.canvas().itemcget(self._obrack, "outline") + elif attr == "width": + return self.canvas().itemcget(self._obrack, "width") + else: + return CanvasWidget.__getitem__(self, attr) + + def _update(self, child): + (x1, y1, x2, y2) = child.bbox() + width = max((y2 - y1) / 8, 2) + self.canvas().coords( + self._obrack, x1, y1, x1 - width, y1, x1 - width, y2, x1, y2 + ) + self.canvas().coords( + self._cbrack, x2, y1, x2 + width, y1, x2 + width, y2, x2, y2 + ) + + def _tags(self): + return [self._obrack, self._cbrack] + + +class SequenceWidget(CanvasWidget): + """ + A canvas widget that keeps a list of canvas widgets in a + horizontal line. + + Attributes: + - ``align``: The vertical alignment of the children. Possible + values are ``'top'``, ``'center'``, and ``'bottom'``. By + default, children are center-aligned. + - ``space``: The amount of horizontal space to place between + children. By default, one pixel of space is used. + - ``ordered``: If true, then keep the children in their + original order. + """ + + def __init__(self, canvas, *children, **attribs): + """ + Create a new sequence widget. + + :type canvas: Tkinter.Canvas + :param canvas: This canvas widget's canvas. + :param children: The widgets that should be aligned + horizontally. Each child must not have a parent. 
+ :type children: list(CanvasWidget) + :param attribs: The new canvas widget's attributes. + """ + self._align = "center" + self._space = 1 + self._ordered = False + self._children = list(children) + for child in children: + self._add_child_widget(child) + CanvasWidget.__init__(self, canvas, **attribs) + + def __setitem__(self, attr, value): + if attr == "align": + if value not in ("top", "bottom", "center"): + raise ValueError("Bad alignment: %r" % value) + self._align = value + elif attr == "space": + self._space = value + elif attr == "ordered": + self._ordered = value + else: + CanvasWidget.__setitem__(self, attr, value) + + def __getitem__(self, attr): + if attr == "align": + return self._align + elif attr == "space": + return self._space + elif attr == "ordered": + return self._ordered + else: + return CanvasWidget.__getitem__(self, attr) + + def _tags(self): + return [] + + def _yalign(self, top, bot): + if self._align == "top": + return top + if self._align == "bottom": + return bot + if self._align == "center": + return (top + bot) / 2 + + def _update(self, child): + # Align all children with child. + (left, top, right, bot) = child.bbox() + y = self._yalign(top, bot) + for c in self._children: + (x1, y1, x2, y2) = c.bbox() + c.move(0, y - self._yalign(y1, y2)) + + if self._ordered and len(self._children) > 1: + index = self._children.index(child) + + x = right + self._space + for i in range(index + 1, len(self._children)): + (x1, y1, x2, y2) = self._children[i].bbox() + if x > x1: + self._children[i].move(x - x1, 0) + x += x2 - x1 + self._space + + x = left - self._space + for i in range(index - 1, -1, -1): + (x1, y1, x2, y2) = self._children[i].bbox() + if x < x2: + self._children[i].move(x - x2, 0) + x -= x2 - x1 + self._space + + def _manage(self): + if len(self._children) == 0: + return + child = self._children[0] + + # Align all children with child. 
+ (left, top, right, bot) = child.bbox() + y = self._yalign(top, bot) + + index = self._children.index(child) + + # Line up children to the right of child. + x = right + self._space + for i in range(index + 1, len(self._children)): + (x1, y1, x2, y2) = self._children[i].bbox() + self._children[i].move(x - x1, y - self._yalign(y1, y2)) + x += x2 - x1 + self._space + + # Line up children to the left of child. + x = left - self._space + for i in range(index - 1, -1, -1): + (x1, y1, x2, y2) = self._children[i].bbox() + self._children[i].move(x - x2, y - self._yalign(y1, y2)) + x -= x2 - x1 + self._space + + def __repr__(self): + return "[Sequence: " + repr(self._children)[1:-1] + "]" + + # Provide an alias for the child_widgets() member. + children = CanvasWidget.child_widgets + + def replace_child(self, oldchild, newchild): + """ + Replace the child canvas widget ``oldchild`` with ``newchild``. + ``newchild`` must not have a parent. ``oldchild``'s parent will + be set to None. + + :type oldchild: CanvasWidget + :param oldchild: The child canvas widget to remove. + :type newchild: CanvasWidget + :param newchild: The canvas widget that should replace + ``oldchild``. + """ + index = self._children.index(oldchild) + self._children[index] = newchild + self._remove_child_widget(oldchild) + self._add_child_widget(newchild) + self.update(newchild) + + def remove_child(self, child): + """ + Remove the given child canvas widget. ``child``'s parent will + be set to None. + + :type child: CanvasWidget + :param child: The child canvas widget to remove. + """ + index = self._children.index(child) + del self._children[index] + self._remove_child_widget(child) + if len(self._children) > 0: + self.update(self._children[0]) + + def insert_child(self, index, child): + """ + Insert a child canvas widget before a given index. + + :type child: CanvasWidget + :param child: The canvas widget that should be inserted. 
+ :type index: int + :param index: The index where the child widget should be + inserted. In particular, the index of ``child`` will be + ``index``; and the index of any children whose indices were + greater than equal to ``index`` before ``child`` was + inserted will be incremented by one. + """ + self._children.insert(index, child) + self._add_child_widget(child) + + +class StackWidget(CanvasWidget): + """ + A canvas widget that keeps a list of canvas widgets in a vertical + line. + + Attributes: + - ``align``: The horizontal alignment of the children. Possible + values are ``'left'``, ``'center'``, and ``'right'``. By + default, children are center-aligned. + - ``space``: The amount of vertical space to place between + children. By default, one pixel of space is used. + - ``ordered``: If true, then keep the children in their + original order. + """ + + def __init__(self, canvas, *children, **attribs): + """ + Create a new stack widget. + + :type canvas: Tkinter.Canvas + :param canvas: This canvas widget's canvas. + :param children: The widgets that should be aligned + vertically. Each child must not have a parent. + :type children: list(CanvasWidget) + :param attribs: The new canvas widget's attributes. 
+ """ + self._align = "center" + self._space = 1 + self._ordered = False + self._children = list(children) + for child in children: + self._add_child_widget(child) + CanvasWidget.__init__(self, canvas, **attribs) + + def __setitem__(self, attr, value): + if attr == "align": + if value not in ("left", "right", "center"): + raise ValueError("Bad alignment: %r" % value) + self._align = value + elif attr == "space": + self._space = value + elif attr == "ordered": + self._ordered = value + else: + CanvasWidget.__setitem__(self, attr, value) + + def __getitem__(self, attr): + if attr == "align": + return self._align + elif attr == "space": + return self._space + elif attr == "ordered": + return self._ordered + else: + return CanvasWidget.__getitem__(self, attr) + + def _tags(self): + return [] + + def _xalign(self, left, right): + if self._align == "left": + return left + if self._align == "right": + return right + if self._align == "center": + return (left + right) / 2 + + def _update(self, child): + # Align all children with child. + (left, top, right, bot) = child.bbox() + x = self._xalign(left, right) + for c in self._children: + (x1, y1, x2, y2) = c.bbox() + c.move(x - self._xalign(x1, x2), 0) + + if self._ordered and len(self._children) > 1: + index = self._children.index(child) + + y = bot + self._space + for i in range(index + 1, len(self._children)): + (x1, y1, x2, y2) = self._children[i].bbox() + if y > y1: + self._children[i].move(0, y - y1) + y += y2 - y1 + self._space + + y = top - self._space + for i in range(index - 1, -1, -1): + (x1, y1, x2, y2) = self._children[i].bbox() + if y < y2: + self._children[i].move(0, y - y2) + y -= y2 - y1 + self._space + + def _manage(self): + if len(self._children) == 0: + return + child = self._children[0] + + # Align all children with child. + (left, top, right, bot) = child.bbox() + x = self._xalign(left, right) + + index = self._children.index(child) + + # Line up children below the child. 
+ y = bot + self._space + for i in range(index + 1, len(self._children)): + (x1, y1, x2, y2) = self._children[i].bbox() + self._children[i].move(x - self._xalign(x1, x2), y - y1) + y += y2 - y1 + self._space + + # Line up children above the child. + y = top - self._space + for i in range(index - 1, -1, -1): + (x1, y1, x2, y2) = self._children[i].bbox() + self._children[i].move(x - self._xalign(x1, x2), y - y2) + y -= y2 - y1 + self._space + + def __repr__(self): + return "[Stack: " + repr(self._children)[1:-1] + "]" + + # Provide an alias for the child_widgets() member. + children = CanvasWidget.child_widgets + + def replace_child(self, oldchild, newchild): + """ + Replace the child canvas widget ``oldchild`` with ``newchild``. + ``newchild`` must not have a parent. ``oldchild``'s parent will + be set to None. + + :type oldchild: CanvasWidget + :param oldchild: The child canvas widget to remove. + :type newchild: CanvasWidget + :param newchild: The canvas widget that should replace + ``oldchild``. + """ + index = self._children.index(oldchild) + self._children[index] = newchild + self._remove_child_widget(oldchild) + self._add_child_widget(newchild) + self.update(newchild) + + def remove_child(self, child): + """ + Remove the given child canvas widget. ``child``'s parent will + be set to None. + + :type child: CanvasWidget + :param child: The child canvas widget to remove. + """ + index = self._children.index(child) + del self._children[index] + self._remove_child_widget(child) + if len(self._children) > 0: + self.update(self._children[0]) + + def insert_child(self, index, child): + """ + Insert a child canvas widget before a given index. + + :type child: CanvasWidget + :param child: The canvas widget that should be inserted. + :type index: int + :param index: The index where the child widget should be + inserted. 
In particular, the index of ``child`` will be + ``index``; and the index of any children whose indices were + greater than equal to ``index`` before ``child`` was + inserted will be incremented by one. + """ + self._children.insert(index, child) + self._add_child_widget(child) + + +class SpaceWidget(CanvasWidget): + """ + A canvas widget that takes up space but does not display + anything. A ``SpaceWidget`` can be used to add space between + elements. Each space widget is characterized by a width and a + height. If you wish to only create horizontal space, then use a + height of zero; and if you wish to only create vertical space, use + a width of zero. + """ + + def __init__(self, canvas, width, height, **attribs): + """ + Create a new space widget. + + :type canvas: Tkinter.Canvas + :param canvas: This canvas widget's canvas. + :type width: int + :param width: The width of the new space widget. + :type height: int + :param height: The height of the new space widget. + :param attribs: The new canvas widget's attributes. + """ + # For some reason, + if width > 4: + width -= 4 + if height > 4: + height -= 4 + self._tag = canvas.create_line(1, 1, width, height, fill="") + CanvasWidget.__init__(self, canvas, **attribs) + + # note: width() and height() are already defined by CanvasWidget. + def set_width(self, width): + """ + Change the width of this space widget. + + :param width: The new width. + :type width: int + :rtype: None + """ + [x1, y1, x2, y2] = self.bbox() + self.canvas().coords(self._tag, x1, y1, x1 + width, y2) + + def set_height(self, height): + """ + Change the height of this space widget. + + :param height: The new height. 
+ :type height: int + :rtype: None + """ + [x1, y1, x2, y2] = self.bbox() + self.canvas().coords(self._tag, x1, y1, x2, y1 + height) + + def _tags(self): + return [self._tag] + + def __repr__(self): + return "[Space]" + + +class ScrollWatcherWidget(CanvasWidget): + """ + A special canvas widget that adjusts its ``Canvas``'s scrollregion + to always include the bounding boxes of all of its children. The + scroll-watcher widget will only increase the size of the + ``Canvas``'s scrollregion; it will never decrease it. + """ + + def __init__(self, canvas, *children, **attribs): + """ + Create a new scroll-watcher widget. + + :type canvas: Tkinter.Canvas + :param canvas: This canvas widget's canvas. + :type children: list(CanvasWidget) + :param children: The canvas widgets watched by the + scroll-watcher. The scroll-watcher will ensure that these + canvas widgets are always contained in their canvas's + scrollregion. + :param attribs: The new canvas widget's attributes. + """ + for child in children: + self._add_child_widget(child) + CanvasWidget.__init__(self, canvas, **attribs) + + def add_child(self, canvaswidget): + """ + Add a new canvas widget to the scroll-watcher. The + scroll-watcher will ensure that the new canvas widget is + always contained in its canvas's scrollregion. + + :param canvaswidget: The new canvas widget. + :type canvaswidget: CanvasWidget + :rtype: None + """ + self._add_child_widget(canvaswidget) + self.update(canvaswidget) + + def remove_child(self, canvaswidget): + """ + Remove a canvas widget from the scroll-watcher. The + scroll-watcher will no longer ensure that the new canvas + widget is always contained in its canvas's scrollregion. + + :param canvaswidget: The canvas widget to remove. 
+ :type canvaswidget: CanvasWidget + :rtype: None + """ + self._remove_child_widget(canvaswidget) + + def _tags(self): + return [] + + def _update(self, child): + self._adjust_scrollregion() + + def _adjust_scrollregion(self): + """ + Adjust the scrollregion of this scroll-watcher's ``Canvas`` to + include the bounding boxes of all of its children. + """ + bbox = self.bbox() + canvas = self.canvas() + scrollregion = [int(n) for n in canvas["scrollregion"].split()] + if len(scrollregion) != 4: + return + if ( + bbox[0] < scrollregion[0] + or bbox[1] < scrollregion[1] + or bbox[2] > scrollregion[2] + or bbox[3] > scrollregion[3] + ): + scrollregion = "%d %d %d %d" % ( + min(bbox[0], scrollregion[0]), + min(bbox[1], scrollregion[1]), + max(bbox[2], scrollregion[2]), + max(bbox[3], scrollregion[3]), + ) + canvas["scrollregion"] = scrollregion + + +##////////////////////////////////////////////////////// +## Canvas Frame +##////////////////////////////////////////////////////// + + +class CanvasFrame: + """ + A ``Tkinter`` frame containing a canvas and scrollbars. + ``CanvasFrame`` uses a ``ScrollWatcherWidget`` to ensure that all of + the canvas widgets contained on its canvas are within its + scrollregion. In order for ``CanvasFrame`` to make these checks, + all canvas widgets must be registered with ``add_widget`` when they + are added to the canvas; and destroyed with ``destroy_widget`` when + they are no longer needed. + + If a ``CanvasFrame`` is created with no parent, then it will create + its own main window, including a "Done" button and a "Print" + button. + """ + + def __init__(self, parent=None, **kw): + """ + Create a new ``CanvasFrame``. + + :type parent: Tkinter.BaseWidget or Tkinter.Tk + :param parent: The parent ``Tkinter`` widget. If no parent is + specified, then ``CanvasFrame`` will create a new main + window. + :param kw: Keyword arguments for the new ``Canvas``. See the + documentation for ``Tkinter.Canvas`` for more information. 
+ """ + # If no parent was given, set up a top-level window. + if parent is None: + self._parent = Tk() + self._parent.title("NLTK") + self._parent.bind("", lambda e: self.print_to_file()) + self._parent.bind("", self.destroy) + self._parent.bind("", self.destroy) + else: + self._parent = parent + + # Create a frame for the canvas & scrollbars + self._frame = frame = Frame(self._parent) + self._canvas = canvas = Canvas(frame, **kw) + xscrollbar = Scrollbar(self._frame, orient="horizontal") + yscrollbar = Scrollbar(self._frame, orient="vertical") + xscrollbar["command"] = canvas.xview + yscrollbar["command"] = canvas.yview + canvas["xscrollcommand"] = xscrollbar.set + canvas["yscrollcommand"] = yscrollbar.set + yscrollbar.pack(fill="y", side="right") + xscrollbar.pack(fill="x", side="bottom") + canvas.pack(expand=1, fill="both", side="left") + + # Set initial scroll region. + scrollregion = "0 0 {} {}".format(canvas["width"], canvas["height"]) + canvas["scrollregion"] = scrollregion + + self._scrollwatcher = ScrollWatcherWidget(canvas) + + # If no parent was given, pack the frame, and add a menu. + if parent is None: + self.pack(expand=1, fill="both") + self._init_menubar() + + def _init_menubar(self): + menubar = Menu(self._parent) + + filemenu = Menu(menubar, tearoff=0) + filemenu.add_command( + label="Print to Postscript", + underline=0, + command=self.print_to_file, + accelerator="Ctrl-p", + ) + filemenu.add_command( + label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-x" + ) + menubar.add_cascade(label="File", underline=0, menu=filemenu) + + self._parent.config(menu=menubar) + + def print_to_file(self, filename=None): + """ + Print the contents of this ``CanvasFrame`` to a postscript + file. If no filename is given, then prompt the user for one. + + :param filename: The name of the file to print the tree to. 
+ :type filename: str + :rtype: None + """ + if filename is None: + ftypes = [("Postscript files", ".ps"), ("All files", "*")] + filename = asksaveasfilename(filetypes=ftypes, defaultextension=".ps") + if not filename: + return + (x0, y0, w, h) = self.scrollregion() + postscript = self._canvas.postscript( + x=x0, + y=y0, + width=w + 2, + height=h + 2, + pagewidth=w + 2, # points = 1/72 inch + pageheight=h + 2, # points = 1/72 inch + pagex=0, + pagey=0, + ) + # workaround for bug in Tk font handling + postscript = postscript.replace(" 0 scalefont ", " 9 scalefont ") + with open(filename, "wb") as f: + f.write(postscript.encode("utf8")) + + def scrollregion(self): + """ + :return: The current scroll region for the canvas managed by + this ``CanvasFrame``. + :rtype: 4-tuple of int + """ + (x1, y1, x2, y2) = self._canvas["scrollregion"].split() + return (int(x1), int(y1), int(x2), int(y2)) + + def canvas(self): + """ + :return: The canvas managed by this ``CanvasFrame``. + :rtype: Tkinter.Canvas + """ + return self._canvas + + def add_widget(self, canvaswidget, x=None, y=None): + """ + Register a canvas widget with this ``CanvasFrame``. The + ``CanvasFrame`` will ensure that this canvas widget is always + within the ``Canvas``'s scrollregion. If no coordinates are + given for the canvas widget, then the ``CanvasFrame`` will + attempt to find a clear area of the canvas for it. + + :type canvaswidget: CanvasWidget + :param canvaswidget: The new canvas widget. ``canvaswidget`` + must have been created on this ``CanvasFrame``'s canvas. + :type x: int + :param x: The initial x coordinate for the upper left hand + corner of ``canvaswidget``, in the canvas's coordinate + space. + :type y: int + :param y: The initial y coordinate for the upper left hand + corner of ``canvaswidget``, in the canvas's coordinate + space. 
+ """ + if x is None or y is None: + (x, y) = self._find_room(canvaswidget, x, y) + + # Move to (x,y) + (x1, y1, x2, y2) = canvaswidget.bbox() + canvaswidget.move(x - x1, y - y1) + + # Register with scrollwatcher. + self._scrollwatcher.add_child(canvaswidget) + + def _find_room(self, widget, desired_x, desired_y): + """ + Try to find a space for a given widget. + """ + (left, top, right, bot) = self.scrollregion() + w = widget.width() + h = widget.height() + + if w >= (right - left): + return (0, 0) + if h >= (bot - top): + return (0, 0) + + # Move the widget out of the way, for now. + (x1, y1, x2, y2) = widget.bbox() + widget.move(left - x2 - 50, top - y2 - 50) + + if desired_x is not None: + x = desired_x + for y in range(top, bot - h, int((bot - top - h) / 10)): + if not self._canvas.find_overlapping( + x - 5, y - 5, x + w + 5, y + h + 5 + ): + return (x, y) + + if desired_y is not None: + y = desired_y + for x in range(left, right - w, int((right - left - w) / 10)): + if not self._canvas.find_overlapping( + x - 5, y - 5, x + w + 5, y + h + 5 + ): + return (x, y) + + for y in range(top, bot - h, int((bot - top - h) / 10)): + for x in range(left, right - w, int((right - left - w) / 10)): + if not self._canvas.find_overlapping( + x - 5, y - 5, x + w + 5, y + h + 5 + ): + return (x, y) + return (0, 0) + + def destroy_widget(self, canvaswidget): + """ + Remove a canvas widget from this ``CanvasFrame``. This + deregisters the canvas widget, and destroys it. + """ + self.remove_widget(canvaswidget) + canvaswidget.destroy() + + def remove_widget(self, canvaswidget): + # Deregister with scrollwatcher. + self._scrollwatcher.remove_child(canvaswidget) + + def pack(self, cnf={}, **kw): + """ + Pack this ``CanvasFrame``. See the documentation for + ``Tkinter.Pack`` for more information. + """ + self._frame.pack(cnf, **kw) + # Adjust to be big enough for kids? + + def destroy(self, *e): + """ + Destroy this ``CanvasFrame``. 
If this ``CanvasFrame`` created a + top-level window, then this will close that window. + """ + if self._parent is None: + return + self._parent.destroy() + self._parent = None + + def mainloop(self, *args, **kwargs): + """ + Enter the Tkinter mainloop. This function must be called if + this frame is created from a non-interactive program (e.g. + from a secript); otherwise, the frame will close as soon as + the script completes. + """ + if in_idle(): + return + self._parent.mainloop(*args, **kwargs) + + +##////////////////////////////////////////////////////// +## Text display +##////////////////////////////////////////////////////// + + +class ShowText: + """ + A ``Tkinter`` window used to display a text. ``ShowText`` is + typically used by graphical tools to display help text, or similar + information. + """ + + def __init__(self, root, title, text, width=None, height=None, **textbox_options): + if width is None or height is None: + (width, height) = self.find_dimentions(text, width, height) + + # Create the main window. + if root is None: + self._top = top = Tk() + else: + self._top = top = Toplevel(root) + top.title(title) + + b = Button(top, text="Ok", command=self.destroy) + b.pack(side="bottom") + + tbf = Frame(top) + tbf.pack(expand=1, fill="both") + scrollbar = Scrollbar(tbf, orient="vertical") + scrollbar.pack(side="right", fill="y") + textbox = Text(tbf, wrap="word", width=width, height=height, **textbox_options) + textbox.insert("end", text) + textbox["state"] = "disabled" + textbox.pack(side="left", expand=1, fill="both") + scrollbar["command"] = textbox.yview + textbox["yscrollcommand"] = scrollbar.set + + # Make it easy to close the window. + top.bind("q", self.destroy) + top.bind("x", self.destroy) + top.bind("c", self.destroy) + top.bind("", self.destroy) + top.bind("", self.destroy) + + # Focus the scrollbar, so they can use up/down, etc. 
+ scrollbar.focus() + + def find_dimentions(self, text, width, height): + lines = text.split("\n") + if width is None: + maxwidth = max(len(line) for line in lines) + width = min(maxwidth, 80) + + # Now, find height. + height = 0 + for line in lines: + while len(line) > width: + brk = line[:width].rfind(" ") + line = line[brk:] + height += 1 + height += 1 + height = min(height, 25) + + return (width, height) + + def destroy(self, *e): + if self._top is None: + return + self._top.destroy() + self._top = None + + def mainloop(self, *args, **kwargs): + """ + Enter the Tkinter mainloop. This function must be called if + this window is created from a non-interactive program (e.g. + from a secript); otherwise, the window will close as soon as + the script completes. + """ + if in_idle(): + return + self._top.mainloop(*args, **kwargs) + + +##////////////////////////////////////////////////////// +## Entry dialog +##////////////////////////////////////////////////////// + + +class EntryDialog: + """ + A dialog box for entering + """ + + def __init__( + self, parent, original_text="", instructions="", set_callback=None, title=None + ): + self._parent = parent + self._original_text = original_text + self._set_callback = set_callback + + width = int(max(30, len(original_text) * 3 / 2)) + self._top = Toplevel(parent) + + if title: + self._top.title(title) + + # The text entry box. + entryframe = Frame(self._top) + entryframe.pack(expand=1, fill="both", padx=5, pady=5, ipady=10) + if instructions: + l = Label(entryframe, text=instructions) + l.pack(side="top", anchor="w", padx=30) + self._entry = Entry(entryframe, width=width) + self._entry.pack(expand=1, fill="x", padx=30) + self._entry.insert(0, original_text) + + # A divider + divider = Frame(self._top, borderwidth=1, relief="sunken") + divider.pack(fill="x", ipady=1, padx=10) + + # The buttons. 
+ buttons = Frame(self._top) + buttons.pack(expand=0, fill="x", padx=5, pady=5) + b = Button(buttons, text="Cancel", command=self._cancel, width=8) + b.pack(side="right", padx=5) + b = Button(buttons, text="Ok", command=self._ok, width=8, default="active") + b.pack(side="left", padx=5) + b = Button(buttons, text="Apply", command=self._apply, width=8) + b.pack(side="left") + + self._top.bind("", self._ok) + self._top.bind("", self._cancel) + self._top.bind("", self._cancel) + + self._entry.focus() + + def _reset(self, *e): + self._entry.delete(0, "end") + self._entry.insert(0, self._original_text) + if self._set_callback: + self._set_callback(self._original_text) + + def _cancel(self, *e): + try: + self._reset() + except: + pass + self._destroy() + + def _ok(self, *e): + self._apply() + self._destroy() + + def _apply(self, *e): + if self._set_callback: + self._set_callback(self._entry.get()) + + def _destroy(self, *e): + if self._top is None: + return + self._top.destroy() + self._top = None + + +##////////////////////////////////////////////////////// +## Colorized List +##////////////////////////////////////////////////////// + + +class ColorizedList: + """ + An abstract base class for displaying a colorized list of items. + Subclasses should define: + + - ``_init_colortags``, which sets up Text color tags that + will be used by the list. + - ``_item_repr``, which returns a list of (text,colortag) + tuples that make up the colorized representation of the + item. + + :note: Typically, you will want to register a callback for + ``'select'`` that calls ``mark`` on the given item. + """ + + def __init__(self, parent, items=[], **options): + """ + Construct a new list. + + :param parent: The Tk widget that contains the colorized list + :param items: The initial contents of the colorized list. + :param options: + """ + self._parent = parent + self._callbacks = {} + + # Which items are marked? + self._marks = {} + + # Initialize the Tkinter frames. 
+ self._init_itemframe(options.copy()) + + # Set up key & mouse bindings. + self._textwidget.bind("", self._keypress) + self._textwidget.bind("", self._buttonpress) + + # Fill in the given CFG's items. + self._items = None + self.set(items) + + # //////////////////////////////////////////////////////////// + # Abstract methods + # //////////////////////////////////////////////////////////// + @abstractmethod + def _init_colortags(self, textwidget, options): + """ + Set up any colortags that will be used by this colorized list. + E.g.: + textwidget.tag_config('terminal', foreground='black') + """ + + @abstractmethod + def _item_repr(self, item): + """ + Return a list of (text, colortag) tuples that make up the + colorized representation of the item. Colorized + representations may not span multiple lines. I.e., the text + strings returned may not contain newline characters. + """ + + # //////////////////////////////////////////////////////////// + # Item Access + # //////////////////////////////////////////////////////////// + + def get(self, index=None): + """ + :return: A list of the items contained by this list. + """ + if index is None: + return self._items[:] + else: + return self._items[index] + + def set(self, items): + """ + Modify the list of items contained by this list. 
+ """ + items = list(items) + if self._items == items: + return + self._items = list(items) + + self._textwidget["state"] = "normal" + self._textwidget.delete("1.0", "end") + for item in items: + for (text, colortag) in self._item_repr(item): + assert "\n" not in text, "item repr may not contain newline" + self._textwidget.insert("end", text, colortag) + self._textwidget.insert("end", "\n") + # Remove the final newline + self._textwidget.delete("end-1char", "end") + self._textwidget.mark_set("insert", "1.0") + self._textwidget["state"] = "disabled" + # Clear all marks + self._marks.clear() + + def unmark(self, item=None): + """ + Remove highlighting from the given item; or from every item, + if no item is given. + :raise ValueError: If ``item`` is not contained in the list. + :raise KeyError: If ``item`` is not marked. + """ + if item is None: + self._marks.clear() + self._textwidget.tag_remove("highlight", "1.0", "end+1char") + else: + index = self._items.index(item) + del self._marks[item] + (start, end) = ("%d.0" % (index + 1), "%d.0" % (index + 2)) + self._textwidget.tag_remove("highlight", start, end) + + def mark(self, item): + """ + Highlight the given item. + :raise ValueError: If ``item`` is not contained in the list. + """ + self._marks[item] = 1 + index = self._items.index(item) + (start, end) = ("%d.0" % (index + 1), "%d.0" % (index + 2)) + self._textwidget.tag_add("highlight", start, end) + + def markonly(self, item): + """ + Remove any current highlighting, and mark the given item. + :raise ValueError: If ``item`` is not contained in the list. + """ + self.unmark() + self.mark(item) + + def view(self, item): + """ + Adjust the view such that the given item is visible. If + the item is already visible, then do nothing. 
+ """ + index = self._items.index(item) + self._textwidget.see("%d.0" % (index + 1)) + + # //////////////////////////////////////////////////////////// + # Callbacks + # //////////////////////////////////////////////////////////// + + def add_callback(self, event, func): + """ + Register a callback function with the list. This function + will be called whenever the given event occurs. + + :param event: The event that will trigger the callback + function. Valid events are: click1, click2, click3, + space, return, select, up, down, next, prior, move + :param func: The function that should be called when + the event occurs. ``func`` will be called with a + single item as its argument. (The item selected + or the item moved to). + """ + if event == "select": + events = ["click1", "space", "return"] + elif event == "move": + events = ["up", "down", "next", "prior"] + else: + events = [event] + + for e in events: + self._callbacks.setdefault(e, {})[func] = 1 + + def remove_callback(self, event, func=None): + """ + Deregister a callback function. If ``func`` is none, then + all callbacks are removed for the given event. 
+ """ + if event is None: + events = list(self._callbacks.keys()) + elif event == "select": + events = ["click1", "space", "return"] + elif event == "move": + events = ["up", "down", "next", "prior"] + else: + events = [event] + + for e in events: + if func is None: + del self._callbacks[e] + else: + try: + del self._callbacks[e][func] + except: + pass + + # //////////////////////////////////////////////////////////// + # Tkinter Methods + # //////////////////////////////////////////////////////////// + + def pack(self, cnf={}, **kw): + # "@include: Tkinter.Pack.pack" + self._itemframe.pack(cnf, **kw) + + def grid(self, cnf={}, **kw): + # "@include: Tkinter.Grid.grid" + self._itemframe.grid(cnf, *kw) + + def focus(self): + # "@include: Tkinter.Widget.focus" + self._textwidget.focus() + + # //////////////////////////////////////////////////////////// + # Internal Methods + # //////////////////////////////////////////////////////////// + + def _init_itemframe(self, options): + self._itemframe = Frame(self._parent) + + # Create the basic Text widget & scrollbar. + options.setdefault("background", "#e0e0e0") + self._textwidget = Text(self._itemframe, **options) + self._textscroll = Scrollbar(self._itemframe, takefocus=0, orient="vertical") + self._textwidget.config(yscrollcommand=self._textscroll.set) + self._textscroll.config(command=self._textwidget.yview) + self._textscroll.pack(side="right", fill="y") + self._textwidget.pack(expand=1, fill="both", side="left") + + # Initialize the colorization tags + self._textwidget.tag_config( + "highlight", background="#e0ffff", border="1", relief="raised" + ) + self._init_colortags(self._textwidget, options) + + # How do I want to mark keyboard selection? 
+ self._textwidget.tag_config("sel", foreground="") + self._textwidget.tag_config( + "sel", foreground="", background="", border="", underline=1 + ) + self._textwidget.tag_lower("highlight", "sel") + + def _fire_callback(self, event, itemnum): + if event not in self._callbacks: + return + if 0 <= itemnum < len(self._items): + item = self._items[itemnum] + else: + item = None + for cb_func in list(self._callbacks[event].keys()): + cb_func(item) + + def _buttonpress(self, event): + clickloc = "@%d,%d" % (event.x, event.y) + insert_point = self._textwidget.index(clickloc) + itemnum = int(insert_point.split(".")[0]) - 1 + self._fire_callback("click%d" % event.num, itemnum) + + def _keypress(self, event): + if event.keysym == "Return" or event.keysym == "space": + insert_point = self._textwidget.index("insert") + itemnum = int(insert_point.split(".")[0]) - 1 + self._fire_callback(event.keysym.lower(), itemnum) + return + elif event.keysym == "Down": + delta = "+1line" + elif event.keysym == "Up": + delta = "-1line" + elif event.keysym == "Next": + delta = "+10lines" + elif event.keysym == "Prior": + delta = "-10lines" + else: + return "continue" + + self._textwidget.mark_set("insert", "insert" + delta) + self._textwidget.see("insert") + self._textwidget.tag_remove("sel", "1.0", "end+1char") + self._textwidget.tag_add("sel", "insert linestart", "insert lineend") + + insert_point = self._textwidget.index("insert") + itemnum = int(insert_point.split(".")[0]) - 1 + self._fire_callback(event.keysym.lower(), itemnum) + + return "break" + + +##////////////////////////////////////////////////////// +## Improved OptionMenu +##////////////////////////////////////////////////////// + + +class MutableOptionMenu(Menubutton): + def __init__(self, master, values, **options): + self._callback = options.get("command") + if "command" in options: + del options["command"] + + # Create a variable + self._variable = variable = StringVar() + if len(values) > 0: + variable.set(values[0]) + + 
kw = { + "borderwidth": 2, + "textvariable": variable, + "indicatoron": 1, + "relief": RAISED, + "anchor": "c", + "highlightthickness": 2, + } + kw.update(options) + Widget.__init__(self, master, "menubutton", kw) + self.widgetName = "tk_optionMenu" + self._menu = Menu(self, name="menu", tearoff=0) + self.menuname = self._menu._w + + self._values = [] + for value in values: + self.add(value) + + self["menu"] = self._menu + + def add(self, value): + if value in self._values: + return + + def set(value=value): + self.set(value) + + self._menu.add_command(label=value, command=set) + self._values.append(value) + + def set(self, value): + self._variable.set(value) + if self._callback: + self._callback(value) + + def remove(self, value): + # Might raise indexerror: pass to parent. + i = self._values.index(value) + del self._values[i] + self._menu.delete(i, i) + + def __getitem__(self, name): + if name == "menu": + return self.__menu + return Widget.__getitem__(self, name) + + def destroy(self): + """Destroy this widget and the associated menu.""" + Menubutton.destroy(self) + self._menu = None + + +##////////////////////////////////////////////////////// +## Test code. +##////////////////////////////////////////////////////// + + +def demo(): + """ + A simple demonstration showing how to use canvas widgets. 
+ """ + + def fill(cw): + from random import randint + + cw["fill"] = "#00%04d" % randint(0, 9999) + + def color(cw): + from random import randint + + cw["color"] = "#ff%04d" % randint(0, 9999) + + cf = CanvasFrame(closeenough=10, width=300, height=300) + c = cf.canvas() + ct3 = TextWidget(c, "hiya there", draggable=1) + ct2 = TextWidget(c, "o o\n||\n___\n U", draggable=1, justify="center") + co = OvalWidget(c, ct2, outline="red") + ct = TextWidget(c, "o o\n||\n\\___/", draggable=1, justify="center") + cp = ParenWidget(c, ct, color="red") + cb = BoxWidget(c, cp, fill="cyan", draggable=1, width=3, margin=10) + equation = SequenceWidget( + c, + SymbolWidget(c, "forall"), + TextWidget(c, "x"), + SymbolWidget(c, "exists"), + TextWidget(c, "y: "), + TextWidget(c, "x"), + SymbolWidget(c, "notequal"), + TextWidget(c, "y"), + ) + space = SpaceWidget(c, 0, 30) + cstack = StackWidget(c, cb, ct3, space, co, equation, align="center") + prompt_msg = TextWidget( + c, "try clicking\nand dragging", draggable=1, justify="center" + ) + cs = SequenceWidget(c, cstack, prompt_msg) + zz = BracketWidget(c, cs, color="green4", width=3) + cf.add_widget(zz, 60, 30) + + cb.bind_click(fill) + ct.bind_click(color) + co.bind_click(fill) + ct2.bind_click(color) + ct3.bind_click(color) + + cf.mainloop() + # ShowText(None, 'title', ((('this is text'*150)+'\n')*5)) + + +if __name__ == "__main__": + demo() diff --git a/lib/python3.10/site-packages/nltk/inference/__init__.py b/lib/python3.10/site-packages/nltk/inference/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..754b3d2d78286799b229700bb19bd21cb683b855 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/inference/__init__.py @@ -0,0 +1,24 @@ +# Natural Language Toolkit: Inference +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Dan Garrette +# Ewan Klein +# +# URL: +# For license information, see LICENSE.TXT + +""" +Classes and interfaces for theorem proving and model building. 
+""" + +from nltk.inference.api import ParallelProverBuilder, ParallelProverBuilderCommand +from nltk.inference.discourse import ( + CfgReadingCommand, + DiscourseTester, + DrtGlueReadingCommand, + ReadingCommand, +) +from nltk.inference.mace import Mace, MaceCommand +from nltk.inference.prover9 import Prover9, Prover9Command +from nltk.inference.resolution import ResolutionProver, ResolutionProverCommand +from nltk.inference.tableau import TableauProver, TableauProverCommand diff --git a/lib/python3.10/site-packages/nltk/inference/api.py b/lib/python3.10/site-packages/nltk/inference/api.py new file mode 100644 index 0000000000000000000000000000000000000000..12f1c099941280c1a72f40f957330dc5497a1b27 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/inference/api.py @@ -0,0 +1,614 @@ +# Natural Language Toolkit: Classifier Interface +# +# Author: Ewan Klein +# Dan Garrette +# +# URL: +# For license information, see LICENSE.TXT + +""" +Interfaces and base classes for theorem provers and model builders. + +``Prover`` is a standard interface for a theorem prover which tries to prove a goal from a +list of assumptions. + +``ModelBuilder`` is a standard interface for a model builder. Given just a set of assumptions. +the model builder tries to build a model for the assumptions. Given a set of assumptions and a +goal *G*, the model builder tries to find a counter-model, in the sense of a model that will satisfy +the assumptions plus the negation of *G*. +""" + +import threading +import time +from abc import ABCMeta, abstractmethod + + +class Prover(metaclass=ABCMeta): + """ + Interface for trying to prove a goal from assumptions. Both the goal and + the assumptions are constrained to be formulas of ``logic.Expression``. + """ + + def prove(self, goal=None, assumptions=None, verbose=False): + """ + :return: Whether the proof was successful or not. 
+ :rtype: bool + """ + return self._prove(goal, assumptions, verbose)[0] + + @abstractmethod + def _prove(self, goal=None, assumptions=None, verbose=False): + """ + :return: Whether the proof was successful or not, along with the proof + :rtype: tuple: (bool, str) + """ + + +class ModelBuilder(metaclass=ABCMeta): + """ + Interface for trying to build a model of set of formulas. + Open formulas are assumed to be universally quantified. + Both the goal and the assumptions are constrained to be formulas + of ``logic.Expression``. + """ + + def build_model(self, goal=None, assumptions=None, verbose=False): + """ + Perform the actual model building. + :return: Whether a model was generated + :rtype: bool + """ + return self._build_model(goal, assumptions, verbose)[0] + + @abstractmethod + def _build_model(self, goal=None, assumptions=None, verbose=False): + """ + Perform the actual model building. + :return: Whether a model was generated, and the model itself + :rtype: tuple(bool, sem.Valuation) + """ + + +class TheoremToolCommand(metaclass=ABCMeta): + """ + This class holds a goal and a list of assumptions to be used in proving + or model building. + """ + + @abstractmethod + def add_assumptions(self, new_assumptions): + """ + Add new assumptions to the assumption list. + + :param new_assumptions: new assumptions + :type new_assumptions: list(sem.Expression) + """ + + @abstractmethod + def retract_assumptions(self, retracted, debug=False): + """ + Retract assumptions from the assumption list. + + :param debug: If True, give warning when ``retracted`` is not present on + assumptions list. + :type debug: bool + :param retracted: assumptions to be retracted + :type retracted: list(sem.Expression) + """ + + @abstractmethod + def assumptions(self): + """ + List the current assumptions. 
+ + :return: list of ``Expression`` + """ + + @abstractmethod + def goal(self): + """ + Return the goal + + :return: ``Expression`` + """ + + @abstractmethod + def print_assumptions(self): + """ + Print the list of the current assumptions. + """ + + +class ProverCommand(TheoremToolCommand): + """ + This class holds a ``Prover``, a goal, and a list of assumptions. When + prove() is called, the ``Prover`` is executed with the goal and assumptions. + """ + + @abstractmethod + def prove(self, verbose=False): + """ + Perform the actual proof. + """ + + @abstractmethod + def proof(self, simplify=True): + """ + Return the proof string + :param simplify: bool simplify the proof? + :return: str + """ + + @abstractmethod + def get_prover(self): + """ + Return the prover object + :return: ``Prover`` + """ + + +class ModelBuilderCommand(TheoremToolCommand): + """ + This class holds a ``ModelBuilder``, a goal, and a list of assumptions. + When build_model() is called, the ``ModelBuilder`` is executed with the goal + and assumptions. + """ + + @abstractmethod + def build_model(self, verbose=False): + """ + Perform the actual model building. + :return: A model if one is generated; None otherwise. + :rtype: sem.Valuation + """ + + @abstractmethod + def model(self, format=None): + """ + Return a string representation of the model + + :param simplify: bool simplify the proof? + :return: str + """ + + @abstractmethod + def get_model_builder(self): + """ + Return the model builder object + :return: ``ModelBuilder`` + """ + + +class BaseTheoremToolCommand(TheoremToolCommand): + """ + This class holds a goal and a list of assumptions to be used in proving + or model building. + """ + + def __init__(self, goal=None, assumptions=None): + """ + :param goal: Input expression to prove + :type goal: sem.Expression + :param assumptions: Input expressions to use as assumptions in + the proof. 
+ :type assumptions: list(sem.Expression) + """ + self._goal = goal + + if not assumptions: + self._assumptions = [] + else: + self._assumptions = list(assumptions) + + self._result = None + """A holder for the result, to prevent unnecessary re-proving""" + + def add_assumptions(self, new_assumptions): + """ + Add new assumptions to the assumption list. + + :param new_assumptions: new assumptions + :type new_assumptions: list(sem.Expression) + """ + self._assumptions.extend(new_assumptions) + self._result = None + + def retract_assumptions(self, retracted, debug=False): + """ + Retract assumptions from the assumption list. + + :param debug: If True, give warning when ``retracted`` is not present on + assumptions list. + :type debug: bool + :param retracted: assumptions to be retracted + :type retracted: list(sem.Expression) + """ + retracted = set(retracted) + result_list = list(filter(lambda a: a not in retracted, self._assumptions)) + if debug and result_list == self._assumptions: + print(Warning("Assumptions list has not been changed:")) + self.print_assumptions() + + self._assumptions = result_list + + self._result = None + + def assumptions(self): + """ + List the current assumptions. + + :return: list of ``Expression`` + """ + return self._assumptions + + def goal(self): + """ + Return the goal + + :return: ``Expression`` + """ + return self._goal + + def print_assumptions(self): + """ + Print the list of the current assumptions. + """ + for a in self.assumptions(): + print(a) + + +class BaseProverCommand(BaseTheoremToolCommand, ProverCommand): + """ + This class holds a ``Prover``, a goal, and a list of assumptions. When + prove() is called, the ``Prover`` is executed with the goal and assumptions. 
+ """ + + def __init__(self, prover, goal=None, assumptions=None): + """ + :param prover: The theorem tool to execute with the assumptions + :type prover: Prover + :see: ``BaseTheoremToolCommand`` + """ + self._prover = prover + """The theorem tool to execute with the assumptions""" + + BaseTheoremToolCommand.__init__(self, goal, assumptions) + + self._proof = None + + def prove(self, verbose=False): + """ + Perform the actual proof. Store the result to prevent unnecessary + re-proving. + """ + if self._result is None: + self._result, self._proof = self._prover._prove( + self.goal(), self.assumptions(), verbose + ) + return self._result + + def proof(self, simplify=True): + """ + Return the proof string + :param simplify: bool simplify the proof? + :return: str + """ + if self._result is None: + raise LookupError("You have to call prove() first to get a proof!") + else: + return self.decorate_proof(self._proof, simplify) + + def decorate_proof(self, proof_string, simplify=True): + """ + Modify and return the proof string + :param proof_string: str the proof to decorate + :param simplify: bool simplify the proof? + :return: str + """ + return proof_string + + def get_prover(self): + return self._prover + + +class BaseModelBuilderCommand(BaseTheoremToolCommand, ModelBuilderCommand): + """ + This class holds a ``ModelBuilder``, a goal, and a list of assumptions. When + build_model() is called, the ``ModelBuilder`` is executed with the goal and + assumptions. + """ + + def __init__(self, modelbuilder, goal=None, assumptions=None): + """ + :param modelbuilder: The theorem tool to execute with the assumptions + :type modelbuilder: ModelBuilder + :see: ``BaseTheoremToolCommand`` + """ + self._modelbuilder = modelbuilder + """The theorem tool to execute with the assumptions""" + + BaseTheoremToolCommand.__init__(self, goal, assumptions) + + self._model = None + + def build_model(self, verbose=False): + """ + Attempt to build a model. 
Store the result to prevent unnecessary + re-building. + """ + if self._result is None: + self._result, self._model = self._modelbuilder._build_model( + self.goal(), self.assumptions(), verbose + ) + return self._result + + def model(self, format=None): + """ + Return a string representation of the model + + :param simplify: bool simplify the proof? + :return: str + """ + if self._result is None: + raise LookupError("You have to call build_model() first to " "get a model!") + else: + return self._decorate_model(self._model, format) + + def _decorate_model(self, valuation_str, format=None): + """ + :param valuation_str: str with the model builder's output + :param format: str indicating the format for displaying + :return: str + """ + return valuation_str + + def get_model_builder(self): + return self._modelbuilder + + +class TheoremToolCommandDecorator(TheoremToolCommand): + """ + A base decorator for the ``ProverCommandDecorator`` and + ``ModelBuilderCommandDecorator`` classes from which decorators can extend. + """ + + def __init__(self, command): + """ + :param command: ``TheoremToolCommand`` to decorate + """ + self._command = command + + # The decorator has its own versions of 'result' different from the + # underlying command + self._result = None + + def assumptions(self): + return self._command.assumptions() + + def goal(self): + return self._command.goal() + + def add_assumptions(self, new_assumptions): + self._command.add_assumptions(new_assumptions) + self._result = None + + def retract_assumptions(self, retracted, debug=False): + self._command.retract_assumptions(retracted, debug) + self._result = None + + def print_assumptions(self): + self._command.print_assumptions() + + +class ProverCommandDecorator(TheoremToolCommandDecorator, ProverCommand): + """ + A base decorator for the ``ProverCommand`` class from which other + prover command decorators can extend. 
+ """ + + def __init__(self, proverCommand): + """ + :param proverCommand: ``ProverCommand`` to decorate + """ + TheoremToolCommandDecorator.__init__(self, proverCommand) + + # The decorator has its own versions of 'result' and 'proof' + # because they may be different from the underlying command + self._proof = None + + def prove(self, verbose=False): + if self._result is None: + prover = self.get_prover() + self._result, self._proof = prover._prove( + self.goal(), self.assumptions(), verbose + ) + return self._result + + def proof(self, simplify=True): + """ + Return the proof string + :param simplify: bool simplify the proof? + :return: str + """ + if self._result is None: + raise LookupError("You have to call prove() first to get a proof!") + else: + return self.decorate_proof(self._proof, simplify) + + def decorate_proof(self, proof_string, simplify=True): + """ + Modify and return the proof string + :param proof_string: str the proof to decorate + :param simplify: bool simplify the proof? + :return: str + """ + return self._command.decorate_proof(proof_string, simplify) + + def get_prover(self): + return self._command.get_prover() + + +class ModelBuilderCommandDecorator(TheoremToolCommandDecorator, ModelBuilderCommand): + """ + A base decorator for the ``ModelBuilderCommand`` class from which other + prover command decorators can extend. + """ + + def __init__(self, modelBuilderCommand): + """ + :param modelBuilderCommand: ``ModelBuilderCommand`` to decorate + """ + TheoremToolCommandDecorator.__init__(self, modelBuilderCommand) + + # The decorator has its own versions of 'result' and 'valuation' + # because they may be different from the underlying command + self._model = None + + def build_model(self, verbose=False): + """ + Attempt to build a model. Store the result to prevent unnecessary + re-building. 
+ """ + if self._result is None: + modelbuilder = self.get_model_builder() + self._result, self._model = modelbuilder._build_model( + self.goal(), self.assumptions(), verbose + ) + return self._result + + def model(self, format=None): + """ + Return a string representation of the model + + :param simplify: bool simplify the proof? + :return: str + """ + if self._result is None: + raise LookupError("You have to call build_model() first to " "get a model!") + else: + return self._decorate_model(self._model, format) + + def _decorate_model(self, valuation_str, format=None): + """ + Modify and return the proof string + :param valuation_str: str with the model builder's output + :param format: str indicating the format for displaying + :return: str + """ + return self._command._decorate_model(valuation_str, format) + + def get_model_builder(self): + return self._command.get_prover() + + +class ParallelProverBuilder(Prover, ModelBuilder): + """ + This class stores both a prover and a model builder and when either + prove() or build_model() is called, then both theorem tools are run in + parallel. Whichever finishes first, the prover or the model builder, is the + result that will be used. 
+ """ + + def __init__(self, prover, modelbuilder): + self._prover = prover + self._modelbuilder = modelbuilder + + def _prove(self, goal=None, assumptions=None, verbose=False): + return self._run(goal, assumptions, verbose), "" + + def _build_model(self, goal=None, assumptions=None, verbose=False): + return not self._run(goal, assumptions, verbose), "" + + def _run(self, goal, assumptions, verbose): + # Set up two thread, Prover and ModelBuilder to run in parallel + tp_thread = TheoremToolThread( + lambda: self._prover.prove(goal, assumptions, verbose), verbose, "TP" + ) + mb_thread = TheoremToolThread( + lambda: self._modelbuilder.build_model(goal, assumptions, verbose), + verbose, + "MB", + ) + + tp_thread.start() + mb_thread.start() + + while tp_thread.is_alive() and mb_thread.is_alive(): + # wait until either the prover or the model builder is done + pass + + if tp_thread.result is not None: + return tp_thread.result + elif mb_thread.result is not None: + return not mb_thread.result + else: + return None + + +class ParallelProverBuilderCommand(BaseProverCommand, BaseModelBuilderCommand): + """ + This command stores both a prover and a model builder and when either + prove() or build_model() is called, then both theorem tools are run in + parallel. Whichever finishes first, the prover or the model builder, is the + result that will be used. + + Because the theorem prover result is the opposite of the model builder + result, we will treat self._result as meaning "proof found/no model found". 
+ """ + + def __init__(self, prover, modelbuilder, goal=None, assumptions=None): + BaseProverCommand.__init__(self, prover, goal, assumptions) + BaseModelBuilderCommand.__init__(self, modelbuilder, goal, assumptions) + + def prove(self, verbose=False): + return self._run(verbose) + + def build_model(self, verbose=False): + return not self._run(verbose) + + def _run(self, verbose): + # Set up two thread, Prover and ModelBuilder to run in parallel + tp_thread = TheoremToolThread( + lambda: BaseProverCommand.prove(self, verbose), verbose, "TP" + ) + mb_thread = TheoremToolThread( + lambda: BaseModelBuilderCommand.build_model(self, verbose), verbose, "MB" + ) + + tp_thread.start() + mb_thread.start() + + while tp_thread.is_alive() and mb_thread.is_alive(): + # wait until either the prover or the model builder is done + pass + + if tp_thread.result is not None: + self._result = tp_thread.result + elif mb_thread.result is not None: + self._result = not mb_thread.result + return self._result + + +class TheoremToolThread(threading.Thread): + def __init__(self, command, verbose, name=None): + threading.Thread.__init__(self) + self._command = command + self._result = None + self._verbose = verbose + self._name = name + + def run(self): + try: + self._result = self._command() + if self._verbose: + print( + "Thread %s finished with result %s at %s" + % (self._name, self._result, time.localtime(time.time())) + ) + except Exception as e: + print(e) + print("Thread %s completed abnormally" % (self._name)) + + @property + def result(self): + return self._result diff --git a/lib/python3.10/site-packages/nltk/inference/discourse.py b/lib/python3.10/site-packages/nltk/inference/discourse.py new file mode 100644 index 0000000000000000000000000000000000000000..9630234dcf3837d9da2b4213fe26d22491899932 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/inference/discourse.py @@ -0,0 +1,651 @@ +# Natural Language Toolkit: Discourse Processing +# +# Author: Ewan Klein +# Dan Garrette +# 
+# URL: +# For license information, see LICENSE.TXT + +r""" +Module for incrementally developing simple discourses, and checking for semantic ambiguity, +consistency and informativeness. + +Many of the ideas are based on the CURT family of programs of Blackburn and Bos +(see http://homepages.inf.ed.ac.uk/jbos/comsem/book1.html). + +Consistency checking is carried out by using the ``mace`` module to call the Mace4 model builder. +Informativeness checking is carried out with a call to ``Prover.prove()`` from +the ``inference`` module. + +``DiscourseTester`` is a constructor for discourses. +The basic data structure is a list of sentences, stored as ``self._sentences``. Each sentence in the list +is assigned a "sentence ID" (``sid``) of the form ``s``\ *i*. For example:: + + s0: A boxer walks + s1: Every boxer chases a girl + +Each sentence can be ambiguous between a number of readings, each of which receives a +"reading ID" (``rid``) of the form ``s``\ *i* -``r``\ *j*. For example:: + + s0 readings: + + s0-r1: some x.(boxer(x) & walk(x)) + s0-r0: some x.(boxerdog(x) & walk(x)) + +A "thread" is a list of readings, represented as a list of ``rid``\ s. +Each thread receives a "thread ID" (``tid``) of the form ``d``\ *i*. +For example:: + + d0: ['s0-r0', 's1-r0'] + +The set of all threads for a discourse is the Cartesian product of all the readings of the sequences of sentences. +(This is not intended to scale beyond very short discourses!) The method ``readings(filter=True)`` will only show +those threads which are consistent (taking into account any background assumptions). 
+""" + +import os +from abc import ABCMeta, abstractmethod +from functools import reduce +from operator import add, and_ + +from nltk.data import show_cfg +from nltk.inference.mace import MaceCommand +from nltk.inference.prover9 import Prover9Command +from nltk.parse import load_parser +from nltk.parse.malt import MaltParser +from nltk.sem.drt import AnaphoraResolutionException, resolve_anaphora +from nltk.sem.glue import DrtGlue +from nltk.sem.logic import Expression +from nltk.tag import RegexpTagger + + +class ReadingCommand(metaclass=ABCMeta): + @abstractmethod + def parse_to_readings(self, sentence): + """ + :param sentence: the sentence to read + :type sentence: str + """ + + def process_thread(self, sentence_readings): + """ + This method should be used to handle dependencies between readings such + as resolving anaphora. + + :param sentence_readings: readings to process + :type sentence_readings: list(Expression) + :return: the list of readings after processing + :rtype: list(Expression) + """ + return sentence_readings + + @abstractmethod + def combine_readings(self, readings): + """ + :param readings: readings to combine + :type readings: list(Expression) + :return: one combined reading + :rtype: Expression + """ + + @abstractmethod + def to_fol(self, expression): + """ + Convert this expression into a First-Order Logic expression. 
+ + :param expression: an expression + :type expression: Expression + :return: a FOL version of the input expression + :rtype: Expression + """ + + +class CfgReadingCommand(ReadingCommand): + def __init__(self, gramfile=None): + """ + :param gramfile: name of file where grammar can be loaded + :type gramfile: str + """ + self._gramfile = ( + gramfile if gramfile else "grammars/book_grammars/discourse.fcfg" + ) + self._parser = load_parser(self._gramfile) + + def parse_to_readings(self, sentence): + """:see: ReadingCommand.parse_to_readings()""" + from nltk.sem import root_semrep + + tokens = sentence.split() + trees = self._parser.parse(tokens) + return [root_semrep(tree) for tree in trees] + + def combine_readings(self, readings): + """:see: ReadingCommand.combine_readings()""" + return reduce(and_, readings) + + def to_fol(self, expression): + """:see: ReadingCommand.to_fol()""" + return expression + + +class DrtGlueReadingCommand(ReadingCommand): + def __init__(self, semtype_file=None, remove_duplicates=False, depparser=None): + """ + :param semtype_file: name of file where grammar can be loaded + :param remove_duplicates: should duplicates be removed? 
+ :param depparser: the dependency parser + """ + if semtype_file is None: + semtype_file = os.path.join( + "grammars", "sample_grammars", "drt_glue.semtype" + ) + self._glue = DrtGlue( + semtype_file=semtype_file, + remove_duplicates=remove_duplicates, + depparser=depparser, + ) + + def parse_to_readings(self, sentence): + """:see: ReadingCommand.parse_to_readings()""" + return self._glue.parse_to_meaning(sentence) + + def process_thread(self, sentence_readings): + """:see: ReadingCommand.process_thread()""" + try: + return [self.combine_readings(sentence_readings)] + except AnaphoraResolutionException: + return [] + + def combine_readings(self, readings): + """:see: ReadingCommand.combine_readings()""" + thread_reading = reduce(add, readings) + return resolve_anaphora(thread_reading.simplify()) + + def to_fol(self, expression): + """:see: ReadingCommand.to_fol()""" + return expression.fol() + + +class DiscourseTester: + """ + Check properties of an ongoing discourse. + """ + + def __init__(self, input, reading_command=None, background=None): + """ + Initialize a ``DiscourseTester``. + + :param input: the discourse sentences + :type input: list of str + :param background: Formulas which express background assumptions + :type background: list(Expression) + """ + self._input = input + self._sentences = {"s%s" % i: sent for i, sent in enumerate(input)} + self._models = None + self._readings = {} + self._reading_command = ( + reading_command if reading_command else CfgReadingCommand() + ) + self._threads = {} + self._filtered_threads = {} + if background is not None: + from nltk.sem.logic import Expression + + for e in background: + assert isinstance(e, Expression) + self._background = background + else: + self._background = [] + + ############################### + # Sentences + ############################### + + def sentences(self): + """ + Display the list of sentences in the current discourse. 
+ """ + for id in sorted(self._sentences): + print(f"{id}: {self._sentences[id]}") + + def add_sentence(self, sentence, informchk=False, consistchk=False): + """ + Add a sentence to the current discourse. + + Updates ``self._input`` and ``self._sentences``. + :param sentence: An input sentence + :type sentence: str + :param informchk: if ``True``, check that the result of adding the sentence is thread-informative. Updates ``self._readings``. + :param consistchk: if ``True``, check that the result of adding the sentence is thread-consistent. Updates ``self._readings``. + + """ + # check whether the new sentence is informative (i.e. not entailed by the previous discourse) + if informchk: + self.readings(verbose=False) + for tid in sorted(self._threads): + assumptions = [reading for (rid, reading) in self.expand_threads(tid)] + assumptions += self._background + for sent_reading in self._get_readings(sentence): + tp = Prover9Command(goal=sent_reading, assumptions=assumptions) + if tp.prove(): + print( + "Sentence '%s' under reading '%s':" + % (sentence, str(sent_reading)) + ) + print("Not informative relative to thread '%s'" % tid) + + self._input.append(sentence) + self._sentences = {"s%s" % i: sent for i, sent in enumerate(self._input)} + # check whether adding the new sentence to the discourse preserves consistency (i.e. a model can be found for the combined set of + # of assumptions + if consistchk: + self.readings(verbose=False) + self.models(show=False) + + def retract_sentence(self, sentence, verbose=True): + """ + Remove a sentence from the current discourse. + + Updates ``self._input``, ``self._sentences`` and ``self._readings``. + :param sentence: An input sentence + :type sentence: str + :param verbose: If ``True``, report on the updated list of sentences. + """ + try: + self._input.remove(sentence) + except ValueError: + print( + "Retraction failed. 
The sentence '%s' is not part of the current discourse:" + % sentence + ) + self.sentences() + return None + self._sentences = {"s%s" % i: sent for i, sent in enumerate(self._input)} + self.readings(verbose=False) + if verbose: + print("Current sentences are ") + self.sentences() + + def grammar(self): + """ + Print out the grammar in use for parsing input sentences + """ + show_cfg(self._reading_command._gramfile) + + ############################### + # Readings and Threads + ############################### + + def _get_readings(self, sentence): + """ + Build a list of semantic readings for a sentence. + + :rtype: list(Expression) + """ + return self._reading_command.parse_to_readings(sentence) + + def _construct_readings(self): + """ + Use ``self._sentences`` to construct a value for ``self._readings``. + """ + # re-initialize self._readings in case we have retracted a sentence + self._readings = {} + for sid in sorted(self._sentences): + sentence = self._sentences[sid] + readings = self._get_readings(sentence) + self._readings[sid] = { + f"{sid}-r{rid}": reading.simplify() + for rid, reading in enumerate(sorted(readings, key=str)) + } + + def _construct_threads(self): + """ + Use ``self._readings`` to construct a value for ``self._threads`` + and use the model builder to construct a value for ``self._filtered_threads`` + """ + thread_list = [[]] + for sid in sorted(self._readings): + thread_list = self.multiply(thread_list, sorted(self._readings[sid])) + self._threads = {"d%s" % tid: thread for tid, thread in enumerate(thread_list)} + # re-initialize the filtered threads + self._filtered_threads = {} + # keep the same ids, but only include threads which get models + consistency_checked = self._check_consistency(self._threads) + for (tid, thread) in self._threads.items(): + if (tid, True) in consistency_checked: + self._filtered_threads[tid] = thread + + def _show_readings(self, sentence=None): + """ + Print out the readings for the discourse (or a single 
sentence). + """ + if sentence is not None: + print("The sentence '%s' has these readings:" % sentence) + for r in [str(reading) for reading in (self._get_readings(sentence))]: + print(" %s" % r) + else: + for sid in sorted(self._readings): + print() + print("%s readings:" % sid) + print() #'-' * 30 + for rid in sorted(self._readings[sid]): + lf = self._readings[sid][rid] + print(f"{rid}: {lf.normalize()}") + + def _show_threads(self, filter=False, show_thread_readings=False): + """ + Print out the value of ``self._threads`` or ``self._filtered_hreads`` + """ + threads = self._filtered_threads if filter else self._threads + for tid in sorted(threads): + if show_thread_readings: + readings = [ + self._readings[rid.split("-")[0]][rid] for rid in self._threads[tid] + ] + try: + thread_reading = ( + ": %s" + % self._reading_command.combine_readings(readings).normalize() + ) + except Exception as e: + thread_reading = ": INVALID: %s" % e.__class__.__name__ + else: + thread_reading = "" + + print("%s:" % tid, self._threads[tid], thread_reading) + + def readings( + self, + sentence=None, + threaded=False, + verbose=True, + filter=False, + show_thread_readings=False, + ): + """ + Construct and show the readings of the discourse (or of a single sentence). + + :param sentence: test just this sentence + :type sentence: str + :param threaded: if ``True``, print out each thread ID and the corresponding thread. + :param filter: if ``True``, only print out consistent thread IDs and threads. 
+ """ + self._construct_readings() + self._construct_threads() + + # if we are filtering or showing thread readings, show threads + if filter or show_thread_readings: + threaded = True + + if verbose: + if not threaded: + self._show_readings(sentence=sentence) + else: + self._show_threads( + filter=filter, show_thread_readings=show_thread_readings + ) + + def expand_threads(self, thread_id, threads=None): + """ + Given a thread ID, find the list of ``logic.Expression`` objects corresponding to the reading IDs in that thread. + + :param thread_id: thread ID + :type thread_id: str + :param threads: a mapping from thread IDs to lists of reading IDs + :type threads: dict + :return: A list of pairs ``(rid, reading)`` where reading is the ``logic.Expression`` associated with a reading ID + :rtype: list of tuple + """ + if threads is None: + threads = self._threads + return [ + (rid, self._readings[sid][rid]) + for rid in threads[thread_id] + for sid in rid.split("-")[:1] + ] + + ############################### + # Models and Background + ############################### + + def _check_consistency(self, threads, show=False, verbose=False): + results = [] + for tid in sorted(threads): + assumptions = [ + reading for (rid, reading) in self.expand_threads(tid, threads=threads) + ] + assumptions = list( + map( + self._reading_command.to_fol, + self._reading_command.process_thread(assumptions), + ) + ) + if assumptions: + assumptions += self._background + # if Mace4 finds a model, it always seems to find it quickly + mb = MaceCommand(None, assumptions, max_models=20) + modelfound = mb.build_model() + else: + modelfound = False + results.append((tid, modelfound)) + if show: + spacer(80) + print("Model for Discourse Thread %s" % tid) + spacer(80) + if verbose: + for a in assumptions: + print(a) + spacer(80) + if modelfound: + print(mb.model(format="cooked")) + else: + print("No model found!\n") + return results + + def models(self, thread_id=None, show=True, verbose=False): + """ 
+ Call Mace4 to build a model for each current discourse thread. + + :param thread_id: thread ID + :type thread_id: str + :param show: If ``True``, display the model that has been found. + """ + self._construct_readings() + self._construct_threads() + threads = {thread_id: self._threads[thread_id]} if thread_id else self._threads + + for (tid, modelfound) in self._check_consistency( + threads, show=show, verbose=verbose + ): + idlist = [rid for rid in threads[tid]] + + if not modelfound: + print(f"Inconsistent discourse: {tid} {idlist}:") + for rid, reading in self.expand_threads(tid): + print(f" {rid}: {reading.normalize()}") + print() + else: + print(f"Consistent discourse: {tid} {idlist}:") + for rid, reading in self.expand_threads(tid): + print(f" {rid}: {reading.normalize()}") + print() + + def add_background(self, background, verbose=False): + """ + Add a list of background assumptions for reasoning about the discourse. + + When called, this method also updates the discourse model's set of readings and threads. + :param background: Formulas which contain background information + :type background: list(Expression) + """ + from nltk.sem.logic import Expression + + for (count, e) in enumerate(background): + assert isinstance(e, Expression) + if verbose: + print("Adding assumption %s to background" % count) + self._background.append(e) + + # update the state + self._construct_readings() + self._construct_threads() + + def background(self): + """ + Show the current background assumptions. + """ + for e in self._background: + print(str(e)) + + ############################### + # Misc + ############################### + + @staticmethod + def multiply(discourse, readings): + """ + Multiply every thread in ``discourse`` by every reading in ``readings``. 
+ + Given discourse = [['A'], ['B']], readings = ['a', 'b', 'c'] , returns + [['A', 'a'], ['A', 'b'], ['A', 'c'], ['B', 'a'], ['B', 'b'], ['B', 'c']] + + :param discourse: the current list of readings + :type discourse: list of lists + :param readings: an additional list of readings + :type readings: list(Expression) + :rtype: A list of lists + """ + result = [] + for sublist in discourse: + for r in readings: + new = [] + new += sublist + new.append(r) + result.append(new) + return result + + +def load_fol(s): + """ + Temporarily duplicated from ``nltk.sem.util``. + Convert a file of first order formulas into a list of ``Expression`` objects. + + :param s: the contents of the file + :type s: str + :return: a list of parsed formulas. + :rtype: list(Expression) + """ + statements = [] + for linenum, line in enumerate(s.splitlines()): + line = line.strip() + if line.startswith("#") or line == "": + continue + try: + statements.append(Expression.fromstring(line)) + except Exception as e: + raise ValueError(f"Unable to parse line {linenum}: {line}") from e + return statements + + +############################### +# Demo +############################### +def discourse_demo(reading_command=None): + """ + Illustrate the various methods of ``DiscourseTester`` + """ + dt = DiscourseTester( + ["A boxer walks", "Every boxer chases a girl"], reading_command + ) + dt.models() + print() + # dt.grammar() + print() + dt.sentences() + print() + dt.readings() + print() + dt.readings(threaded=True) + print() + dt.models("d1") + dt.add_sentence("John is a boxer") + print() + dt.sentences() + print() + dt.readings(threaded=True) + print() + dt = DiscourseTester( + ["A student dances", "Every student is a person"], reading_command + ) + print() + dt.add_sentence("No person dances", consistchk=True) + print() + dt.readings() + print() + dt.retract_sentence("No person dances", verbose=True) + print() + dt.models() + print() + dt.readings("A person dances") + print() + dt.add_sentence("A 
person dances", informchk=True) + dt = DiscourseTester( + ["Vincent is a boxer", "Fido is a boxer", "Vincent is married", "Fido barks"], + reading_command, + ) + dt.readings(filter=True) + import nltk.data + + background_file = os.path.join("grammars", "book_grammars", "background.fol") + background = nltk.data.load(background_file) + + print() + dt.add_background(background, verbose=False) + dt.background() + print() + dt.readings(filter=True) + print() + dt.models() + + +def drt_discourse_demo(reading_command=None): + """ + Illustrate the various methods of ``DiscourseTester`` + """ + dt = DiscourseTester(["every dog chases a boy", "he runs"], reading_command) + dt.models() + print() + dt.sentences() + print() + dt.readings() + print() + dt.readings(show_thread_readings=True) + print() + dt.readings(filter=True, show_thread_readings=True) + + +def spacer(num=30): + print("-" * num) + + +def demo(): + discourse_demo() + + tagger = RegexpTagger( + [ + ("^(chases|runs)$", "VB"), + ("^(a)$", "ex_quant"), + ("^(every)$", "univ_quant"), + ("^(dog|boy)$", "NN"), + ("^(he)$", "PRP"), + ] + ) + depparser = MaltParser(tagger=tagger) + drt_discourse_demo( + DrtGlueReadingCommand(remove_duplicates=False, depparser=depparser) + ) + + +if __name__ == "__main__": + demo() diff --git a/lib/python3.10/site-packages/nltk/inference/mace.py b/lib/python3.10/site-packages/nltk/inference/mace.py new file mode 100644 index 0000000000000000000000000000000000000000..ee4d9e8e38d7db34c4b58f9c37dee330d397e123 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/inference/mace.py @@ -0,0 +1,383 @@ +# Natural Language Toolkit: Interface to the Mace4 Model Builder +# +# Author: Dan Garrette +# Ewan Klein + +# URL: +# For license information, see LICENSE.TXT + +""" +A model builder that makes use of the external 'Mace4' package. 
+""" + +import os +import tempfile + +from nltk.inference.api import BaseModelBuilderCommand, ModelBuilder +from nltk.inference.prover9 import Prover9CommandParent, Prover9Parent +from nltk.sem import Expression, Valuation +from nltk.sem.logic import is_indvar + + +class MaceCommand(Prover9CommandParent, BaseModelBuilderCommand): + """ + A ``MaceCommand`` specific to the ``Mace`` model builder. It contains + a print_assumptions() method that is used to print the list + of assumptions in multiple formats. + """ + + _interpformat_bin = None + + def __init__(self, goal=None, assumptions=None, max_models=500, model_builder=None): + """ + :param goal: Input expression to prove + :type goal: sem.Expression + :param assumptions: Input expressions to use as assumptions in + the proof. + :type assumptions: list(sem.Expression) + :param max_models: The maximum number of models that Mace will try before + simply returning false. (Use 0 for no maximum.) + :type max_models: int + """ + if model_builder is not None: + assert isinstance(model_builder, Mace) + else: + model_builder = Mace(max_models) + + BaseModelBuilderCommand.__init__(self, model_builder, goal, assumptions) + + @property + def valuation(mbc): + return mbc.model("valuation") + + def _convert2val(self, valuation_str): + """ + Transform the output file into an NLTK-style Valuation. + + :return: A model if one is generated; None otherwise. 
+ :rtype: sem.Valuation + """ + valuation_standard_format = self._transform_output(valuation_str, "standard") + + val = [] + for line in valuation_standard_format.splitlines(False): + l = line.strip() + + if l.startswith("interpretation"): + # find the number of entities in the model + num_entities = int(l[l.index("(") + 1 : l.index(",")].strip()) + + elif l.startswith("function") and l.find("_") == -1: + # replace the integer identifier with a corresponding alphabetic character + name = l[l.index("(") + 1 : l.index(",")].strip() + if is_indvar(name): + name = name.upper() + value = int(l[l.index("[") + 1 : l.index("]")].strip()) + val.append((name, MaceCommand._make_model_var(value))) + + elif l.startswith("relation"): + l = l[l.index("(") + 1 :] + if "(" in l: + # relation is not nullary + name = l[: l.index("(")].strip() + values = [ + int(v.strip()) + for v in l[l.index("[") + 1 : l.index("]")].split(",") + ] + val.append( + (name, MaceCommand._make_relation_set(num_entities, values)) + ) + else: + # relation is nullary + name = l[: l.index(",")].strip() + value = int(l[l.index("[") + 1 : l.index("]")].strip()) + val.append((name, value == 1)) + + return Valuation(val) + + @staticmethod + def _make_relation_set(num_entities, values): + """ + Convert a Mace4-style relation table into a dictionary. + + :param num_entities: the number of entities in the model; determines the row length in the table. + :type num_entities: int + :param values: a list of 1's and 0's that represent whether a relation holds in a Mace4 model. 
+ :type values: list of int + """ + r = set() + for position in [pos for (pos, v) in enumerate(values) if v == 1]: + r.add( + tuple(MaceCommand._make_relation_tuple(position, values, num_entities)) + ) + return r + + @staticmethod + def _make_relation_tuple(position, values, num_entities): + if len(values) == 1: + return [] + else: + sublist_size = len(values) // num_entities + sublist_start = position // sublist_size + sublist_position = int(position % sublist_size) + + sublist = values[ + sublist_start * sublist_size : (sublist_start + 1) * sublist_size + ] + return [ + MaceCommand._make_model_var(sublist_start) + ] + MaceCommand._make_relation_tuple( + sublist_position, sublist, num_entities + ) + + @staticmethod + def _make_model_var(value): + """ + Pick an alphabetic character as identifier for an entity in the model. + + :param value: where to index into the list of characters + :type value: int + """ + letter = [ + "a", + "b", + "c", + "d", + "e", + "f", + "g", + "h", + "i", + "j", + "k", + "l", + "m", + "n", + "o", + "p", + "q", + "r", + "s", + "t", + "u", + "v", + "w", + "x", + "y", + "z", + ][value] + num = value // 26 + return letter + str(num) if num > 0 else letter + + def _decorate_model(self, valuation_str, format): + """ + Print out a Mace4 model using any Mace4 ``interpformat`` format. + See https://www.cs.unm.edu/~mccune/mace4/manual/ for details. + + :param valuation_str: str with the model builder's output + :param format: str indicating the format for displaying + models. Defaults to 'standard' format. + :return: str + """ + if not format: + return valuation_str + elif format == "valuation": + return self._convert2val(valuation_str) + else: + return self._transform_output(valuation_str, format) + + def _transform_output(self, valuation_str, format): + """ + Transform the output file into any Mace4 ``interpformat`` format. + + :param format: Output format for displaying models. 
+ :type format: str + """ + if format in [ + "standard", + "standard2", + "portable", + "tabular", + "raw", + "cooked", + "xml", + "tex", + ]: + return self._call_interpformat(valuation_str, [format])[0] + else: + raise LookupError("The specified format does not exist") + + def _call_interpformat(self, input_str, args=[], verbose=False): + """ + Call the ``interpformat`` binary with the given input. + + :param input_str: A string whose contents are used as stdin. + :param args: A list of command-line arguments. + :return: A tuple (stdout, returncode) + :see: ``config_prover9`` + """ + if self._interpformat_bin is None: + self._interpformat_bin = self._modelbuilder._find_binary( + "interpformat", verbose + ) + + return self._modelbuilder._call( + input_str, self._interpformat_bin, args, verbose + ) + + +class Mace(Prover9Parent, ModelBuilder): + _mace4_bin = None + + def __init__(self, end_size=500): + self._end_size = end_size + """The maximum model size that Mace will try before + simply returning false. (Use -1 for no maximum.)""" + + def _build_model(self, goal=None, assumptions=None, verbose=False): + """ + Use Mace4 to build a first order model. + + :return: ``True`` if a model was found (i.e. Mace returns value of 0), + else ``False`` + """ + if not assumptions: + assumptions = [] + + stdout, returncode = self._call_mace4( + self.prover9_input(goal, assumptions), verbose=verbose + ) + return (returncode == 0, stdout) + + def _call_mace4(self, input_str, args=[], verbose=False): + """ + Call the ``mace4`` binary with the given input. + + :param input_str: A string whose contents are used as stdin. + :param args: A list of command-line arguments. 
+ :return: A tuple (stdout, returncode) + :see: ``config_prover9`` + """ + if self._mace4_bin is None: + self._mace4_bin = self._find_binary("mace4", verbose) + + updated_input_str = "" + if self._end_size > 0: + updated_input_str += "assign(end_size, %d).\n\n" % self._end_size + updated_input_str += input_str + + return self._call(updated_input_str, self._mace4_bin, args, verbose) + + +def spacer(num=30): + print("-" * num) + + +def decode_result(found): + """ + Decode the result of model_found() + + :param found: The output of model_found() + :type found: bool + """ + return {True: "Countermodel found", False: "No countermodel found", None: "None"}[ + found + ] + + +def test_model_found(arguments): + """ + Try some proofs and exhibit the results. + """ + for (goal, assumptions) in arguments: + g = Expression.fromstring(goal) + alist = [lp.parse(a) for a in assumptions] + m = MaceCommand(g, assumptions=alist, max_models=50) + found = m.build_model() + for a in alist: + print(" %s" % a) + print(f"|- {g}: {decode_result(found)}\n") + + +def test_build_model(arguments): + """ + Try to build a ``nltk.sem.Valuation``. + """ + g = Expression.fromstring("all x.man(x)") + alist = [ + Expression.fromstring(a) + for a in [ + "man(John)", + "man(Socrates)", + "man(Bill)", + "some x.(-(x = John) & man(x) & sees(John,x))", + "some x.(-(x = Bill) & man(x))", + "all x.some y.(man(x) -> gives(Socrates,x,y))", + ] + ] + + m = MaceCommand(g, assumptions=alist) + m.build_model() + spacer() + print("Assumptions and Goal") + spacer() + for a in alist: + print(" %s" % a) + print(f"|- {g}: {decode_result(m.build_model())}\n") + spacer() + # print(m.model('standard')) + # print(m.model('cooked')) + print("Valuation") + spacer() + print(m.valuation, "\n") + + +def test_transform_output(argument_pair): + """ + Transform the model into various Mace4 ``interpformat`` formats. 
+ """ + g = Expression.fromstring(argument_pair[0]) + alist = [lp.parse(a) for a in argument_pair[1]] + m = MaceCommand(g, assumptions=alist) + m.build_model() + for a in alist: + print(" %s" % a) + print(f"|- {g}: {m.build_model()}\n") + for format in ["standard", "portable", "xml", "cooked"]: + spacer() + print("Using '%s' format" % format) + spacer() + print(m.model(format=format)) + + +def test_make_relation_set(): + print( + MaceCommand._make_relation_set(num_entities=3, values=[1, 0, 1]) + == {("c",), ("a",)} + ) + print( + MaceCommand._make_relation_set( + num_entities=3, values=[0, 0, 0, 0, 0, 0, 1, 0, 0] + ) + == {("c", "a")} + ) + print( + MaceCommand._make_relation_set(num_entities=2, values=[0, 0, 1, 0, 0, 0, 1, 0]) + == {("a", "b", "a"), ("b", "b", "a")} + ) + + +arguments = [ + ("mortal(Socrates)", ["all x.(man(x) -> mortal(x))", "man(Socrates)"]), + ("(not mortal(Socrates))", ["all x.(man(x) -> mortal(x))", "man(Socrates)"]), +] + + +def demo(): + test_model_found(arguments) + test_build_model(arguments) + test_transform_output(arguments[1]) + + +if __name__ == "__main__": + demo() diff --git a/lib/python3.10/site-packages/nltk/inference/nonmonotonic.py b/lib/python3.10/site-packages/nltk/inference/nonmonotonic.py new file mode 100644 index 0000000000000000000000000000000000000000..2f7075ed11e7833201ad98c6fc80406d1ef646db --- /dev/null +++ b/lib/python3.10/site-packages/nltk/inference/nonmonotonic.py @@ -0,0 +1,561 @@ +# Natural Language Toolkit: Nonmonotonic Reasoning +# +# Author: Daniel H. Garrette +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +""" +A module to perform nonmonotonic reasoning. The ideas and demonstrations in +this module are based on "Logical Foundations of Artificial Intelligence" by +Michael R. Genesereth and Nils J. Nilsson. 
+""" + +from collections import defaultdict +from functools import reduce + +from nltk.inference.api import Prover, ProverCommandDecorator +from nltk.inference.prover9 import Prover9, Prover9Command +from nltk.sem.logic import ( + AbstractVariableExpression, + AllExpression, + AndExpression, + ApplicationExpression, + BooleanExpression, + EqualityExpression, + ExistsExpression, + Expression, + ImpExpression, + NegatedExpression, + Variable, + VariableExpression, + operator, + unique_variable, +) + + +class ProverParseError(Exception): + pass + + +def get_domain(goal, assumptions): + if goal is None: + all_expressions = assumptions + else: + all_expressions = assumptions + [-goal] + return reduce(operator.or_, (a.constants() for a in all_expressions), set()) + + +class ClosedDomainProver(ProverCommandDecorator): + """ + This is a prover decorator that adds domain closure assumptions before + proving. + """ + + def assumptions(self): + assumptions = [a for a in self._command.assumptions()] + goal = self._command.goal() + domain = get_domain(goal, assumptions) + return [self.replace_quants(ex, domain) for ex in assumptions] + + def goal(self): + goal = self._command.goal() + domain = get_domain(goal, self._command.assumptions()) + return self.replace_quants(goal, domain) + + def replace_quants(self, ex, domain): + """ + Apply the closed domain assumption to the expression + + - Domain = union([e.free()|e.constants() for e in all_expressions]) + - translate "exists x.P" to "(z=d1 | z=d2 | ... ) & P.replace(x,z)" OR + "P.replace(x, d1) | P.replace(x, d2) | ..." + - translate "all x.P" to "P.replace(x, d1) & P.replace(x, d2) & ..." 
+ + :param ex: ``Expression`` + :param domain: set of {Variable}s + :return: ``Expression`` + """ + if isinstance(ex, AllExpression): + conjuncts = [ + ex.term.replace(ex.variable, VariableExpression(d)) for d in domain + ] + conjuncts = [self.replace_quants(c, domain) for c in conjuncts] + return reduce(lambda x, y: x & y, conjuncts) + elif isinstance(ex, BooleanExpression): + return ex.__class__( + self.replace_quants(ex.first, domain), + self.replace_quants(ex.second, domain), + ) + elif isinstance(ex, NegatedExpression): + return -self.replace_quants(ex.term, domain) + elif isinstance(ex, ExistsExpression): + disjuncts = [ + ex.term.replace(ex.variable, VariableExpression(d)) for d in domain + ] + disjuncts = [self.replace_quants(d, domain) for d in disjuncts] + return reduce(lambda x, y: x | y, disjuncts) + else: + return ex + + +class UniqueNamesProver(ProverCommandDecorator): + """ + This is a prover decorator that adds unique names assumptions before + proving. + """ + + def assumptions(self): + """ + - Domain = union([e.free()|e.constants() for e in all_expressions]) + - if "d1 = d2" cannot be proven from the premises, then add "d1 != d2" + """ + assumptions = self._command.assumptions() + + domain = list(get_domain(self._command.goal(), assumptions)) + + # build a dictionary of obvious equalities + eq_sets = SetHolder() + for a in assumptions: + if isinstance(a, EqualityExpression): + av = a.first.variable + bv = a.second.variable + # put 'a' and 'b' in the same set + eq_sets[av].add(bv) + + new_assumptions = [] + for i, a in enumerate(domain): + for b in domain[i + 1 :]: + # if a and b are not already in the same equality set + if b not in eq_sets[a]: + newEqEx = EqualityExpression( + VariableExpression(a), VariableExpression(b) + ) + if Prover9().prove(newEqEx, assumptions): + # we can prove that the names are the same entity. + # remember that they are equal so we don't re-check. 
+ eq_sets[a].add(b) + else: + # we can't prove it, so assume unique names + new_assumptions.append(-newEqEx) + + return assumptions + new_assumptions + + +class SetHolder(list): + """ + A list of sets of Variables. + """ + + def __getitem__(self, item): + """ + :param item: ``Variable`` + :return: the set containing 'item' + """ + assert isinstance(item, Variable) + for s in self: + if item in s: + return s + # item is not found in any existing set. so create a new set + new = {item} + self.append(new) + return new + + +class ClosedWorldProver(ProverCommandDecorator): + """ + This is a prover decorator that completes predicates before proving. + + If the assumptions contain "P(A)", then "all x.(P(x) -> (x=A))" is the completion of "P". + If the assumptions contain "all x.(ostrich(x) -> bird(x))", then "all x.(bird(x) -> ostrich(x))" is the completion of "bird". + If the assumptions don't contain anything that are "P", then "all x.-P(x)" is the completion of "P". + + walk(Socrates) + Socrates != Bill + + all x.(walk(x) -> (x=Socrates)) + ---------------- + -walk(Bill) + + see(Socrates, John) + see(John, Mary) + Socrates != John + John != Mary + + all x.all y.(see(x,y) -> ((x=Socrates & y=John) | (x=John & y=Mary))) + ---------------- + -see(Socrates, Mary) + + all x.(ostrich(x) -> bird(x)) + bird(Tweety) + -ostrich(Sam) + Sam != Tweety + + all x.(bird(x) -> (ostrich(x) | x=Tweety)) + + all x.-ostrich(x) + ------------------- + -bird(Sam) + """ + + def assumptions(self): + assumptions = self._command.assumptions() + + predicates = self._make_predicate_dict(assumptions) + + new_assumptions = [] + for p in predicates: + predHolder = predicates[p] + new_sig = self._make_unique_signature(predHolder) + new_sig_exs = [VariableExpression(v) for v in new_sig] + + disjuncts = [] + + # Turn the signatures into disjuncts + for sig in predHolder.signatures: + equality_exs = [] + for v1, v2 in zip(new_sig_exs, sig): + equality_exs.append(EqualityExpression(v1, v2)) + 
disjuncts.append(reduce(lambda x, y: x & y, equality_exs)) + + # Turn the properties into disjuncts + for prop in predHolder.properties: + # replace variables from the signature with new sig variables + bindings = {} + for v1, v2 in zip(new_sig_exs, prop[0]): + bindings[v2] = v1 + disjuncts.append(prop[1].substitute_bindings(bindings)) + + # make the assumption + if disjuncts: + # disjuncts exist, so make an implication + antecedent = self._make_antecedent(p, new_sig) + consequent = reduce(lambda x, y: x | y, disjuncts) + accum = ImpExpression(antecedent, consequent) + else: + # nothing has property 'p' + accum = NegatedExpression(self._make_antecedent(p, new_sig)) + + # quantify the implication + for new_sig_var in new_sig[::-1]: + accum = AllExpression(new_sig_var, accum) + new_assumptions.append(accum) + + return assumptions + new_assumptions + + def _make_unique_signature(self, predHolder): + """ + This method figures out how many arguments the predicate takes and + returns a tuple containing that number of unique variables. + """ + return tuple(unique_variable() for i in range(predHolder.signature_len)) + + def _make_antecedent(self, predicate, signature): + """ + Return an application expression with 'predicate' as the predicate + and 'signature' as the list of arguments. + """ + antecedent = predicate + for v in signature: + antecedent = antecedent(VariableExpression(v)) + return antecedent + + def _make_predicate_dict(self, assumptions): + """ + Create a dictionary of predicates from the assumptions. 
+ + :param assumptions: a list of ``Expression``s + :return: dict mapping ``AbstractVariableExpression`` to ``PredHolder`` + """ + predicates = defaultdict(PredHolder) + for a in assumptions: + self._map_predicates(a, predicates) + return predicates + + def _map_predicates(self, expression, predDict): + if isinstance(expression, ApplicationExpression): + func, args = expression.uncurry() + if isinstance(func, AbstractVariableExpression): + predDict[func].append_sig(tuple(args)) + elif isinstance(expression, AndExpression): + self._map_predicates(expression.first, predDict) + self._map_predicates(expression.second, predDict) + elif isinstance(expression, AllExpression): + # collect all the universally quantified variables + sig = [expression.variable] + term = expression.term + while isinstance(term, AllExpression): + sig.append(term.variable) + term = term.term + if isinstance(term, ImpExpression): + if isinstance(term.first, ApplicationExpression) and isinstance( + term.second, ApplicationExpression + ): + func1, args1 = term.first.uncurry() + func2, args2 = term.second.uncurry() + if ( + isinstance(func1, AbstractVariableExpression) + and isinstance(func2, AbstractVariableExpression) + and sig == [v.variable for v in args1] + and sig == [v.variable for v in args2] + ): + predDict[func2].append_prop((tuple(sig), term.first)) + predDict[func1].validate_sig_len(sig) + + +class PredHolder: + """ + This class will be used by a dictionary that will store information + about predicates to be used by the ``ClosedWorldProver``. + + The 'signatures' property is a list of tuples defining signatures for + which the predicate is true. For instance, 'see(john, mary)' would be + result in the signature '(john,mary)' for 'see'. + + The second element of the pair is a list of pairs such that the first + element of the pair is a tuple of variables and the second element is an + expression of those variables that makes the predicate true. 
For instance, + 'all x.all y.(see(x,y) -> know(x,y))' would result in "((x,y),('see(x,y)'))" + for 'know'. + """ + + def __init__(self): + self.signatures = [] + self.properties = [] + self.signature_len = None + + def append_sig(self, new_sig): + self.validate_sig_len(new_sig) + self.signatures.append(new_sig) + + def append_prop(self, new_prop): + self.validate_sig_len(new_prop[0]) + self.properties.append(new_prop) + + def validate_sig_len(self, new_sig): + if self.signature_len is None: + self.signature_len = len(new_sig) + elif self.signature_len != len(new_sig): + raise Exception("Signature lengths do not match") + + def __str__(self): + return f"({self.signatures},{self.properties},{self.signature_len})" + + def __repr__(self): + return "%s" % self + + +def closed_domain_demo(): + lexpr = Expression.fromstring + + p1 = lexpr(r"exists x.walk(x)") + p2 = lexpr(r"man(Socrates)") + c = lexpr(r"walk(Socrates)") + prover = Prover9Command(c, [p1, p2]) + print(prover.prove()) + cdp = ClosedDomainProver(prover) + print("assumptions:") + for a in cdp.assumptions(): + print(" ", a) + print("goal:", cdp.goal()) + print(cdp.prove()) + + p1 = lexpr(r"exists x.walk(x)") + p2 = lexpr(r"man(Socrates)") + p3 = lexpr(r"-walk(Bill)") + c = lexpr(r"walk(Socrates)") + prover = Prover9Command(c, [p1, p2, p3]) + print(prover.prove()) + cdp = ClosedDomainProver(prover) + print("assumptions:") + for a in cdp.assumptions(): + print(" ", a) + print("goal:", cdp.goal()) + print(cdp.prove()) + + p1 = lexpr(r"exists x.walk(x)") + p2 = lexpr(r"man(Socrates)") + p3 = lexpr(r"-walk(Bill)") + c = lexpr(r"walk(Socrates)") + prover = Prover9Command(c, [p1, p2, p3]) + print(prover.prove()) + cdp = ClosedDomainProver(prover) + print("assumptions:") + for a in cdp.assumptions(): + print(" ", a) + print("goal:", cdp.goal()) + print(cdp.prove()) + + p1 = lexpr(r"walk(Socrates)") + p2 = lexpr(r"walk(Bill)") + c = lexpr(r"all x.walk(x)") + prover = Prover9Command(c, [p1, p2]) + print(prover.prove()) + 
cdp = ClosedDomainProver(prover) + print("assumptions:") + for a in cdp.assumptions(): + print(" ", a) + print("goal:", cdp.goal()) + print(cdp.prove()) + + p1 = lexpr(r"girl(mary)") + p2 = lexpr(r"dog(rover)") + p3 = lexpr(r"all x.(girl(x) -> -dog(x))") + p4 = lexpr(r"all x.(dog(x) -> -girl(x))") + p5 = lexpr(r"chase(mary, rover)") + c = lexpr(r"exists y.(dog(y) & all x.(girl(x) -> chase(x,y)))") + prover = Prover9Command(c, [p1, p2, p3, p4, p5]) + print(prover.prove()) + cdp = ClosedDomainProver(prover) + print("assumptions:") + for a in cdp.assumptions(): + print(" ", a) + print("goal:", cdp.goal()) + print(cdp.prove()) + + +def unique_names_demo(): + lexpr = Expression.fromstring + + p1 = lexpr(r"man(Socrates)") + p2 = lexpr(r"man(Bill)") + c = lexpr(r"exists x.exists y.(x != y)") + prover = Prover9Command(c, [p1, p2]) + print(prover.prove()) + unp = UniqueNamesProver(prover) + print("assumptions:") + for a in unp.assumptions(): + print(" ", a) + print("goal:", unp.goal()) + print(unp.prove()) + + p1 = lexpr(r"all x.(walk(x) -> (x = Socrates))") + p2 = lexpr(r"Bill = William") + p3 = lexpr(r"Bill = Billy") + c = lexpr(r"-walk(William)") + prover = Prover9Command(c, [p1, p2, p3]) + print(prover.prove()) + unp = UniqueNamesProver(prover) + print("assumptions:") + for a in unp.assumptions(): + print(" ", a) + print("goal:", unp.goal()) + print(unp.prove()) + + +def closed_world_demo(): + lexpr = Expression.fromstring + + p1 = lexpr(r"walk(Socrates)") + p2 = lexpr(r"(Socrates != Bill)") + c = lexpr(r"-walk(Bill)") + prover = Prover9Command(c, [p1, p2]) + print(prover.prove()) + cwp = ClosedWorldProver(prover) + print("assumptions:") + for a in cwp.assumptions(): + print(" ", a) + print("goal:", cwp.goal()) + print(cwp.prove()) + + p1 = lexpr(r"see(Socrates, John)") + p2 = lexpr(r"see(John, Mary)") + p3 = lexpr(r"(Socrates != John)") + p4 = lexpr(r"(John != Mary)") + c = lexpr(r"-see(Socrates, Mary)") + prover = Prover9Command(c, [p1, p2, p3, p4]) + 
print(prover.prove()) + cwp = ClosedWorldProver(prover) + print("assumptions:") + for a in cwp.assumptions(): + print(" ", a) + print("goal:", cwp.goal()) + print(cwp.prove()) + + p1 = lexpr(r"all x.(ostrich(x) -> bird(x))") + p2 = lexpr(r"bird(Tweety)") + p3 = lexpr(r"-ostrich(Sam)") + p4 = lexpr(r"Sam != Tweety") + c = lexpr(r"-bird(Sam)") + prover = Prover9Command(c, [p1, p2, p3, p4]) + print(prover.prove()) + cwp = ClosedWorldProver(prover) + print("assumptions:") + for a in cwp.assumptions(): + print(" ", a) + print("goal:", cwp.goal()) + print(cwp.prove()) + + +def combination_prover_demo(): + lexpr = Expression.fromstring + + p1 = lexpr(r"see(Socrates, John)") + p2 = lexpr(r"see(John, Mary)") + c = lexpr(r"-see(Socrates, Mary)") + prover = Prover9Command(c, [p1, p2]) + print(prover.prove()) + command = ClosedDomainProver(UniqueNamesProver(ClosedWorldProver(prover))) + for a in command.assumptions(): + print(a) + print(command.prove()) + + +def default_reasoning_demo(): + lexpr = Expression.fromstring + + premises = [] + + # define taxonomy + premises.append(lexpr(r"all x.(elephant(x) -> animal(x))")) + premises.append(lexpr(r"all x.(bird(x) -> animal(x))")) + premises.append(lexpr(r"all x.(dove(x) -> bird(x))")) + premises.append(lexpr(r"all x.(ostrich(x) -> bird(x))")) + premises.append(lexpr(r"all x.(flying_ostrich(x) -> ostrich(x))")) + + # default properties + premises.append( + lexpr(r"all x.((animal(x) & -Ab1(x)) -> -fly(x))") + ) # normal animals don't fly + premises.append( + lexpr(r"all x.((bird(x) & -Ab2(x)) -> fly(x))") + ) # normal birds fly + premises.append( + lexpr(r"all x.((ostrich(x) & -Ab3(x)) -> -fly(x))") + ) # normal ostriches don't fly + + # specify abnormal entities + premises.append(lexpr(r"all x.(bird(x) -> Ab1(x))")) # flight + premises.append(lexpr(r"all x.(ostrich(x) -> Ab2(x))")) # non-flying bird + premises.append(lexpr(r"all x.(flying_ostrich(x) -> Ab3(x))")) # flying ostrich + + # define entities + 
premises.append(lexpr(r"elephant(E)")) + premises.append(lexpr(r"dove(D)")) + premises.append(lexpr(r"ostrich(O)")) + + # print the assumptions + prover = Prover9Command(None, premises) + command = UniqueNamesProver(ClosedWorldProver(prover)) + for a in command.assumptions(): + print(a) + + print_proof("-fly(E)", premises) + print_proof("fly(D)", premises) + print_proof("-fly(O)", premises) + + +def print_proof(goal, premises): + lexpr = Expression.fromstring + prover = Prover9Command(lexpr(goal), premises) + command = UniqueNamesProver(ClosedWorldProver(prover)) + print(goal, prover.prove(), command.prove()) + + +def demo(): + closed_domain_demo() + unique_names_demo() + closed_world_demo() + combination_prover_demo() + default_reasoning_demo() + + +if __name__ == "__main__": + demo() diff --git a/lib/python3.10/site-packages/nltk/inference/prover9.py b/lib/python3.10/site-packages/nltk/inference/prover9.py new file mode 100644 index 0000000000000000000000000000000000000000..73345f27473f011a7628c91834606f6e1f532044 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/inference/prover9.py @@ -0,0 +1,508 @@ +# Natural Language Toolkit: Interface to the Prover9 Theorem Prover +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Dan Garrette +# Ewan Klein +# +# URL: +# For license information, see LICENSE.TXT +""" +A theorem prover that makes use of the external 'Prover9' package. +""" + +import os +import subprocess + +import nltk +from nltk.inference.api import BaseProverCommand, Prover +from nltk.sem.logic import ( + AllExpression, + AndExpression, + EqualityExpression, + ExistsExpression, + Expression, + IffExpression, + ImpExpression, + NegatedExpression, + OrExpression, +) + +# +# Following is not yet used. Return code for 2 actually realized as 512. +# +p9_return_codes = { + 0: True, + 1: "(FATAL)", # A fatal error occurred (user's syntax error). + 2: False, # (SOS_EMPTY) Prover9 ran out of things to do + # (sos list exhausted). 
+ 3: "(MAX_MEGS)", # The max_megs (memory limit) parameter was exceeded. + 4: "(MAX_SECONDS)", # The max_seconds parameter was exceeded. + 5: "(MAX_GIVEN)", # The max_given parameter was exceeded. + 6: "(MAX_KEPT)", # The max_kept parameter was exceeded. + 7: "(ACTION)", # A Prover9 action terminated the search. + 101: "(SIGSEGV)", # Prover9 crashed, most probably due to a bug. +} + + +class Prover9CommandParent: + """ + A common base class used by both ``Prover9Command`` and ``MaceCommand``, + which is responsible for maintaining a goal and a set of assumptions, + and generating prover9-style input files from them. + """ + + def print_assumptions(self, output_format="nltk"): + """ + Print the list of the current assumptions. + """ + if output_format.lower() == "nltk": + for a in self.assumptions(): + print(a) + elif output_format.lower() == "prover9": + for a in convert_to_prover9(self.assumptions()): + print(a) + else: + raise NameError( + "Unrecognized value for 'output_format': %s" % output_format + ) + + +class Prover9Command(Prover9CommandParent, BaseProverCommand): + """ + A ``ProverCommand`` specific to the ``Prover9`` prover. It contains + the a print_assumptions() method that is used to print the list + of assumptions in multiple formats. + """ + + def __init__(self, goal=None, assumptions=None, timeout=60, prover=None): + """ + :param goal: Input expression to prove + :type goal: sem.Expression + :param assumptions: Input expressions to use as assumptions in + the proof. + :type assumptions: list(sem.Expression) + :param timeout: number of seconds before timeout; set to 0 for + no timeout. + :type timeout: int + :param prover: a prover. If not set, one will be created. 
+ :type prover: Prover9 + """ + if not assumptions: + assumptions = [] + + if prover is not None: + assert isinstance(prover, Prover9) + else: + prover = Prover9(timeout) + + BaseProverCommand.__init__(self, prover, goal, assumptions) + + def decorate_proof(self, proof_string, simplify=True): + """ + :see BaseProverCommand.decorate_proof() + """ + if simplify: + return self._prover._call_prooftrans(proof_string, ["striplabels"])[ + 0 + ].rstrip() + else: + return proof_string.rstrip() + + +class Prover9Parent: + """ + A common class extended by both ``Prover9`` and ``Mace ``. + It contains the functionality required to convert NLTK-style + expressions into Prover9-style expressions. + """ + + _binary_location = None + + def config_prover9(self, binary_location, verbose=False): + if binary_location is None: + self._binary_location = None + self._prover9_bin = None + else: + name = "prover9" + self._prover9_bin = nltk.internals.find_binary( + name, + path_to_bin=binary_location, + env_vars=["PROVER9"], + url="https://www.cs.unm.edu/~mccune/prover9/", + binary_names=[name, name + ".exe"], + verbose=verbose, + ) + self._binary_location = self._prover9_bin.rsplit(os.path.sep, 1) + + def prover9_input(self, goal, assumptions): + """ + :return: The input string that should be provided to the + prover9 binary. This string is formed based on the goal, + assumptions, and timeout value of this object. + """ + s = "" + + if assumptions: + s += "formulas(assumptions).\n" + for p9_assumption in convert_to_prover9(assumptions): + s += " %s.\n" % p9_assumption + s += "end_of_list.\n\n" + + if goal: + s += "formulas(goals).\n" + s += " %s.\n" % convert_to_prover9(goal) + s += "end_of_list.\n\n" + + return s + + def binary_locations(self): + """ + A list of directories that should be searched for the prover9 + executables. This list is used by ``config_prover9`` when searching + for the prover9 executables. 
+ """ + return [ + "/usr/local/bin/prover9", + "/usr/local/bin/prover9/bin", + "/usr/local/bin", + "/usr/bin", + "/usr/local/prover9", + "/usr/local/share/prover9", + ] + + def _find_binary(self, name, verbose=False): + binary_locations = self.binary_locations() + if self._binary_location is not None: + binary_locations += [self._binary_location] + return nltk.internals.find_binary( + name, + searchpath=binary_locations, + env_vars=["PROVER9"], + url="https://www.cs.unm.edu/~mccune/prover9/", + binary_names=[name, name + ".exe"], + verbose=verbose, + ) + + def _call(self, input_str, binary, args=[], verbose=False): + """ + Call the binary with the given input. + + :param input_str: A string whose contents are used as stdin. + :param binary: The location of the binary to call + :param args: A list of command-line arguments. + :return: A tuple (stdout, returncode) + :see: ``config_prover9`` + """ + if verbose: + print("Calling:", binary) + print("Args:", args) + print("Input:\n", input_str, "\n") + + # Call prover9 via a subprocess + cmd = [binary] + args + try: + input_str = input_str.encode("utf8") + except AttributeError: + pass + p = subprocess.Popen( + cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE + ) + (stdout, stderr) = p.communicate(input=input_str) + + if verbose: + print("Return code:", p.returncode) + if stdout: + print("stdout:\n", stdout, "\n") + if stderr: + print("stderr:\n", stderr, "\n") + + return (stdout.decode("utf-8"), p.returncode) + + +def convert_to_prover9(input): + """ + Convert a ``logic.Expression`` to Prover9 format. 
+ """ + if isinstance(input, list): + result = [] + for s in input: + try: + result.append(_convert_to_prover9(s.simplify())) + except: + print("input %s cannot be converted to Prover9 input syntax" % input) + raise + return result + else: + try: + return _convert_to_prover9(input.simplify()) + except: + print("input %s cannot be converted to Prover9 input syntax" % input) + raise + + +def _convert_to_prover9(expression): + """ + Convert ``logic.Expression`` to Prover9 formatted string. + """ + if isinstance(expression, ExistsExpression): + return ( + "exists " + + str(expression.variable) + + " " + + _convert_to_prover9(expression.term) + ) + elif isinstance(expression, AllExpression): + return ( + "all " + + str(expression.variable) + + " " + + _convert_to_prover9(expression.term) + ) + elif isinstance(expression, NegatedExpression): + return "-(" + _convert_to_prover9(expression.term) + ")" + elif isinstance(expression, AndExpression): + return ( + "(" + + _convert_to_prover9(expression.first) + + " & " + + _convert_to_prover9(expression.second) + + ")" + ) + elif isinstance(expression, OrExpression): + return ( + "(" + + _convert_to_prover9(expression.first) + + " | " + + _convert_to_prover9(expression.second) + + ")" + ) + elif isinstance(expression, ImpExpression): + return ( + "(" + + _convert_to_prover9(expression.first) + + " -> " + + _convert_to_prover9(expression.second) + + ")" + ) + elif isinstance(expression, IffExpression): + return ( + "(" + + _convert_to_prover9(expression.first) + + " <-> " + + _convert_to_prover9(expression.second) + + ")" + ) + elif isinstance(expression, EqualityExpression): + return ( + "(" + + _convert_to_prover9(expression.first) + + " = " + + _convert_to_prover9(expression.second) + + ")" + ) + else: + return str(expression) + + +class Prover9(Prover9Parent, Prover): + _prover9_bin = None + _prooftrans_bin = None + + def __init__(self, timeout=60): + self._timeout = timeout + """The timeout value for prover9. 
If a proof can not be found + in this amount of time, then prover9 will return false. + (Use 0 for no timeout.)""" + + def _prove(self, goal=None, assumptions=None, verbose=False): + """ + Use Prover9 to prove a theorem. + :return: A pair whose first element is a boolean indicating if the + proof was successful (i.e. returns value of 0) and whose second element + is the output of the prover. + """ + if not assumptions: + assumptions = [] + + stdout, returncode = self._call_prover9( + self.prover9_input(goal, assumptions), verbose=verbose + ) + return (returncode == 0, stdout) + + def prover9_input(self, goal, assumptions): + """ + :see: Prover9Parent.prover9_input + """ + s = "clear(auto_denials).\n" # only one proof required + return s + Prover9Parent.prover9_input(self, goal, assumptions) + + def _call_prover9(self, input_str, args=[], verbose=False): + """ + Call the ``prover9`` binary with the given input. + + :param input_str: A string whose contents are used as stdin. + :param args: A list of command-line arguments. + :return: A tuple (stdout, returncode) + :see: ``config_prover9`` + """ + if self._prover9_bin is None: + self._prover9_bin = self._find_binary("prover9", verbose) + + updated_input_str = "" + if self._timeout > 0: + updated_input_str += "assign(max_seconds, %d).\n\n" % self._timeout + updated_input_str += input_str + + stdout, returncode = self._call( + updated_input_str, self._prover9_bin, args, verbose + ) + + if returncode not in [0, 2]: + errormsgprefix = "%%ERROR:" + if errormsgprefix in stdout: + msgstart = stdout.index(errormsgprefix) + errormsg = stdout[msgstart:].strip() + else: + errormsg = None + if returncode in [3, 4, 5, 6]: + raise Prover9LimitExceededException(returncode, errormsg) + else: + raise Prover9FatalException(returncode, errormsg) + + return stdout, returncode + + def _call_prooftrans(self, input_str, args=[], verbose=False): + """ + Call the ``prooftrans`` binary with the given input. 
+ + :param input_str: A string whose contents are used as stdin. + :param args: A list of command-line arguments. + :return: A tuple (stdout, returncode) + :see: ``config_prover9`` + """ + if self._prooftrans_bin is None: + self._prooftrans_bin = self._find_binary("prooftrans", verbose) + + return self._call(input_str, self._prooftrans_bin, args, verbose) + + +class Prover9Exception(Exception): + def __init__(self, returncode, message): + msg = p9_return_codes[returncode] + if message: + msg += "\n%s" % message + Exception.__init__(self, msg) + + +class Prover9FatalException(Prover9Exception): + pass + + +class Prover9LimitExceededException(Prover9Exception): + pass + + +###################################################################### +# { Tests and Demos +###################################################################### + + +def test_config(): + + a = Expression.fromstring("(walk(j) & sing(j))") + g = Expression.fromstring("walk(j)") + p = Prover9Command(g, assumptions=[a]) + p._executable_path = None + p.prover9_search = [] + p.prove() + # config_prover9('/usr/local/bin') + print(p.prove()) + print(p.proof()) + + +def test_convert_to_prover9(expr): + """ + Test that parsing works OK. + """ + for t in expr: + e = Expression.fromstring(t) + print(convert_to_prover9(e)) + + +def test_prove(arguments): + """ + Try some proofs and exhibit the results. 
+ """ + for (goal, assumptions) in arguments: + g = Expression.fromstring(goal) + alist = [Expression.fromstring(a) for a in assumptions] + p = Prover9Command(g, assumptions=alist).prove() + for a in alist: + print(" %s" % a) + print(f"|- {g}: {p}\n") + + +arguments = [ + ("(man(x) <-> (not (not man(x))))", []), + ("(not (man(x) & (not man(x))))", []), + ("(man(x) | (not man(x)))", []), + ("(man(x) & (not man(x)))", []), + ("(man(x) -> man(x))", []), + ("(not (man(x) & (not man(x))))", []), + ("(man(x) | (not man(x)))", []), + ("(man(x) -> man(x))", []), + ("(man(x) <-> man(x))", []), + ("(not (man(x) <-> (not man(x))))", []), + ("mortal(Socrates)", ["all x.(man(x) -> mortal(x))", "man(Socrates)"]), + ("((all x.(man(x) -> walks(x)) & man(Socrates)) -> some y.walks(y))", []), + ("(all x.man(x) -> all x.man(x))", []), + ("some x.all y.sees(x,y)", []), + ( + "some e3.(walk(e3) & subj(e3, mary))", + [ + "some e1.(see(e1) & subj(e1, john) & some e2.(pred(e1, e2) & walk(e2) & subj(e2, mary)))" + ], + ), + ( + "some x e1.(see(e1) & subj(e1, x) & some e2.(pred(e1, e2) & walk(e2) & subj(e2, mary)))", + [ + "some e1.(see(e1) & subj(e1, john) & some e2.(pred(e1, e2) & walk(e2) & subj(e2, mary)))" + ], + ), +] + +expressions = [ + r"some x y.sees(x,y)", + r"some x.(man(x) & walks(x))", + r"\x.(man(x) & walks(x))", + r"\x y.sees(x,y)", + r"walks(john)", + r"\x.big(x, \y.mouse(y))", + r"(walks(x) & (runs(x) & (threes(x) & fours(x))))", + r"(walks(x) -> runs(x))", + r"some x.(PRO(x) & sees(John, x))", + r"some x.(man(x) & (not walks(x)))", + r"all x.(man(x) -> walks(x))", +] + + +def spacer(num=45): + print("-" * num) + + +def demo(): + print("Testing configuration") + spacer() + test_config() + print() + print("Testing conversion to Prover9 format") + spacer() + test_convert_to_prover9(expressions) + print() + print("Testing proofs") + spacer() + test_prove(arguments) + + +if __name__ == "__main__": + demo() diff --git a/lib/python3.10/site-packages/nltk/inference/tableau.py 
b/lib/python3.10/site-packages/nltk/inference/tableau.py new file mode 100644 index 0000000000000000000000000000000000000000..620f21b465225f3d8dc91a05414bfd9bbbe3e5c2 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/inference/tableau.py @@ -0,0 +1,712 @@ +# Natural Language Toolkit: First-Order Tableau Theorem Prover +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Dan Garrette +# +# URL: +# For license information, see LICENSE.TXT + +""" +Module for a tableau-based First Order theorem prover. +""" + +from nltk.inference.api import BaseProverCommand, Prover +from nltk.internals import Counter +from nltk.sem.logic import ( + AbstractVariableExpression, + AllExpression, + AndExpression, + ApplicationExpression, + EqualityExpression, + ExistsExpression, + Expression, + FunctionVariableExpression, + IffExpression, + ImpExpression, + LambdaExpression, + NegatedExpression, + OrExpression, + Variable, + VariableExpression, + unique_variable, +) + +_counter = Counter() + + +class ProverParseError(Exception): + pass + + +class TableauProver(Prover): + _assume_false = False + + def _prove(self, goal=None, assumptions=None, verbose=False): + if not assumptions: + assumptions = [] + + result = None + try: + agenda = Agenda() + if goal: + agenda.put(-goal) + agenda.put_all(assumptions) + debugger = Debug(verbose) + result = self._attempt_proof(agenda, set(), set(), debugger) + except RuntimeError as e: + if self._assume_false and str(e).startswith( + "maximum recursion depth exceeded" + ): + result = False + else: + if verbose: + print(e) + else: + raise e + return (result, "\n".join(debugger.lines)) + + def _attempt_proof(self, agenda, accessible_vars, atoms, debug): + (current, context), category = agenda.pop_first() + + # if there's nothing left in the agenda, and we haven't closed the path + if not current: + debug.line("AGENDA EMPTY") + return False + + proof_method = { + Categories.ATOM: self._attempt_proof_atom, + Categories.PROP: self._attempt_proof_prop, + 
Categories.N_ATOM: self._attempt_proof_n_atom, + Categories.N_PROP: self._attempt_proof_n_prop, + Categories.APP: self._attempt_proof_app, + Categories.N_APP: self._attempt_proof_n_app, + Categories.N_EQ: self._attempt_proof_n_eq, + Categories.D_NEG: self._attempt_proof_d_neg, + Categories.N_ALL: self._attempt_proof_n_all, + Categories.N_EXISTS: self._attempt_proof_n_some, + Categories.AND: self._attempt_proof_and, + Categories.N_OR: self._attempt_proof_n_or, + Categories.N_IMP: self._attempt_proof_n_imp, + Categories.OR: self._attempt_proof_or, + Categories.IMP: self._attempt_proof_imp, + Categories.N_AND: self._attempt_proof_n_and, + Categories.IFF: self._attempt_proof_iff, + Categories.N_IFF: self._attempt_proof_n_iff, + Categories.EQ: self._attempt_proof_eq, + Categories.EXISTS: self._attempt_proof_some, + Categories.ALL: self._attempt_proof_all, + }[category] + + debug.line((current, context)) + return proof_method(current, context, agenda, accessible_vars, atoms, debug) + + def _attempt_proof_atom( + self, current, context, agenda, accessible_vars, atoms, debug + ): + # Check if the branch is closed. Return 'True' if it is + if (current, True) in atoms: + debug.line("CLOSED", 1) + return True + + if context: + if isinstance(context.term, NegatedExpression): + current = current.negate() + agenda.put(context(current).simplify()) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + else: + # mark all AllExpressions as 'not exhausted' into the agenda since we are (potentially) adding new accessible vars + agenda.mark_alls_fresh() + return self._attempt_proof( + agenda, + accessible_vars | set(current.args), + atoms | {(current, False)}, + debug + 1, + ) + + def _attempt_proof_n_atom( + self, current, context, agenda, accessible_vars, atoms, debug + ): + # Check if the branch is closed. 
Return 'True' if it is + if (current.term, False) in atoms: + debug.line("CLOSED", 1) + return True + + if context: + if isinstance(context.term, NegatedExpression): + current = current.negate() + agenda.put(context(current).simplify()) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + else: + # mark all AllExpressions as 'not exhausted' into the agenda since we are (potentially) adding new accessible vars + agenda.mark_alls_fresh() + return self._attempt_proof( + agenda, + accessible_vars | set(current.term.args), + atoms | {(current.term, True)}, + debug + 1, + ) + + def _attempt_proof_prop( + self, current, context, agenda, accessible_vars, atoms, debug + ): + # Check if the branch is closed. Return 'True' if it is + if (current, True) in atoms: + debug.line("CLOSED", 1) + return True + + # mark all AllExpressions as 'not exhausted' into the agenda since we are (potentially) adding new accessible vars + agenda.mark_alls_fresh() + return self._attempt_proof( + agenda, accessible_vars, atoms | {(current, False)}, debug + 1 + ) + + def _attempt_proof_n_prop( + self, current, context, agenda, accessible_vars, atoms, debug + ): + # Check if the branch is closed. 
Return 'True' if it is + if (current.term, False) in atoms: + debug.line("CLOSED", 1) + return True + + # mark all AllExpressions as 'not exhausted' into the agenda since we are (potentially) adding new accessible vars + agenda.mark_alls_fresh() + return self._attempt_proof( + agenda, accessible_vars, atoms | {(current.term, True)}, debug + 1 + ) + + def _attempt_proof_app( + self, current, context, agenda, accessible_vars, atoms, debug + ): + f, args = current.uncurry() + for i, arg in enumerate(args): + if not TableauProver.is_atom(arg): + ctx = f + nv = Variable("X%s" % _counter.get()) + for j, a in enumerate(args): + ctx = ctx(VariableExpression(nv)) if i == j else ctx(a) + if context: + ctx = context(ctx).simplify() + ctx = LambdaExpression(nv, ctx) + agenda.put(arg, ctx) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + raise Exception("If this method is called, there must be a non-atomic argument") + + def _attempt_proof_n_app( + self, current, context, agenda, accessible_vars, atoms, debug + ): + f, args = current.term.uncurry() + for i, arg in enumerate(args): + if not TableauProver.is_atom(arg): + ctx = f + nv = Variable("X%s" % _counter.get()) + for j, a in enumerate(args): + ctx = ctx(VariableExpression(nv)) if i == j else ctx(a) + if context: + # combine new context with existing + ctx = context(ctx).simplify() + ctx = LambdaExpression(nv, -ctx) + agenda.put(-arg, ctx) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + raise Exception("If this method is called, there must be a non-atomic argument") + + def _attempt_proof_n_eq( + self, current, context, agenda, accessible_vars, atoms, debug + ): + ########################################################################### + # Since 'current' is of type '~(a=b)', the path is closed if 'a' == 'b' + ########################################################################### + if current.term.first == current.term.second: + debug.line("CLOSED", 1) + return True 
+ + agenda[Categories.N_EQ].add((current, context)) + current._exhausted = True + return self._attempt_proof( + agenda, + accessible_vars | {current.term.first, current.term.second}, + atoms, + debug + 1, + ) + + def _attempt_proof_d_neg( + self, current, context, agenda, accessible_vars, atoms, debug + ): + agenda.put(current.term.term, context) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + + def _attempt_proof_n_all( + self, current, context, agenda, accessible_vars, atoms, debug + ): + agenda[Categories.EXISTS].add( + (ExistsExpression(current.term.variable, -current.term.term), context) + ) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + + def _attempt_proof_n_some( + self, current, context, agenda, accessible_vars, atoms, debug + ): + agenda[Categories.ALL].add( + (AllExpression(current.term.variable, -current.term.term), context) + ) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + + def _attempt_proof_and( + self, current, context, agenda, accessible_vars, atoms, debug + ): + agenda.put(current.first, context) + agenda.put(current.second, context) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + + def _attempt_proof_n_or( + self, current, context, agenda, accessible_vars, atoms, debug + ): + agenda.put(-current.term.first, context) + agenda.put(-current.term.second, context) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + + def _attempt_proof_n_imp( + self, current, context, agenda, accessible_vars, atoms, debug + ): + agenda.put(current.term.first, context) + agenda.put(-current.term.second, context) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + + def _attempt_proof_or( + self, current, context, agenda, accessible_vars, atoms, debug + ): + new_agenda = agenda.clone() + agenda.put(current.first, context) + new_agenda.put(current.second, context) + return self._attempt_proof( + agenda, accessible_vars, 
atoms, debug + 1 + ) and self._attempt_proof(new_agenda, accessible_vars, atoms, debug + 1) + + def _attempt_proof_imp( + self, current, context, agenda, accessible_vars, atoms, debug + ): + new_agenda = agenda.clone() + agenda.put(-current.first, context) + new_agenda.put(current.second, context) + return self._attempt_proof( + agenda, accessible_vars, atoms, debug + 1 + ) and self._attempt_proof(new_agenda, accessible_vars, atoms, debug + 1) + + def _attempt_proof_n_and( + self, current, context, agenda, accessible_vars, atoms, debug + ): + new_agenda = agenda.clone() + agenda.put(-current.term.first, context) + new_agenda.put(-current.term.second, context) + return self._attempt_proof( + agenda, accessible_vars, atoms, debug + 1 + ) and self._attempt_proof(new_agenda, accessible_vars, atoms, debug + 1) + + def _attempt_proof_iff( + self, current, context, agenda, accessible_vars, atoms, debug + ): + new_agenda = agenda.clone() + agenda.put(current.first, context) + agenda.put(current.second, context) + new_agenda.put(-current.first, context) + new_agenda.put(-current.second, context) + return self._attempt_proof( + agenda, accessible_vars, atoms, debug + 1 + ) and self._attempt_proof(new_agenda, accessible_vars, atoms, debug + 1) + + def _attempt_proof_n_iff( + self, current, context, agenda, accessible_vars, atoms, debug + ): + new_agenda = agenda.clone() + agenda.put(current.term.first, context) + agenda.put(-current.term.second, context) + new_agenda.put(-current.term.first, context) + new_agenda.put(current.term.second, context) + return self._attempt_proof( + agenda, accessible_vars, atoms, debug + 1 + ) and self._attempt_proof(new_agenda, accessible_vars, atoms, debug + 1) + + def _attempt_proof_eq( + self, current, context, agenda, accessible_vars, atoms, debug + ): + ######################################################################### + # Since 'current' is of the form '(a = b)', replace ALL free instances + # of 'a' with 'b' + 
######################################################################### + agenda.put_atoms(atoms) + agenda.replace_all(current.first, current.second) + accessible_vars.discard(current.first) + agenda.mark_neqs_fresh() + return self._attempt_proof(agenda, accessible_vars, set(), debug + 1) + + def _attempt_proof_some( + self, current, context, agenda, accessible_vars, atoms, debug + ): + new_unique_variable = VariableExpression(unique_variable()) + agenda.put(current.term.replace(current.variable, new_unique_variable), context) + agenda.mark_alls_fresh() + return self._attempt_proof( + agenda, accessible_vars | {new_unique_variable}, atoms, debug + 1 + ) + + def _attempt_proof_all( + self, current, context, agenda, accessible_vars, atoms, debug + ): + try: + current._used_vars + except AttributeError: + current._used_vars = set() + + # if there are accessible_vars on the path + if accessible_vars: + # get the set of bound variables that have not be used by this AllExpression + bv_available = accessible_vars - current._used_vars + + if bv_available: + variable_to_use = list(bv_available)[0] + debug.line("--> Using '%s'" % variable_to_use, 2) + current._used_vars |= {variable_to_use} + agenda.put( + current.term.replace(current.variable, variable_to_use), context + ) + agenda[Categories.ALL].add((current, context)) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + + else: + # no more available variables to substitute + debug.line("--> Variables Exhausted", 2) + current._exhausted = True + agenda[Categories.ALL].add((current, context)) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + + else: + new_unique_variable = VariableExpression(unique_variable()) + debug.line("--> Using '%s'" % new_unique_variable, 2) + current._used_vars |= {new_unique_variable} + agenda.put( + current.term.replace(current.variable, new_unique_variable), context + ) + agenda[Categories.ALL].add((current, context)) + agenda.mark_alls_fresh() + 
return self._attempt_proof( + agenda, accessible_vars | {new_unique_variable}, atoms, debug + 1 + ) + + @staticmethod + def is_atom(e): + if isinstance(e, NegatedExpression): + e = e.term + + if isinstance(e, ApplicationExpression): + for arg in e.args: + if not TableauProver.is_atom(arg): + return False + return True + elif isinstance(e, AbstractVariableExpression) or isinstance( + e, LambdaExpression + ): + return True + else: + return False + + +class TableauProverCommand(BaseProverCommand): + def __init__(self, goal=None, assumptions=None, prover=None): + """ + :param goal: Input expression to prove + :type goal: sem.Expression + :param assumptions: Input expressions to use as assumptions in + the proof. + :type assumptions: list(sem.Expression) + """ + if prover is not None: + assert isinstance(prover, TableauProver) + else: + prover = TableauProver() + + BaseProverCommand.__init__(self, prover, goal, assumptions) + + +class Agenda: + def __init__(self): + self.sets = tuple(set() for i in range(21)) + + def clone(self): + new_agenda = Agenda() + set_list = [s.copy() for s in self.sets] + + new_allExs = set() + for allEx, _ in set_list[Categories.ALL]: + new_allEx = AllExpression(allEx.variable, allEx.term) + try: + new_allEx._used_vars = {used for used in allEx._used_vars} + except AttributeError: + new_allEx._used_vars = set() + new_allExs.add((new_allEx, None)) + set_list[Categories.ALL] = new_allExs + + set_list[Categories.N_EQ] = { + (NegatedExpression(n_eq.term), ctx) + for (n_eq, ctx) in set_list[Categories.N_EQ] + } + + new_agenda.sets = tuple(set_list) + return new_agenda + + def __getitem__(self, index): + return self.sets[index] + + def put(self, expression, context=None): + if isinstance(expression, AllExpression): + ex_to_add = AllExpression(expression.variable, expression.term) + try: + ex_to_add._used_vars = {used for used in expression._used_vars} + except AttributeError: + ex_to_add._used_vars = set() + else: + ex_to_add = expression + 
self.sets[self._categorize_expression(ex_to_add)].add((ex_to_add, context)) + + def put_all(self, expressions): + for expression in expressions: + self.put(expression) + + def put_atoms(self, atoms): + for atom, neg in atoms: + if neg: + self[Categories.N_ATOM].add((-atom, None)) + else: + self[Categories.ATOM].add((atom, None)) + + def pop_first(self): + """Pop the first expression that appears in the agenda""" + for i, s in enumerate(self.sets): + if s: + if i in [Categories.N_EQ, Categories.ALL]: + for ex in s: + try: + if not ex[0]._exhausted: + s.remove(ex) + return (ex, i) + except AttributeError: + s.remove(ex) + return (ex, i) + else: + return (s.pop(), i) + return ((None, None), None) + + def replace_all(self, old, new): + for s in self.sets: + for ex, ctx in s: + ex.replace(old.variable, new) + if ctx is not None: + ctx.replace(old.variable, new) + + def mark_alls_fresh(self): + for u, _ in self.sets[Categories.ALL]: + u._exhausted = False + + def mark_neqs_fresh(self): + for neq, _ in self.sets[Categories.N_EQ]: + neq._exhausted = False + + def _categorize_expression(self, current): + if isinstance(current, NegatedExpression): + return self._categorize_NegatedExpression(current) + elif isinstance(current, FunctionVariableExpression): + return Categories.PROP + elif TableauProver.is_atom(current): + return Categories.ATOM + elif isinstance(current, AllExpression): + return Categories.ALL + elif isinstance(current, AndExpression): + return Categories.AND + elif isinstance(current, OrExpression): + return Categories.OR + elif isinstance(current, ImpExpression): + return Categories.IMP + elif isinstance(current, IffExpression): + return Categories.IFF + elif isinstance(current, EqualityExpression): + return Categories.EQ + elif isinstance(current, ExistsExpression): + return Categories.EXISTS + elif isinstance(current, ApplicationExpression): + return Categories.APP + else: + raise ProverParseError("cannot categorize %s" % current.__class__.__name__) + + def 
_categorize_NegatedExpression(self, current): + negated = current.term + + if isinstance(negated, NegatedExpression): + return Categories.D_NEG + elif isinstance(negated, FunctionVariableExpression): + return Categories.N_PROP + elif TableauProver.is_atom(negated): + return Categories.N_ATOM + elif isinstance(negated, AllExpression): + return Categories.N_ALL + elif isinstance(negated, AndExpression): + return Categories.N_AND + elif isinstance(negated, OrExpression): + return Categories.N_OR + elif isinstance(negated, ImpExpression): + return Categories.N_IMP + elif isinstance(negated, IffExpression): + return Categories.N_IFF + elif isinstance(negated, EqualityExpression): + return Categories.N_EQ + elif isinstance(negated, ExistsExpression): + return Categories.N_EXISTS + elif isinstance(negated, ApplicationExpression): + return Categories.N_APP + else: + raise ProverParseError("cannot categorize %s" % negated.__class__.__name__) + + +class Debug: + def __init__(self, verbose, indent=0, lines=None): + self.verbose = verbose + self.indent = indent + + if not lines: + lines = [] + self.lines = lines + + def __add__(self, increment): + return Debug(self.verbose, self.indent + 1, self.lines) + + def line(self, data, indent=0): + if isinstance(data, tuple): + ex, ctx = data + if ctx: + data = f"{ex}, {ctx}" + else: + data = "%s" % ex + + if isinstance(ex, AllExpression): + try: + used_vars = "[%s]" % ( + ",".join("%s" % ve.variable.name for ve in ex._used_vars) + ) + data += ": %s" % used_vars + except AttributeError: + data += ": []" + + newline = "{}{}".format(" " * (self.indent + indent), data) + self.lines.append(newline) + + if self.verbose: + print(newline) + + +class Categories: + ATOM = 0 + PROP = 1 + N_ATOM = 2 + N_PROP = 3 + APP = 4 + N_APP = 5 + N_EQ = 6 + D_NEG = 7 + N_ALL = 8 + N_EXISTS = 9 + AND = 10 + N_OR = 11 + N_IMP = 12 + OR = 13 + IMP = 14 + N_AND = 15 + IFF = 16 + N_IFF = 17 + EQ = 18 + EXISTS = 19 + ALL = 20 + + +def testTableauProver(): + 
tableau_test("P | -P") + tableau_test("P & -P") + tableau_test("Q", ["P", "(P -> Q)"]) + tableau_test("man(x)") + tableau_test("(man(x) -> man(x))") + tableau_test("(man(x) -> --man(x))") + tableau_test("-(man(x) and -man(x))") + tableau_test("(man(x) or -man(x))") + tableau_test("(man(x) -> man(x))") + tableau_test("-(man(x) and -man(x))") + tableau_test("(man(x) or -man(x))") + tableau_test("(man(x) -> man(x))") + tableau_test("(man(x) iff man(x))") + tableau_test("-(man(x) iff -man(x))") + tableau_test("all x.man(x)") + tableau_test("all x.all y.((x = y) -> (y = x))") + tableau_test("all x.all y.all z.(((x = y) & (y = z)) -> (x = z))") + # tableau_test('-all x.some y.F(x,y) & some x.all y.(-F(x,y))') + # tableau_test('some x.all y.sees(x,y)') + + p1 = "all x.(man(x) -> mortal(x))" + p2 = "man(Socrates)" + c = "mortal(Socrates)" + tableau_test(c, [p1, p2]) + + p1 = "all x.(man(x) -> walks(x))" + p2 = "man(John)" + c = "some y.walks(y)" + tableau_test(c, [p1, p2]) + + p = "((x = y) & walks(y))" + c = "walks(x)" + tableau_test(c, [p]) + + p = "((x = y) & ((y = z) & (z = w)))" + c = "(x = w)" + tableau_test(c, [p]) + + p = "some e1.some e2.(believe(e1,john,e2) & walk(e2,mary))" + c = "some e0.walk(e0,mary)" + tableau_test(c, [p]) + + c = "(exists x.exists z3.((x = Mary) & ((z3 = John) & sees(z3,x))) <-> exists x.exists z4.((x = John) & ((z4 = Mary) & sees(x,z4))))" + tableau_test(c) + + +# p = 'some e1.some e2.((believe e1 john e2) and (walk e2 mary))' +# c = 'some x.some e3.some e4.((believe e3 x e4) and (walk e4 mary))' +# tableau_test(c, [p]) + + +def testHigherOrderTableauProver(): + tableau_test("believe(j, -lie(b))", ["believe(j, -lie(b) & -cheat(b))"]) + tableau_test("believe(j, lie(b) & cheat(b))", ["believe(j, lie(b))"]) + tableau_test( + "believe(j, lie(b))", ["lie(b)"] + ) # how do we capture that John believes all things that are true + tableau_test( + "believe(j, know(b, cheat(b)))", + ["believe(j, know(b, lie(b)) & know(b, steals(b) & cheat(b)))"], + ) 
def tableau_test(c, ps=None, verbose=False):
    """Parse conclusion *c* and premises *ps*, run the tableau prover, and print the result."""
    goal = Expression.fromstring(c)
    if ps:
        premises = [Expression.fromstring(p) for p in ps]
    else:
        premises = []
        ps = []
    outcome = TableauProver().prove(goal, premises, verbose=verbose)
    print(f"{', '.join(ps)} |- {goal}: {outcome}")
+""" + + +def babelize_shell(): + print("Babelfish online translation service is no longer available.") diff --git a/lib/python3.10/site-packages/nltk/misc/chomsky.py b/lib/python3.10/site-packages/nltk/misc/chomsky.py new file mode 100644 index 0000000000000000000000000000000000000000..0632bca034512041b3e0cf9a6231f8ac1c131e4b --- /dev/null +++ b/lib/python3.10/site-packages/nltk/misc/chomsky.py @@ -0,0 +1,134 @@ +# Chomsky random text generator, version 1.1, Raymond Hettinger, 2005/09/13 +# https://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/440546 + +""" +CHOMSKY is an aid to writing linguistic papers in the style +of the great master. It is based on selected phrases taken +from actual books and articles written by Noam Chomsky. +Upon request, it assembles the phrases in the elegant +stylistic patterns that Chomsky is noted for. +To generate n sentences of linguistic wisdom, type + + (CHOMSKY n) -- for example + (CHOMSKY 5) generates half a screen of linguistic truth. +""" + +leadins = """To characterize a linguistic level L, + On the other hand, + This suggests that + It appears that + Furthermore, + We will bring evidence in favor of the following thesis: + To provide a constituent structure for T(Z,K), + From C1, it follows that + For any transformation which is sufficiently diversified in \ +application to be of any interest, + Analogously, + Clearly, + Note that + Of course, + Suppose, for instance, that + Thus + With this clarification, + Conversely, + We have already seen that + By combining adjunctions and certain deformations, + I suggested that these results would follow from the assumption that + If the position of the trace in (99c) were only relatively \ +inaccessible to movement, + However, this assumption is not correct, since + Comparing these examples with their parasitic gap counterparts in \ +(96) and (97), we see that + In the discussion of resumptive pronouns following (81), + So far, + Nevertheless, + For one thing, + Summarizing, then, 
we assume that + A consequence of the approach just outlined is that + Presumably, + On our assumptions, + It may be, then, that + It must be emphasized, once again, that + Let us continue to suppose that + Notice, incidentally, that """ +# List of LEADINs to buy time. + +subjects = """ the notion of level of grammaticalness + a case of semigrammaticalness of a different sort + most of the methodological work in modern linguistics + a subset of English sentences interesting on quite independent grounds + the natural general principle that will subsume this case + an important property of these three types of EC + any associated supporting element + the appearance of parasitic gaps in domains relatively inaccessible \ +to ordinary extraction + the speaker-hearer's linguistic intuition + the descriptive power of the base component + the earlier discussion of deviance + this analysis of a formative as a pair of sets of features + this selectionally introduced contextual feature + a descriptively adequate grammar + the fundamental error of regarding functional notions as categorial + relational information + the systematic use of complex symbols + the theory of syntactic features developed earlier""" +# List of SUBJECTs chosen for maximum professorial macho. + +verbs = """can be defined in such a way as to impose + delimits + suffices to account for + cannot be arbitrary in + is not subject to + does not readily tolerate + raises serious doubts about + is not quite equivalent to + does not affect the structure of + may remedy and, at the same time, eliminate + is not to be considered in determining + is to be regarded as + is unspecified with respect to + is, apparently, determined by + is necessary to impose an interpretation on + appears to correlate rather closely with + is rather different from""" +# List of VERBs chosen for autorecursive obfuscation. + +objects = """ problems of phonemic and morphological analysis. 
+ a corpus of utterance tokens upon which conformity has been defined \ +by the paired utterance test. + the traditional practice of grammarians. + the levels of acceptability from fairly high (e.g. (99a)) to virtual \ +gibberish (e.g. (98d)). + a stipulation to place the constructions into these various categories. + a descriptive fact. + a parasitic gap construction. + the extended c-command discussed in connection with (34). + the ultimate standard that determines the accuracy of any proposed grammar. + the system of base rules exclusive of the lexicon. + irrelevant intervening contexts in selectional rules. + nondistinctness in the sense of distinctive feature theory. + a general convention regarding the forms of the grammar. + an abstract underlying order. + an important distinction in language use. + the requirement that branching is not tolerated within the dominance \ +scope of a complex symbol. + the strong generative capacity of the theory.""" +# List of OBJECTs selected for profound sententiousness. 
def generate_chomsky(times=5, line_length=72):
    """Print *times* pseudo-Chomsky sentences, wrapped to *line_length* columns.

    One phrase is drawn (without replacement) from each of the four phrase
    banks per sentence, so at most ``min(bank sizes)`` sentences appear.
    """
    shuffled_banks = []
    for bank in (leadins, subjects, verbs, objects):
        phrases = [entry.strip() for entry in bank.splitlines()]
        random.shuffle(phrases)
        shuffled_banks.append(phrases)
    sentences = islice(zip(*shuffled_banks), times)
    print(textwrap.fill(" ".join(chain.from_iterable(sentences)), line_length))
+ + :param parameters: The (context, target, display) tuples for the item + :type parameters: list(tuple(str, str, str)) + """ + self._targets = set() # the contrastive information + self._contexts = set() # what we are controlling for + self._seen = defaultdict(set) # to record what we have seen + self._displays = {} # what we will display + + if parameters: + for context, target, display in parameters: + self.add(context, target, display) + + def add(self, context, target, display): + """ + Add a new item to the minimal set, having the specified + context, target, and display form. + + :param context: The context in which the item of interest appears + :type context: str + :param target: The item of interest + :type target: str + :param display: The information to be reported for each item + :type display: str + """ + # Store the set of targets that occurred in this context + self._seen[context].add(target) + + # Keep track of which contexts and targets we have seen + self._contexts.add(context) + self._targets.add(target) + + # For a given context and target, store the display form + self._displays[(context, target)] = display + + def contexts(self, minimum=2): + """ + Determine which contexts occurred with enough distinct targets. 
+ + :param minimum: the minimum number of distinct target forms + :type minimum: int + :rtype: list + """ + return [c for c in self._contexts if len(self._seen[c]) >= minimum] + + def display(self, context, target, default=""): + if (context, target) in self._displays: + return self._displays[(context, target)] + else: + return default + + def display_all(self, context): + result = [] + for target in self._targets: + x = self.display(context, target) + if x: + result.append(x) + return result + + def targets(self): + return self._targets diff --git a/lib/python3.10/site-packages/nltk/misc/sort.py b/lib/python3.10/site-packages/nltk/misc/sort.py new file mode 100644 index 0000000000000000000000000000000000000000..cb543d93929f45505475f9d985afea5e92f58a94 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/misc/sort.py @@ -0,0 +1,176 @@ +# Natural Language Toolkit: List Sorting +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +This module provides a variety of list sorting algorithms, to +illustrate the many different algorithms (recipes) for solving a +problem, and how to analyze algorithms experimentally. +""" +# These algorithms are taken from: +# Levitin (2004) The Design and Analysis of Algorithms + +################################################################## +# Selection Sort +################################################################## + + +def selection(a): + """ + Selection Sort: scan the list to find its smallest element, then + swap it with the first element. The remainder of the list is one + element smaller; apply the same method to this list, and so on. 
def bubble(a):
    """
    Bubble Sort: sweep the list left-to-right, exchanging adjacent
    out-of-order elements. After sweep *i* the last *i* positions hold
    their final values, so each sweep inspects one fewer pair.

    Sorts *a* in place and returns the number of swaps performed.
    """
    swaps = 0
    n = len(a)
    for done in range(n - 1):
        for k in range(n - done - 1):
            if a[k + 1] < a[k]:
                a[k], a[k + 1] = a[k + 1], a[k]
                swaps += 1
    return swaps
def _partition(a, l, r):
    """Hoare-style partition of ``a[l:r+1]`` around the pivot ``a[l]``.

    Scans inward from both ends, swapping out-of-place pairs; when the
    scans cross, the one-too-many swap is undone and the pivot is moved
    into its final slot.

    :param a: the list being sorted (modified in place)
    :param l: left index of the subarray (pivot position)
    :param r: right index of the subarray (inclusive)
    :return: (j, count) where j is the pivot's final index and count is
        the number of pair swaps performed (including the undone one)
    """
    p = a[l]  # pivot value
    i = l
    j = r + 1
    count = 0
    while True:
        # Advance i rightwards to the next element >= pivot (stop at r).
        while i < r:
            i += 1
            if a[i] >= p:
                break
        # Retreat j leftwards to the next element <= pivot.
        while j > l:
            j -= 1
            if j < l or a[j] <= p:
                break
        a[i], a[j] = a[j], a[i]  # swap
        count += 1
        if i >= j:
            # The scans have crossed: the swap just made was spurious.
            break
        a[i], a[j] = a[j], a[i]  # undo last swap
    # Place the pivot into its final position j.
    a[l], a[j] = a[j], a[l]
    return j, count
def step(word, x, xf, y, yf, grid):
    """Try to write *word* into *grid* along the path given by index maps *xf*/*yf*.

    Every cell on the path must be empty ("") or already hold the matching
    letter. On success the letters are written and True is returned; on a
    clash the grid is left untouched and False is returned. (*x* and *y*
    are unused here; the index maps already encode the start position.)
    """
    path = [(xf(i), yf(i)) for i in range(len(word))]
    for (row, col), letter in zip(path, word):
        if grid[row][col] not in ("", letter):
            return False
    for (row, col), letter in zip(path, word):
        grid[row][col] = letter
    return True
def word_finder():
    """Demo: build and print a word-search puzzle from the NLTK word corpus."""
    from nltk.corpus import words

    # Sample 200 random corpus words, keeping only printable lengths (3-12).
    wordlist = words.words()
    random.shuffle(wordlist)
    wordlist = wordlist[:200]
    wordlist = [w for w in wordlist if 3 <= len(w) <= 12]
    grid, used = wordfinder(wordlist)

    # Print the letter grid, one space-separated row per line.
    print("Word Finder\n")
    for i in range(len(grid)):
        for j in range(len(grid[i])):
            print(grid[i][j], end=" ")
        print()
    print()

    # List the words that were successfully placed, numbered from 1.
    for i in range(len(used)):
        print("%d:" % (i + 1), used[i])
index 0000000000000000000000000000000000000000..5bbb3f032bef5ce79ab7232566bc73aa17ff661b --- /dev/null +++ b/lib/python3.10/site-packages/nltk/sem/__init__.py @@ -0,0 +1,75 @@ +# Natural Language Toolkit: Semantic Interpretation +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# URL: +# For license information, see LICENSE.TXT + +""" +NLTK Semantic Interpretation Package + +This package contains classes for representing semantic structure in +formulas of first-order logic and for evaluating such formulas in +set-theoretic models. + + >>> from nltk.sem import logic + >>> logic._counter._value = 0 + +The package has two main components: + + - ``logic`` provides support for analyzing expressions of First + Order Logic (FOL). + - ``evaluate`` allows users to recursively determine truth in a + model for formulas of FOL. + +A model consists of a domain of discourse and a valuation function, +which assigns values to non-logical constants. We assume that entities +in the domain are represented as strings such as ``'b1'``, ``'g1'``, +etc. A ``Valuation`` is initialized with a list of (symbol, value) +pairs, where values are entities, sets of entities or sets of tuples +of entities. +The domain of discourse can be inferred from the valuation, and model +is then created with domain and valuation as parameters. + + >>> from nltk.sem import Valuation, Model + >>> v = [('adam', 'b1'), ('betty', 'g1'), ('fido', 'd1'), + ... ('girl', set(['g1', 'g2'])), ('boy', set(['b1', 'b2'])), + ... ('dog', set(['d1'])), + ... 
('love', set([('b1', 'g1'), ('b2', 'g2'), ('g1', 'b1'), ('g2', 'b1')]))] + >>> val = Valuation(v) + >>> dom = val.domain + >>> m = Model(dom, val) +""" + +from nltk.sem.boxer import Boxer +from nltk.sem.drt import DRS, DrtExpression +from nltk.sem.evaluate import ( + Assignment, + Model, + Undefined, + Valuation, + arity, + is_rel, + read_valuation, + set2rel, +) +from nltk.sem.lfg import FStructure +from nltk.sem.logic import ( + ApplicationExpression, + Expression, + LogicalExpressionException, + Variable, + binding_ops, + boolean_ops, + equality_preds, + read_logic, +) +from nltk.sem.relextract import clause, extract_rels, rtuple +from nltk.sem.skolemize import skolemize +from nltk.sem.util import evaluate_sents, interpret_sents, parse_sents, root_semrep + +# from nltk.sem.glue import Glue +# from nltk.sem.hole import HoleSemantics +# from nltk.sem.cooper_storage import CooperStore + +# don't import chat80 as its names are too generic diff --git a/lib/python3.10/site-packages/nltk/sem/boxer.py b/lib/python3.10/site-packages/nltk/sem/boxer.py new file mode 100644 index 0000000000000000000000000000000000000000..d0acd4a607e3bf3481b3f896e8103a9069870c56 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/sem/boxer.py @@ -0,0 +1,1605 @@ +# Natural Language Toolkit: Interface to Boxer +# +# +# Author: Dan Garrette +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +""" +An interface to Boxer. + +This interface relies on the latest version of the development (subversion) version of +C&C and Boxer. + +Usage +===== + +Set the environment variable CANDC to the bin directory of your CandC installation. +The models directory should be in the CandC root directory. 
+For example:: + + /path/to/candc/ + bin/ + candc + boxer + models/ + boxer/ +""" + +import operator +import os +import re +import subprocess +import tempfile +from functools import reduce +from optparse import OptionParser + +from nltk.internals import find_binary +from nltk.sem.drt import ( + DRS, + DrtApplicationExpression, + DrtEqualityExpression, + DrtNegatedExpression, + DrtOrExpression, + DrtParser, + DrtProposition, + DrtTokens, + DrtVariableExpression, +) +from nltk.sem.logic import ( + ExpectedMoreTokensException, + LogicalExpressionException, + UnexpectedTokenException, + Variable, +) + + +class Boxer: + """ + This class is an interface to Johan Bos's program Boxer, a wide-coverage + semantic parser that produces Discourse Representation Structures (DRSs). + """ + + def __init__( + self, + boxer_drs_interpreter=None, + elimeq=False, + bin_dir=None, + verbose=False, + resolve=True, + ): + """ + :param boxer_drs_interpreter: A class that converts from the + ``AbstractBoxerDrs`` object hierarchy to a different object. The + default is ``NltkDrtBoxerDrsInterpreter``, which converts to the NLTK + DRT hierarchy. + :param elimeq: When set to true, Boxer removes all equalities from the + DRSs and discourse referents standing in the equality relation are + unified, but only if this can be done in a meaning-preserving manner. + :param resolve: When set to true, Boxer will resolve all anaphoric DRSs and perform merge-reduction. + Resolution follows Van der Sandt's theory of binding and accommodation. 
    def interpret(self, input, discourse_id=None, question=False, verbose=False):
        """
        Use Boxer to give a first order representation of a single sentence.

        :param input: str Input sentence to parse
        :param discourse_id: str An identifier to be inserted to each occurrence-indexed predicate.
        :param question: bool Parse with the question-oriented C&C models?
        :param verbose: bool Echo the external commands being run?
        :return: ``drt.DrtExpression``
        :raises Exception: if Boxer produces no DRS for the sentence
        """
        # Delegate to the multi-discourse entry point with a single
        # one-sentence discourse, then unwrap the single result.
        discourse_ids = [discourse_id] if discourse_id is not None else None
        (d,) = self.interpret_multi_sents([[input]], discourse_ids, question, verbose)
        if not d:
            raise Exception(f'Unable to interpret: "{input}"')
        return d
+ + :param inputs: list of str Input sentences to parse as individual discourses + :param occur_index: bool Should predicates be occurrence indexed? + :param discourse_ids: list of str Identifiers to be inserted to each occurrence-indexed predicate. + :return: list of ``drt.DrtExpression`` + """ + return self.interpret_multi_sents( + [[input] for input in inputs], discourse_ids, question, verbose + ) + + def interpret_multi_sents( + self, inputs, discourse_ids=None, question=False, verbose=False + ): + """ + Use Boxer to give a first order representation. + + :param inputs: list of list of str Input discourses to parse + :param occur_index: bool Should predicates be occurrence indexed? + :param discourse_ids: list of str Identifiers to be inserted to each occurrence-indexed predicate. + :return: ``drt.DrtExpression`` + """ + if discourse_ids is not None: + assert len(inputs) == len(discourse_ids) + assert reduce(operator.and_, (id is not None for id in discourse_ids)) + use_disc_id = True + else: + discourse_ids = list(map(str, range(len(inputs)))) + use_disc_id = False + + candc_out = self._call_candc(inputs, discourse_ids, question, verbose=verbose) + boxer_out = self._call_boxer(candc_out, verbose=verbose) + + # if 'ERROR: input file contains no ccg/2 terms.' in boxer_out: + # raise UnparseableInputException('Could not parse with candc: "%s"' % input_str) + + drs_dict = self._parse_to_drs_dict(boxer_out, use_disc_id) + return [drs_dict.get(id, None) for id in discourse_ids] + + def _call_candc(self, inputs, discourse_ids, question, verbose=False): + """ + Call the ``candc`` binary with the given input. + + :param inputs: list of list of str Input discourses to parse + :param discourse_ids: list of str Identifiers to be inserted to each occurrence-indexed predicate. 
+ :param filename: str A filename for the output file + :return: stdout + """ + args = [ + "--models", + os.path.join(self._candc_models_path, ["boxer", "questions"][question]), + "--candc-printer", + "boxer", + ] + return self._call( + "\n".join( + sum( + ([f"'{id}'"] + d for d, id in zip(inputs, discourse_ids)), + [], + ) + ), + self._candc_bin, + args, + verbose, + ) + + def _call_boxer(self, candc_out, verbose=False): + """ + Call the ``boxer`` binary with the given input. + + :param candc_out: str output from C&C parser + :return: stdout + """ + f = None + try: + fd, temp_filename = tempfile.mkstemp( + prefix="boxer-", suffix=".in", text=True + ) + f = os.fdopen(fd, "w") + f.write(candc_out.decode("utf-8")) + finally: + if f: + f.close() + + args = [ + "--box", + "false", + "--semantics", + "drs", + #'--flat', 'false', # removed from boxer + "--resolve", + ["false", "true"][self._resolve], + "--elimeq", + ["false", "true"][self._elimeq], + "--format", + "prolog", + "--instantiate", + "true", + "--input", + temp_filename, + ] + stdout = self._call(None, self._boxer_bin, args, verbose) + os.remove(temp_filename) + return stdout + + def _find_binary(self, name, bin_dir, verbose=False): + return find_binary( + name, + path_to_bin=bin_dir, + env_vars=["CANDC"], + url="http://svn.ask.it.usyd.edu.au/trac/candc/", + binary_names=[name, name + ".exe"], + verbose=verbose, + ) + + def _call(self, input_str, binary, args=[], verbose=False): + """ + Call the binary with the given input. + + :param input_str: A string whose contents are used as stdin. + :param binary: The location of the binary to call + :param args: A list of command-line arguments. 
    def _parse_to_drs_dict(self, boxer_out, use_disc_id):
        """Parse Boxer's Prolog output into ``{discourse_id: interpreted DRS}``.

        Scans for ``id(<discourse_id>,<drs_id>).`` lines; each is expected
        to be immediately followed by the matching ``sem(<drs_id>,...)``
        line containing the DRS term.

        :param boxer_out: bytes, raw stdout of the boxer process
        :param use_disc_id: bool, whether the discourse id is threaded into
            the parsed predicates
        :return: dict mapping discourse id (str) to the interpreted DRS
        """
        lines = boxer_out.decode("utf-8").split("\n")
        drs_dict = {}
        i = 0
        while i < len(lines):
            line = lines[i]
            if line.startswith("id("):
                comma_idx = line.index(",")
                discourse_id = line[3:comma_idx]
                # Strip the Prolog quoting around the discourse id, if any.
                if discourse_id[0] == "'" and discourse_id[-1] == "'":
                    discourse_id = discourse_id[1:-1]
                drs_id = line[comma_idx + 1 : line.index(")")]
                # The sem/2 term for this id is on the very next line.
                i += 1
                line = lines[i]
                assert line.startswith(f"sem({drs_id},")
                # Normalize a trailing "').'" so the line ends in ").".
                if line[-4:] == "').'":
                    line = line[:-4] + ")."
                assert line.endswith(")."), f"can't parse line: {line}"

                # Skip past the word-list argument by scanning to the "]"
                # that closes the bracket opened in "sem(<id>,[".
                search_start = len(f"sem({drs_id},[")
                brace_count = 1
                drs_start = -1
                for j, c in enumerate(line[search_start:]):
                    if c == "[":
                        brace_count += 1
                    if c == "]":
                        brace_count -= 1
                        if brace_count == 0:
                            drs_start = search_start + j + 1
                            # Step over the separator: either "','" or ",".
                            if line[drs_start : drs_start + 3] == "','":
                                drs_start = drs_start + 3
                            else:
                                drs_start = drs_start + 1
                            break
                assert drs_start > -1

                # Everything up to the final ")." is the DRS term itself.
                drs_input = line[drs_start:-2].strip()
                parsed = self._parse_drs(drs_input, discourse_id, use_disc_id)
                drs_dict[discourse_id] = self._boxer_drs_interpreter.interpret(parsed)
            i += 1
        return drs_dict
handle_condition(self, tok, indices): + """ + Handle a DRS condition + + :param indices: list of int + :return: list of ``DrtExpression`` + """ + if tok == "not": + return [self._handle_not()] + + if tok == "or": + conds = [self._handle_binary_expression(self._make_or_expression)] + elif tok == "imp": + conds = [self._handle_binary_expression(self._make_imp_expression)] + elif tok == "eq": + conds = [self._handle_eq()] + elif tok == "prop": + conds = [self._handle_prop()] + + elif tok == "pred": + conds = [self._handle_pred()] + elif tok == "named": + conds = [self._handle_named()] + elif tok == "rel": + conds = [self._handle_rel()] + elif tok == "timex": + conds = self._handle_timex() + elif tok == "card": + conds = [self._handle_card()] + + elif tok == "whq": + conds = [self._handle_whq()] + elif tok == "duplex": + conds = [self._handle_duplex()] + + else: + conds = [] + + return sum( + ( + [cond(sent_index, word_indices) for cond in conds] + for sent_index, word_indices in self._sent_and_word_indices(indices) + ), + [], + ) + + def _handle_not(self): + self.assertToken(self.token(), "(") + drs = self.process_next_expression(None) + self.assertToken(self.token(), ")") + return BoxerNot(drs) + + def _handle_pred(self): + # pred(_G3943, dog, n, 0) + self.assertToken(self.token(), "(") + variable = self.parse_variable() + self.assertToken(self.token(), ",") + name = self.token() + self.assertToken(self.token(), ",") + pos = self.token() + self.assertToken(self.token(), ",") + sense = int(self.token()) + self.assertToken(self.token(), ")") + + def _handle_pred_f(sent_index, word_indices): + return BoxerPred( + self.discourse_id, sent_index, word_indices, variable, name, pos, sense + ) + + return _handle_pred_f + + def _handle_duplex(self): + # duplex(whq, drs(...), var, drs(...)) + self.assertToken(self.token(), "(") + # self.assertToken(self.token(), '[') + ans_types = [] + # while self.token(0) != ']': + # cat = self.token() + # self.assertToken(self.token(), ':') 
+ # if cat == 'des': + # ans_types.append(self.token()) + # elif cat == 'num': + # ans_types.append('number') + # typ = self.token() + # if typ == 'cou': + # ans_types.append('count') + # else: + # ans_types.append(typ) + # else: + # ans_types.append(self.token()) + # self.token() #swallow the ']' + + self.assertToken(self.token(), "whq") + self.assertToken(self.token(), ",") + d1 = self.process_next_expression(None) + self.assertToken(self.token(), ",") + ref = self.parse_variable() + self.assertToken(self.token(), ",") + d2 = self.process_next_expression(None) + self.assertToken(self.token(), ")") + return lambda sent_index, word_indices: BoxerWhq( + self.discourse_id, sent_index, word_indices, ans_types, d1, ref, d2 + ) + + def _handle_named(self): + # named(x0, john, per, 0) + self.assertToken(self.token(), "(") + variable = self.parse_variable() + self.assertToken(self.token(), ",") + name = self.token() + self.assertToken(self.token(), ",") + type = self.token() + self.assertToken(self.token(), ",") + sense = self.token() # as per boxer rev 2554 + self.assertToken(self.token(), ")") + return lambda sent_index, word_indices: BoxerNamed( + self.discourse_id, sent_index, word_indices, variable, name, type, sense + ) + + def _handle_rel(self): + # rel(_G3993, _G3943, agent, 0) + self.assertToken(self.token(), "(") + var1 = self.parse_variable() + self.assertToken(self.token(), ",") + var2 = self.parse_variable() + self.assertToken(self.token(), ",") + rel = self.token() + self.assertToken(self.token(), ",") + sense = int(self.token()) + self.assertToken(self.token(), ")") + return lambda sent_index, word_indices: BoxerRel( + self.discourse_id, sent_index, word_indices, var1, var2, rel, sense + ) + + def _handle_timex(self): + # timex(_G18322, date([]: (+), []:'XXXX', [1004]:'04', []:'XX')) + self.assertToken(self.token(), "(") + arg = self.parse_variable() + self.assertToken(self.token(), ",") + new_conds = self._handle_time_expression(arg) + 
self.assertToken(self.token(), ")") + return new_conds + + def _handle_time_expression(self, arg): + # date([]: (+), []:'XXXX', [1004]:'04', []:'XX') + tok = self.token() + self.assertToken(self.token(), "(") + if tok == "date": + conds = self._handle_date(arg) + elif tok == "time": + conds = self._handle_time(arg) + else: + return None + self.assertToken(self.token(), ")") + return [ + lambda sent_index, word_indices: BoxerPred( + self.discourse_id, sent_index, word_indices, arg, tok, "n", 0 + ) + ] + [lambda sent_index, word_indices: cond for cond in conds] + + def _handle_date(self, arg): + # []: (+), []:'XXXX', [1004]:'04', []:'XX' + conds = [] + ((sent_index, word_indices),) = self._sent_and_word_indices( + self._parse_index_list() + ) + self.assertToken(self.token(), "(") + pol = self.token() + self.assertToken(self.token(), ")") + conds.append( + BoxerPred( + self.discourse_id, + sent_index, + word_indices, + arg, + f"date_pol_{pol}", + "a", + 0, + ) + ) + self.assertToken(self.token(), ",") + + ((sent_index, word_indices),) = self._sent_and_word_indices( + self._parse_index_list() + ) + year = self.token() + if year != "XXXX": + year = year.replace(":", "_") + conds.append( + BoxerPred( + self.discourse_id, + sent_index, + word_indices, + arg, + f"date_year_{year}", + "a", + 0, + ) + ) + self.assertToken(self.token(), ",") + + ((sent_index, word_indices),) = self._sent_and_word_indices( + self._parse_index_list() + ) + month = self.token() + if month != "XX": + conds.append( + BoxerPred( + self.discourse_id, + sent_index, + word_indices, + arg, + f"date_month_{month}", + "a", + 0, + ) + ) + self.assertToken(self.token(), ",") + + ((sent_index, word_indices),) = self._sent_and_word_indices( + self._parse_index_list() + ) + day = self.token() + if day != "XX": + conds.append( + BoxerPred( + self.discourse_id, + sent_index, + word_indices, + arg, + f"date_day_{day}", + "a", + 0, + ) + ) + + return conds + + def _handle_time(self, arg): + # time([1018]:'18', 
[]:'XX', []:'XX') + conds = [] + self._parse_index_list() + hour = self.token() + if hour != "XX": + conds.append(self._make_atom("r_hour_2", arg, hour)) + self.assertToken(self.token(), ",") + + self._parse_index_list() + min = self.token() + if min != "XX": + conds.append(self._make_atom("r_min_2", arg, min)) + self.assertToken(self.token(), ",") + + self._parse_index_list() + sec = self.token() + if sec != "XX": + conds.append(self._make_atom("r_sec_2", arg, sec)) + + return conds + + def _handle_card(self): + # card(_G18535, 28, ge) + self.assertToken(self.token(), "(") + variable = self.parse_variable() + self.assertToken(self.token(), ",") + value = self.token() + self.assertToken(self.token(), ",") + type = self.token() + self.assertToken(self.token(), ")") + return lambda sent_index, word_indices: BoxerCard( + self.discourse_id, sent_index, word_indices, variable, value, type + ) + + def _handle_prop(self): + # prop(_G15949, drs(...)) + self.assertToken(self.token(), "(") + variable = self.parse_variable() + self.assertToken(self.token(), ",") + drs = self.process_next_expression(None) + self.assertToken(self.token(), ")") + return lambda sent_index, word_indices: BoxerProp( + self.discourse_id, sent_index, word_indices, variable, drs + ) + + def _parse_index_list(self): + # [1001,1002]: + indices = [] + self.assertToken(self.token(), "[") + while self.token(0) != "]": + indices.append(self.parse_index()) + if self.token(0) == ",": + self.token() # swallow ',' + self.token() # swallow ']' + self.assertToken(self.token(), ":") + return indices + + def parse_drs(self): + # drs([[1001]:_G3943], + # [[1002]:pred(_G3943, dog, n, 0)] + # ) + self.assertToken(self.token(), "(") + self.assertToken(self.token(), "[") + refs = set() + while self.token(0) != "]": + indices = self._parse_index_list() + refs.add(self.parse_variable()) + if self.token(0) == ",": + self.token() # swallow ',' + self.token() # swallow ']' + self.assertToken(self.token(), ",") + 
self.assertToken(self.token(), "[") + conds = [] + while self.token(0) != "]": + indices = self._parse_index_list() + conds.extend(self.parse_condition(indices)) + if self.token(0) == ",": + self.token() # swallow ',' + self.token() # swallow ']' + self.assertToken(self.token(), ")") + return BoxerDrs(list(refs), conds) + + def _handle_binary_expression(self, make_callback): + self.assertToken(self.token(), "(") + drs1 = self.process_next_expression(None) + self.assertToken(self.token(), ",") + drs2 = self.process_next_expression(None) + self.assertToken(self.token(), ")") + return lambda sent_index, word_indices: make_callback( + sent_index, word_indices, drs1, drs2 + ) + + def _handle_alfa(self, make_callback): + self.assertToken(self.token(), "(") + type = self.token() + self.assertToken(self.token(), ",") + drs1 = self.process_next_expression(None) + self.assertToken(self.token(), ",") + drs2 = self.process_next_expression(None) + self.assertToken(self.token(), ")") + return lambda sent_index, word_indices: make_callback( + sent_index, word_indices, drs1, drs2 + ) + + def _handle_eq(self): + self.assertToken(self.token(), "(") + var1 = self.parse_variable() + self.assertToken(self.token(), ",") + var2 = self.parse_variable() + self.assertToken(self.token(), ")") + return lambda sent_index, word_indices: BoxerEq( + self.discourse_id, sent_index, word_indices, var1, var2 + ) + + def _handle_whq(self): + self.assertToken(self.token(), "(") + self.assertToken(self.token(), "[") + ans_types = [] + while self.token(0) != "]": + cat = self.token() + self.assertToken(self.token(), ":") + if cat == "des": + ans_types.append(self.token()) + elif cat == "num": + ans_types.append("number") + typ = self.token() + if typ == "cou": + ans_types.append("count") + else: + ans_types.append(typ) + else: + ans_types.append(self.token()) + self.token() # swallow the ']' + + self.assertToken(self.token(), ",") + d1 = self.process_next_expression(None) + self.assertToken(self.token(), 
",") + ref = self.parse_variable() + self.assertToken(self.token(), ",") + d2 = self.process_next_expression(None) + self.assertToken(self.token(), ")") + return lambda sent_index, word_indices: BoxerWhq( + self.discourse_id, sent_index, word_indices, ans_types, d1, ref, d2 + ) + + def _make_merge_expression(self, sent_index, word_indices, drs1, drs2): + return BoxerDrs(drs1.refs + drs2.refs, drs1.conds + drs2.conds) + + def _make_or_expression(self, sent_index, word_indices, drs1, drs2): + return BoxerOr(self.discourse_id, sent_index, word_indices, drs1, drs2) + + def _make_imp_expression(self, sent_index, word_indices, drs1, drs2): + return BoxerDrs(drs1.refs, drs1.conds, drs2) + + def parse_variable(self): + var = self.token() + assert re.match(r"^[exps]\d+$", var), var + return var + + def parse_index(self): + return int(self.token()) + + def _sent_and_word_indices(self, indices): + """ + :return: list of (sent_index, word_indices) tuples + """ + sent_indices = {(i / 1000) - 1 for i in indices if i >= 0} + if sent_indices: + pairs = [] + for sent_index in sent_indices: + word_indices = [ + (i % 1000) - 1 for i in indices if sent_index == (i / 1000) - 1 + ] + pairs.append((sent_index, word_indices)) + return pairs + else: + word_indices = [(i % 1000) - 1 for i in indices] + return [(None, word_indices)] + + +class BoxerDrsParser(DrtParser): + """ + Reparse the str form of subclasses of ``AbstractBoxerDrs`` + """ + + def __init__(self, discourse_id=None): + DrtParser.__init__(self) + self.discourse_id = discourse_id + + def get_all_symbols(self): + return [ + DrtTokens.OPEN, + DrtTokens.CLOSE, + DrtTokens.COMMA, + DrtTokens.OPEN_BRACKET, + DrtTokens.CLOSE_BRACKET, + ] + + def attempt_adjuncts(self, expression, context): + return expression + + def handle(self, tok, context): + try: + # if tok == 'drs': + # self.assertNextToken(DrtTokens.OPEN) + # label = int(self.token()) + # self.assertNextToken(DrtTokens.COMMA) + # refs = list(map(int, self.handle_refs())) + # 
self.assertNextToken(DrtTokens.COMMA) + # conds = self.handle_conds(None) + # self.assertNextToken(DrtTokens.CLOSE) + # return BoxerDrs(label, refs, conds) + if tok == "pred": + self.assertNextToken(DrtTokens.OPEN) + disc_id = ( + self.discourse_id if self.discourse_id is not None else self.token() + ) + self.assertNextToken(DrtTokens.COMMA) + sent_id = self.nullableIntToken() + self.assertNextToken(DrtTokens.COMMA) + word_ids = list(map(int, self.handle_refs())) + self.assertNextToken(DrtTokens.COMMA) + variable = int(self.token()) + self.assertNextToken(DrtTokens.COMMA) + name = self.token() + self.assertNextToken(DrtTokens.COMMA) + pos = self.token() + self.assertNextToken(DrtTokens.COMMA) + sense = int(self.token()) + self.assertNextToken(DrtTokens.CLOSE) + return BoxerPred(disc_id, sent_id, word_ids, variable, name, pos, sense) + elif tok == "named": + self.assertNextToken(DrtTokens.OPEN) + disc_id = ( + self.discourse_id if self.discourse_id is not None else self.token() + ) + self.assertNextToken(DrtTokens.COMMA) + sent_id = int(self.token()) + self.assertNextToken(DrtTokens.COMMA) + word_ids = map(int, self.handle_refs()) + self.assertNextToken(DrtTokens.COMMA) + variable = int(self.token()) + self.assertNextToken(DrtTokens.COMMA) + name = self.token() + self.assertNextToken(DrtTokens.COMMA) + type = self.token() + self.assertNextToken(DrtTokens.COMMA) + sense = int(self.token()) + self.assertNextToken(DrtTokens.CLOSE) + return BoxerNamed( + disc_id, sent_id, word_ids, variable, name, type, sense + ) + elif tok == "rel": + self.assertNextToken(DrtTokens.OPEN) + disc_id = ( + self.discourse_id if self.discourse_id is not None else self.token() + ) + self.assertNextToken(DrtTokens.COMMA) + sent_id = self.nullableIntToken() + self.assertNextToken(DrtTokens.COMMA) + word_ids = list(map(int, self.handle_refs())) + self.assertNextToken(DrtTokens.COMMA) + var1 = int(self.token()) + self.assertNextToken(DrtTokens.COMMA) + var2 = int(self.token()) + 
self.assertNextToken(DrtTokens.COMMA) + rel = self.token() + self.assertNextToken(DrtTokens.COMMA) + sense = int(self.token()) + self.assertNextToken(DrtTokens.CLOSE) + return BoxerRel(disc_id, sent_id, word_ids, var1, var2, rel, sense) + elif tok == "prop": + self.assertNextToken(DrtTokens.OPEN) + disc_id = ( + self.discourse_id if self.discourse_id is not None else self.token() + ) + self.assertNextToken(DrtTokens.COMMA) + sent_id = int(self.token()) + self.assertNextToken(DrtTokens.COMMA) + word_ids = list(map(int, self.handle_refs())) + self.assertNextToken(DrtTokens.COMMA) + variable = int(self.token()) + self.assertNextToken(DrtTokens.COMMA) + drs = self.process_next_expression(None) + self.assertNextToken(DrtTokens.CLOSE) + return BoxerProp(disc_id, sent_id, word_ids, variable, drs) + elif tok == "not": + self.assertNextToken(DrtTokens.OPEN) + drs = self.process_next_expression(None) + self.assertNextToken(DrtTokens.CLOSE) + return BoxerNot(drs) + elif tok == "imp": + self.assertNextToken(DrtTokens.OPEN) + drs1 = self.process_next_expression(None) + self.assertNextToken(DrtTokens.COMMA) + drs2 = self.process_next_expression(None) + self.assertNextToken(DrtTokens.CLOSE) + return BoxerDrs(drs1.refs, drs1.conds, drs2) + elif tok == "or": + self.assertNextToken(DrtTokens.OPEN) + disc_id = ( + self.discourse_id if self.discourse_id is not None else self.token() + ) + self.assertNextToken(DrtTokens.COMMA) + sent_id = self.nullableIntToken() + self.assertNextToken(DrtTokens.COMMA) + word_ids = map(int, self.handle_refs()) + self.assertNextToken(DrtTokens.COMMA) + drs1 = self.process_next_expression(None) + self.assertNextToken(DrtTokens.COMMA) + drs2 = self.process_next_expression(None) + self.assertNextToken(DrtTokens.CLOSE) + return BoxerOr(disc_id, sent_id, word_ids, drs1, drs2) + elif tok == "eq": + self.assertNextToken(DrtTokens.OPEN) + disc_id = ( + self.discourse_id if self.discourse_id is not None else self.token() + ) + 
self.assertNextToken(DrtTokens.COMMA) + sent_id = self.nullableIntToken() + self.assertNextToken(DrtTokens.COMMA) + word_ids = list(map(int, self.handle_refs())) + self.assertNextToken(DrtTokens.COMMA) + var1 = int(self.token()) + self.assertNextToken(DrtTokens.COMMA) + var2 = int(self.token()) + self.assertNextToken(DrtTokens.CLOSE) + return BoxerEq(disc_id, sent_id, word_ids, var1, var2) + elif tok == "card": + self.assertNextToken(DrtTokens.OPEN) + disc_id = ( + self.discourse_id if self.discourse_id is not None else self.token() + ) + self.assertNextToken(DrtTokens.COMMA) + sent_id = self.nullableIntToken() + self.assertNextToken(DrtTokens.COMMA) + word_ids = map(int, self.handle_refs()) + self.assertNextToken(DrtTokens.COMMA) + var = int(self.token()) + self.assertNextToken(DrtTokens.COMMA) + value = self.token() + self.assertNextToken(DrtTokens.COMMA) + type = self.token() + self.assertNextToken(DrtTokens.CLOSE) + return BoxerCard(disc_id, sent_id, word_ids, var, value, type) + elif tok == "whq": + self.assertNextToken(DrtTokens.OPEN) + disc_id = ( + self.discourse_id if self.discourse_id is not None else self.token() + ) + self.assertNextToken(DrtTokens.COMMA) + sent_id = self.nullableIntToken() + self.assertNextToken(DrtTokens.COMMA) + word_ids = list(map(int, self.handle_refs())) + self.assertNextToken(DrtTokens.COMMA) + ans_types = self.handle_refs() + self.assertNextToken(DrtTokens.COMMA) + drs1 = self.process_next_expression(None) + self.assertNextToken(DrtTokens.COMMA) + var = int(self.token()) + self.assertNextToken(DrtTokens.COMMA) + drs2 = self.process_next_expression(None) + self.assertNextToken(DrtTokens.CLOSE) + return BoxerWhq(disc_id, sent_id, word_ids, ans_types, drs1, var, drs2) + except Exception as e: + raise LogicalExpressionException(self._currentIndex, str(e)) from e + assert False, repr(tok) + + def nullableIntToken(self): + t = self.token() + return int(t) if t != "None" else None + + def get_next_token_variable(self, description): + 
try: + return self.token() + except ExpectedMoreTokensException as e: + raise ExpectedMoreTokensException(e.index, "Variable expected.") from e + + +class AbstractBoxerDrs: + def variables(self): + """ + :return: (set, set, set) + """ + variables, events, propositions = self._variables() + return (variables - (events | propositions), events, propositions - events) + + def variable_types(self): + vartypes = {} + for t, vars in zip(("z", "e", "p"), self.variables()): + for v in vars: + vartypes[v] = t + return vartypes + + def _variables(self): + """ + :return: (set, set, set) + """ + return (set(), set(), set()) + + def atoms(self): + return set() + + def clean(self): + return self + + def _clean_name(self, name): + return name.replace("-", "_").replace("'", "_") + + def renumber_sentences(self, f): + return self + + def __hash__(self): + return hash(f"{self}") + + +class BoxerDrs(AbstractBoxerDrs): + def __init__(self, refs, conds, consequent=None): + AbstractBoxerDrs.__init__(self) + self.refs = refs + self.conds = conds + self.consequent = consequent + + def _variables(self): + variables = (set(), set(), set()) + for cond in self.conds: + for s, v in zip(variables, cond._variables()): + s.update(v) + if self.consequent is not None: + for s, v in zip(variables, self.consequent._variables()): + s.update(v) + return variables + + def atoms(self): + atoms = reduce(operator.or_, (cond.atoms() for cond in self.conds), set()) + if self.consequent is not None: + atoms.update(self.consequent.atoms()) + return atoms + + def clean(self): + consequent = self.consequent.clean() if self.consequent else None + return BoxerDrs(self.refs, [c.clean() for c in self.conds], consequent) + + def renumber_sentences(self, f): + consequent = self.consequent.renumber_sentences(f) if self.consequent else None + return BoxerDrs( + self.refs, [c.renumber_sentences(f) for c in self.conds], consequent + ) + + def __repr__(self): + s = "drs([{}], [{}])".format( + ", ".join("%s" % r for r in 
self.refs), + ", ".join("%s" % c for c in self.conds), + ) + if self.consequent is not None: + s = f"imp({s}, {self.consequent})" + return s + + def __eq__(self, other): + return ( + self.__class__ == other.__class__ + and self.refs == other.refs + and len(self.conds) == len(other.conds) + and reduce( + operator.and_, (c1 == c2 for c1, c2 in zip(self.conds, other.conds)) + ) + and self.consequent == other.consequent + ) + + def __ne__(self, other): + return not self == other + + __hash__ = AbstractBoxerDrs.__hash__ + + +class BoxerNot(AbstractBoxerDrs): + def __init__(self, drs): + AbstractBoxerDrs.__init__(self) + self.drs = drs + + def _variables(self): + return self.drs._variables() + + def atoms(self): + return self.drs.atoms() + + def clean(self): + return BoxerNot(self.drs.clean()) + + def renumber_sentences(self, f): + return BoxerNot(self.drs.renumber_sentences(f)) + + def __repr__(self): + return "not(%s)" % (self.drs) + + def __eq__(self, other): + return self.__class__ == other.__class__ and self.drs == other.drs + + def __ne__(self, other): + return not self == other + + __hash__ = AbstractBoxerDrs.__hash__ + + +class BoxerIndexed(AbstractBoxerDrs): + def __init__(self, discourse_id, sent_index, word_indices): + AbstractBoxerDrs.__init__(self) + self.discourse_id = discourse_id + self.sent_index = sent_index + self.word_indices = word_indices + + def atoms(self): + return {self} + + def __eq__(self, other): + return ( + self.__class__ == other.__class__ + and self.discourse_id == other.discourse_id + and self.sent_index == other.sent_index + and self.word_indices == other.word_indices + and reduce(operator.and_, (s == o for s, o in zip(self, other))) + ) + + def __ne__(self, other): + return not self == other + + __hash__ = AbstractBoxerDrs.__hash__ + + def __repr__(self): + s = "{}({}, {}, [{}]".format( + self._pred(), + self.discourse_id, + self.sent_index, + ", ".join("%s" % wi for wi in self.word_indices), + ) + for v in self: + s += ", %s" % v + 
return s + ")" + + +class BoxerPred(BoxerIndexed): + def __init__(self, discourse_id, sent_index, word_indices, var, name, pos, sense): + BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices) + self.var = var + self.name = name + self.pos = pos + self.sense = sense + + def _variables(self): + return ({self.var}, set(), set()) + + def change_var(self, var): + return BoxerPred( + self.discourse_id, + self.sent_index, + self.word_indices, + var, + self.name, + self.pos, + self.sense, + ) + + def clean(self): + return BoxerPred( + self.discourse_id, + self.sent_index, + self.word_indices, + self.var, + self._clean_name(self.name), + self.pos, + self.sense, + ) + + def renumber_sentences(self, f): + new_sent_index = f(self.sent_index) + return BoxerPred( + self.discourse_id, + new_sent_index, + self.word_indices, + self.var, + self.name, + self.pos, + self.sense, + ) + + def __iter__(self): + return iter((self.var, self.name, self.pos, self.sense)) + + def _pred(self): + return "pred" + + +class BoxerNamed(BoxerIndexed): + def __init__(self, discourse_id, sent_index, word_indices, var, name, type, sense): + BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices) + self.var = var + self.name = name + self.type = type + self.sense = sense + + def _variables(self): + return ({self.var}, set(), set()) + + def change_var(self, var): + return BoxerNamed( + self.discourse_id, + self.sent_index, + self.word_indices, + var, + self.name, + self.type, + self.sense, + ) + + def clean(self): + return BoxerNamed( + self.discourse_id, + self.sent_index, + self.word_indices, + self.var, + self._clean_name(self.name), + self.type, + self.sense, + ) + + def renumber_sentences(self, f): + return BoxerNamed( + self.discourse_id, + f(self.sent_index), + self.word_indices, + self.var, + self.name, + self.type, + self.sense, + ) + + def __iter__(self): + return iter((self.var, self.name, self.type, self.sense)) + + def _pred(self): + return "named" + + +class 
BoxerRel(BoxerIndexed): + def __init__(self, discourse_id, sent_index, word_indices, var1, var2, rel, sense): + BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices) + self.var1 = var1 + self.var2 = var2 + self.rel = rel + self.sense = sense + + def _variables(self): + return ({self.var1, self.var2}, set(), set()) + + def clean(self): + return BoxerRel( + self.discourse_id, + self.sent_index, + self.word_indices, + self.var1, + self.var2, + self._clean_name(self.rel), + self.sense, + ) + + def renumber_sentences(self, f): + return BoxerRel( + self.discourse_id, + f(self.sent_index), + self.word_indices, + self.var1, + self.var2, + self.rel, + self.sense, + ) + + def __iter__(self): + return iter((self.var1, self.var2, self.rel, self.sense)) + + def _pred(self): + return "rel" + + +class BoxerProp(BoxerIndexed): + def __init__(self, discourse_id, sent_index, word_indices, var, drs): + BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices) + self.var = var + self.drs = drs + + def _variables(self): + return tuple( + map(operator.or_, (set(), set(), {self.var}), self.drs._variables()) + ) + + def referenced_labels(self): + return {self.drs} + + def atoms(self): + return self.drs.atoms() + + def clean(self): + return BoxerProp( + self.discourse_id, + self.sent_index, + self.word_indices, + self.var, + self.drs.clean(), + ) + + def renumber_sentences(self, f): + return BoxerProp( + self.discourse_id, + f(self.sent_index), + self.word_indices, + self.var, + self.drs.renumber_sentences(f), + ) + + def __iter__(self): + return iter((self.var, self.drs)) + + def _pred(self): + return "prop" + + +class BoxerEq(BoxerIndexed): + def __init__(self, discourse_id, sent_index, word_indices, var1, var2): + BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices) + self.var1 = var1 + self.var2 = var2 + + def _variables(self): + return ({self.var1, self.var2}, set(), set()) + + def atoms(self): + return set() + + def renumber_sentences(self, f): 
+ return BoxerEq( + self.discourse_id, + f(self.sent_index), + self.word_indices, + self.var1, + self.var2, + ) + + def __iter__(self): + return iter((self.var1, self.var2)) + + def _pred(self): + return "eq" + + +class BoxerCard(BoxerIndexed): + def __init__(self, discourse_id, sent_index, word_indices, var, value, type): + BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices) + self.var = var + self.value = value + self.type = type + + def _variables(self): + return ({self.var}, set(), set()) + + def renumber_sentences(self, f): + return BoxerCard( + self.discourse_id, + f(self.sent_index), + self.word_indices, + self.var, + self.value, + self.type, + ) + + def __iter__(self): + return iter((self.var, self.value, self.type)) + + def _pred(self): + return "card" + + +class BoxerOr(BoxerIndexed): + def __init__(self, discourse_id, sent_index, word_indices, drs1, drs2): + BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices) + self.drs1 = drs1 + self.drs2 = drs2 + + def _variables(self): + return tuple(map(operator.or_, self.drs1._variables(), self.drs2._variables())) + + def atoms(self): + return self.drs1.atoms() | self.drs2.atoms() + + def clean(self): + return BoxerOr( + self.discourse_id, + self.sent_index, + self.word_indices, + self.drs1.clean(), + self.drs2.clean(), + ) + + def renumber_sentences(self, f): + return BoxerOr( + self.discourse_id, + f(self.sent_index), + self.word_indices, + self.drs1, + self.drs2, + ) + + def __iter__(self): + return iter((self.drs1, self.drs2)) + + def _pred(self): + return "or" + + +class BoxerWhq(BoxerIndexed): + def __init__( + self, discourse_id, sent_index, word_indices, ans_types, drs1, variable, drs2 + ): + BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices) + self.ans_types = ans_types + self.drs1 = drs1 + self.variable = variable + self.drs2 = drs2 + + def _variables(self): + return tuple( + map( + operator.or_, + ({self.variable}, set(), set()), + self.drs1._variables(), + 
self.drs2._variables(), + ) + ) + + def atoms(self): + return self.drs1.atoms() | self.drs2.atoms() + + def clean(self): + return BoxerWhq( + self.discourse_id, + self.sent_index, + self.word_indices, + self.ans_types, + self.drs1.clean(), + self.variable, + self.drs2.clean(), + ) + + def renumber_sentences(self, f): + return BoxerWhq( + self.discourse_id, + f(self.sent_index), + self.word_indices, + self.ans_types, + self.drs1, + self.variable, + self.drs2, + ) + + def __iter__(self): + return iter( + ("[" + ",".join(self.ans_types) + "]", self.drs1, self.variable, self.drs2) + ) + + def _pred(self): + return "whq" + + +class PassthroughBoxerDrsInterpreter: + def interpret(self, ex): + return ex + + +class NltkDrtBoxerDrsInterpreter: + def __init__(self, occur_index=False): + self._occur_index = occur_index + + def interpret(self, ex): + """ + :param ex: ``AbstractBoxerDrs`` + :return: ``DrtExpression`` + """ + if isinstance(ex, BoxerDrs): + drs = DRS( + [Variable(r) for r in ex.refs], list(map(self.interpret, ex.conds)) + ) + if ex.consequent is not None: + drs.consequent = self.interpret(ex.consequent) + return drs + elif isinstance(ex, BoxerNot): + return DrtNegatedExpression(self.interpret(ex.drs)) + elif isinstance(ex, BoxerPred): + pred = self._add_occur_indexing(f"{ex.pos}_{ex.name}", ex) + return self._make_atom(pred, ex.var) + elif isinstance(ex, BoxerNamed): + pred = self._add_occur_indexing(f"ne_{ex.type}_{ex.name}", ex) + return self._make_atom(pred, ex.var) + elif isinstance(ex, BoxerRel): + pred = self._add_occur_indexing("%s" % (ex.rel), ex) + return self._make_atom(pred, ex.var1, ex.var2) + elif isinstance(ex, BoxerProp): + return DrtProposition(Variable(ex.var), self.interpret(ex.drs)) + elif isinstance(ex, BoxerEq): + return DrtEqualityExpression( + DrtVariableExpression(Variable(ex.var1)), + DrtVariableExpression(Variable(ex.var2)), + ) + elif isinstance(ex, BoxerCard): + pred = self._add_occur_indexing(f"card_{ex.type}_{ex.value}", ex) + return 
self._make_atom(pred, ex.var) + elif isinstance(ex, BoxerOr): + return DrtOrExpression(self.interpret(ex.drs1), self.interpret(ex.drs2)) + elif isinstance(ex, BoxerWhq): + drs1 = self.interpret(ex.drs1) + drs2 = self.interpret(ex.drs2) + return DRS(drs1.refs + drs2.refs, drs1.conds + drs2.conds) + assert False, f"{ex.__class__.__name__}: {ex}" + + def _make_atom(self, pred, *args): + accum = DrtVariableExpression(Variable(pred)) + for arg in args: + accum = DrtApplicationExpression( + accum, DrtVariableExpression(Variable(arg)) + ) + return accum + + def _add_occur_indexing(self, base, ex): + if self._occur_index and ex.sent_index is not None: + if ex.discourse_id: + base += "_%s" % ex.discourse_id + base += "_s%s" % ex.sent_index + base += "_w%s" % sorted(ex.word_indices)[0] + return base + + +class UnparseableInputException(Exception): + pass + + +if __name__ == "__main__": + opts = OptionParser("usage: %prog TEXT [options]") + opts.add_option( + "--verbose", + "-v", + help="display verbose logs", + action="store_true", + default=False, + dest="verbose", + ) + opts.add_option( + "--fol", "-f", help="output FOL", action="store_true", default=False, dest="fol" + ) + opts.add_option( + "--question", + "-q", + help="input is a question", + action="store_true", + default=False, + dest="question", + ) + opts.add_option( + "--occur", + "-o", + help="occurrence index", + action="store_true", + default=False, + dest="occur_index", + ) + (options, args) = opts.parse_args() + + if len(args) != 1: + opts.error("incorrect number of arguments") + + interpreter = NltkDrtBoxerDrsInterpreter(occur_index=options.occur_index) + drs = Boxer(interpreter).interpret_multi( + args[0].split(r"\n"), question=options.question, verbose=options.verbose + ) + if drs is None: + print(None) + else: + drs = drs.simplify().eliminate_equality() + if options.fol: + print(drs.fol().normalize()) + else: + drs.pretty_print() diff --git a/lib/python3.10/site-packages/nltk/sem/chat80.py 
b/lib/python3.10/site-packages/nltk/sem/chat80.py new file mode 100644 index 0000000000000000000000000000000000000000..3d1e77a49f19b2e4414f66741570cdb033ec7ca6 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/sem/chat80.py @@ -0,0 +1,857 @@ +# Natural Language Toolkit: Chat-80 KB Reader +# See https://www.w3.org/TR/swbp-skos-core-guide/ +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein , +# URL: +# For license information, see LICENSE.TXT + +r""" +Overview +======== + +Chat-80 was a natural language system which allowed the user to +interrogate a Prolog knowledge base in the domain of world +geography. It was developed in the early '80s by Warren and Pereira; see +``https://www.aclweb.org/anthology/J82-3002.pdf`` for a description and +``http://www.cis.upenn.edu/~pereira/oldies.html`` for the source +files. + +This module contains functions to extract data from the Chat-80 +relation files ('the world database'), and convert then into a format +that can be incorporated in the FOL models of +``nltk.sem.evaluate``. The code assumes that the Prolog +input files are available in the NLTK corpora directory. + +The Chat-80 World Database consists of the following files:: + + world0.pl + rivers.pl + cities.pl + countries.pl + contain.pl + borders.pl + +This module uses a slightly modified version of ``world0.pl``, in which +a set of Prolog rules have been omitted. The modified file is named +``world1.pl``. Currently, the file ``rivers.pl`` is not read in, since +it uses a list rather than a string in the second field. + +Reading Chat-80 Files +===================== + +Chat-80 relations are like tables in a relational database. The +relation acts as the name of the table; the first argument acts as the +'primary key'; and subsequent arguments are further fields in the +table. In general, the name of the table provides a label for a unary +predicate whose extension is all the primary keys. 
For example, +relations in ``cities.pl`` are of the following form:: + + 'city(athens,greece,1368).' + +Here, ``'athens'`` is the key, and will be mapped to a member of the +unary predicate *city*. + +The fields in the table are mapped to binary predicates. The first +argument of the predicate is the primary key, while the second +argument is the data in the relevant field. Thus, in the above +example, the third field is mapped to the binary predicate +*population_of*, whose extension is a set of pairs such as +``'(athens, 1368)'``. + +An exception to this general framework is required by the relations in +the files ``borders.pl`` and ``contains.pl``. These contain facts of the +following form:: + + 'borders(albania,greece).' + + 'contains0(africa,central_africa).' + +We do not want to form a unary concept out the element in +the first field of these records, and we want the label of the binary +relation just to be ``'border'``/``'contain'`` respectively. + +In order to drive the extraction process, we use 'relation metadata bundles' +which are Python dictionaries such as the following:: + + city = {'label': 'city', + 'closures': [], + 'schema': ['city', 'country', 'population'], + 'filename': 'cities.pl'} + +According to this, the file ``city['filename']`` contains a list of +relational tuples (or more accurately, the corresponding strings in +Prolog form) whose predicate symbol is ``city['label']`` and whose +relational schema is ``city['schema']``. The notion of a ``closure`` is +discussed in the next section. + +Concepts +======== +In order to encapsulate the results of the extraction, a class of +``Concept`` objects is introduced. A ``Concept`` object has a number of +attributes, in particular a ``prefLabel`` and ``extension``, which make +it easier to inspect the output of the extraction. 
In addition, the +``extension`` can be further processed: in the case of the ``'border'`` +relation, we check that the relation is symmetric, and in the case +of the ``'contain'`` relation, we carry out the transitive +closure. The closure properties associated with a concept is +indicated in the relation metadata, as indicated earlier. + +The ``extension`` of a ``Concept`` object is then incorporated into a +``Valuation`` object. + +Persistence +=========== +The functions ``val_dump`` and ``val_load`` are provided to allow a +valuation to be stored in a persistent database and re-loaded, rather +than having to be re-computed each time. + +Individuals and Lexical Items +============================= +As well as deriving relations from the Chat-80 data, we also create a +set of individual constants, one for each entity in the domain. The +individual constants are string-identical to the entities. For +example, given a data item such as ``'zloty'``, we add to the valuation +a pair ``('zloty', 'zloty')``. In order to parse English sentences that +refer to these entities, we also create a lexical item such as the +following for each individual constant:: + + PropN[num=sg, sem=<\P.(P zloty)>] -> 'Zloty' + +The set of rules is written to the file ``chat_pnames.cfg`` in the +current directory. 
+ +""" + +import os +import re +import shelve +import sys + +import nltk.data + +########################################################################### +# Chat-80 relation metadata bundles needed to build the valuation +########################################################################### + +borders = { + "rel_name": "borders", + "closures": ["symmetric"], + "schema": ["region", "border"], + "filename": "borders.pl", +} + +contains = { + "rel_name": "contains0", + "closures": ["transitive"], + "schema": ["region", "contain"], + "filename": "contain.pl", +} + +city = { + "rel_name": "city", + "closures": [], + "schema": ["city", "country", "population"], + "filename": "cities.pl", +} + +country = { + "rel_name": "country", + "closures": [], + "schema": [ + "country", + "region", + "latitude", + "longitude", + "area", + "population", + "capital", + "currency", + ], + "filename": "countries.pl", +} + +circle_of_lat = { + "rel_name": "circle_of_latitude", + "closures": [], + "schema": ["circle_of_latitude", "degrees"], + "filename": "world1.pl", +} + +circle_of_long = { + "rel_name": "circle_of_longitude", + "closures": [], + "schema": ["circle_of_longitude", "degrees"], + "filename": "world1.pl", +} + +continent = { + "rel_name": "continent", + "closures": [], + "schema": ["continent"], + "filename": "world1.pl", +} + +region = { + "rel_name": "in_continent", + "closures": [], + "schema": ["region", "continent"], + "filename": "world1.pl", +} + +ocean = { + "rel_name": "ocean", + "closures": [], + "schema": ["ocean"], + "filename": "world1.pl", +} + +sea = {"rel_name": "sea", "closures": [], "schema": ["sea"], "filename": "world1.pl"} + + +items = [ + "borders", + "contains", + "city", + "country", + "circle_of_lat", + "circle_of_long", + "continent", + "region", + "ocean", + "sea", +] +items = tuple(sorted(items)) + +item_metadata = { + "borders": borders, + "contains": contains, + "city": city, + "country": country, + "circle_of_lat": circle_of_lat, + 
"circle_of_long": circle_of_long, + "continent": continent, + "region": region, + "ocean": ocean, + "sea": sea, +} + +rels = item_metadata.values() + +not_unary = ["borders.pl", "contain.pl"] + +########################################################################### + + +class Concept: + """ + A Concept class, loosely based on SKOS + (https://www.w3.org/TR/swbp-skos-core-guide/). + """ + + def __init__(self, prefLabel, arity, altLabels=[], closures=[], extension=set()): + """ + :param prefLabel: the preferred label for the concept + :type prefLabel: str + :param arity: the arity of the concept + :type arity: int + :param altLabels: other (related) labels + :type altLabels: list + :param closures: closure properties of the extension + (list items can be ``symmetric``, ``reflexive``, ``transitive``) + :type closures: list + :param extension: the extensional value of the concept + :type extension: set + """ + self.prefLabel = prefLabel + self.arity = arity + self.altLabels = altLabels + self.closures = closures + # keep _extension internally as a set + self._extension = extension + # public access is via a list (for slicing) + self.extension = sorted(list(extension)) + + def __str__(self): + # _extension = '' + # for element in sorted(self.extension): + # if isinstance(element, tuple): + # element = '(%s, %s)' % (element) + # _extension += element + ', ' + # _extension = _extension[:-1] + + return "Label = '{}'\nArity = {}\nExtension = {}".format( + self.prefLabel, + self.arity, + self.extension, + ) + + def __repr__(self): + return "Concept('%s')" % self.prefLabel + + def augment(self, data): + """ + Add more data to the ``Concept``'s extension set. + + :param data: a new semantic value + :type data: string or pair of strings + :rtype: set + + """ + self._extension.add(data) + self.extension = sorted(list(self._extension)) + return self._extension + + def _make_graph(self, s): + """ + Convert a set of pairs into an adjacency linked list encoding of a graph. 
+ """ + g = {} + for (x, y) in s: + if x in g: + g[x].append(y) + else: + g[x] = [y] + return g + + def _transclose(self, g): + """ + Compute the transitive closure of a graph represented as a linked list. + """ + for x in g: + for adjacent in g[x]: + # check that adjacent is a key + if adjacent in g: + for y in g[adjacent]: + if y not in g[x]: + g[x].append(y) + return g + + def _make_pairs(self, g): + """ + Convert an adjacency linked list back into a set of pairs. + """ + pairs = [] + for node in g: + for adjacent in g[node]: + pairs.append((node, adjacent)) + return set(pairs) + + def close(self): + """ + Close a binary relation in the ``Concept``'s extension set. + + :return: a new extension for the ``Concept`` in which the + relation is closed under a given property + """ + from nltk.sem import is_rel + + assert is_rel(self._extension) + if "symmetric" in self.closures: + pairs = [] + for (x, y) in self._extension: + pairs.append((y, x)) + sym = set(pairs) + self._extension = self._extension.union(sym) + if "transitive" in self.closures: + all = self._make_graph(self._extension) + closed = self._transclose(all) + trans = self._make_pairs(closed) + self._extension = self._extension.union(trans) + self.extension = sorted(list(self._extension)) + + +def clause2concepts(filename, rel_name, schema, closures=[]): + """ + Convert a file of Prolog clauses into a list of ``Concept`` objects. 
def cities2table(filename, rel_name, dbname, verbose=False, setup=False):
    """
    Convert a file of Prolog clauses into a database table.

    This is not generic, since it doesn't allow arbitrary
    schemas to be set as a parameter.

    Intended usage::

        cities2table('cities.pl', 'city', 'city.db', verbose=True, setup=True)

    :param filename: filename containing the relations
    :type filename: str
    :param rel_name: name of the relation
    :type rel_name: str
    :param dbname: filename of persistent store
    :type dbname: str
    :param verbose: if True, print each inserted row and the final commit
    :type verbose: bool
    :param setup: if True, create the ``city_table`` table before inserting
    :type setup: bool
    """
    import sqlite3

    records = _str2records(filename, rel_name)
    connection = sqlite3.connect(dbname)
    try:
        cur = connection.cursor()
        if setup:
            cur.execute(
                """CREATE TABLE city_table
                (City text, Country text, Population int)"""
            )

        table_name = "city_table"
        for t in records:
            # table_name is a trusted local constant, so the %-interpolation
            # is safe; the row values go through sqlite3 parameter binding.
            cur.execute("insert into %s values (?,?,?)" % table_name, t)
            if verbose:
                print("inserting values into %s: " % table_name, t)
        connection.commit()
        if verbose:
            print("Committing update to %s" % dbname)
        cur.close()
    finally:
        # Fix: the connection itself was previously never closed (leak).
        connection.close()
def binary_concept(label, closures, subj, obj, records):
    """
    Make a binary concept out of the primary key and another field in a record.

    A record is a list of entities in some relation, such as
    ``['france', 'paris']``, where ``'france'`` is acting as the primary
    key, and ``'paris'`` stands in the ``'capital_of'`` relation to
    ``'france'``.

    More generally, given a record such as ``['a', 'b', 'c']``, where
    label is bound to ``'B'``, and ``obj`` bound to 1, the derived
    binary concept will have label ``'B_of'``, and its extension will
    be a set of pairs such as ``('a', 'b')``.

    :param label: the base part of the preferred label for the concept
    :type label: str
    :param closures: closure properties for the extension of the concept
    :type closures: list
    :param subj: position in the record of the subject of the predicate
    :type subj: int
    :param obj: position in the record of the object of the predicate
    :type obj: int
    :param records: a list of records
    :type records: list of lists
    :return: ``Concept`` of arity 2
    :rtype: Concept
    """
    # Every relation except 'border'/'contain' gets an '_of' suffix,
    # e.g. 'capital' -> 'capital_of'.
    if label not in ("border", "contain"):
        label = label + "_of"
    concept = Concept(label, arity=2, closures=closures, extension=set())
    for rec in records:
        concept.augment((rec[subj], rec[obj]))
    # close the concept's extension according to the properties in closures
    concept.close()
    return concept
def val_load(db):
    """
    Load a ``Valuation`` from a persistent database.

    :param db: name of file from which data is read.
        The suffix '.db' should be omitted from the name.
    :type db: str
    """
    dbname = db + ".db"

    # Bail out early if the shelve file cannot be read; sys.exit raises
    # SystemExit, so no 'else' branch is needed.
    if not os.access(dbname, os.R_OK):
        sys.exit("Cannot read file: %s" % dbname)

    from nltk.sem import Valuation

    db_in = shelve.open(db)
    return Valuation(db_in)
def make_lex(symbols):
    """
    Create lexical CFG rules for each individual symbol.

    Given a valuation with an entry of the form ``{'zloty': 'zloty'}``,
    create a lexical rule for the proper name 'Zloty'.

    :param symbols: a list of individual constants in the semantic representation
    :type symbols: sequence -- set(str)
    :rtype: list(str)
    """
    header = """
##################################################################
# Lexical rules automatically generated by running 'chat80.py -x'.
##################################################################

"""
    template = r"PropN[num=sg, sem=<\P.(P %s)>] -> '%s'\n"

    # Proper names capitalize each underscore-separated part:
    # 'new_york' -> 'New_York'.
    rules = [header]
    for symbol in symbols:
        pname = "_".join(part.capitalize() for part in symbol.split("_"))
        rules.append(template % (symbol, pname))
    return rules
def main():
    """
    Command-line driver: extract the Chat-80 data and either store, load,
    or print a valuation (or concepts / vocabulary / lexical rules),
    depending on the options given.
    """
    import sys
    from optparse import OptionParser

    description = """
Extract data from the Chat-80 Prolog files and convert them into a
Valuation object for use in the NLTK semantics package.
    """

    opts = OptionParser(description=description)
    opts.set_defaults(verbose=True, lex=False, vocab=False)
    opts.add_option(
        "-s", "--store", dest="outdb", help="store a valuation in DB", metavar="DB"
    )
    opts.add_option(
        "-l",
        "--load",
        dest="indb",
        help="load a stored valuation from DB",
        metavar="DB",
    )
    opts.add_option(
        "-c",
        "--concepts",
        action="store_true",
        help="print concepts instead of a valuation",
    )
    opts.add_option(
        "-r",
        "--relation",
        dest="label",
        help="print concept with label REL (check possible labels with '-v' option)",
        metavar="REL",
    )
    opts.add_option(
        "-q",
        "--quiet",
        action="store_false",
        dest="verbose",
        help="don't print out progress info",
    )
    opts.add_option(
        "-x",
        "--lex",
        action="store_true",
        dest="lex",
        help="write a file of lexical entries for country names, then exit",
    )
    opts.add_option(
        "-v",
        "--vocab",
        action="store_true",
        dest="vocab",
        help="print out the vocabulary of concept labels and their arity, then exit",
    )

    (options, args) = opts.parse_args()
    if options.outdb and options.indb:
        opts.error("Options --store and --load are mutually exclusive")

    if options.outdb:
        # write the valuation to a persistent database
        if options.verbose:
            outdb = options.outdb + ".db"
            print("Dumping a valuation to %s" % outdb)
        val_dump(rels, options.outdb)
        sys.exit(0)
    else:
        # try to read in a valuation from a database
        if options.indb is not None:
            dbname = options.indb + ".db"
            if not os.access(dbname, os.R_OK):
                sys.exit("Cannot read file: %s" % dbname)
            else:
                valuation = val_load(options.indb)
        # we need to create the valuation from scratch
        else:
            # build some concepts
            concept_map = process_bundle(rels)
            concepts = concept_map.values()
            # just print out the vocabulary
            if options.vocab:
                items = sorted((c.arity, c.prefLabel) for c in concepts)
                for (arity, label) in items:
                    print(label, arity)
                sys.exit(0)
            # show all the concepts
            if options.concepts:
                for c in concepts:
                    print(c)
                    print()
            if options.label:
                print(concept_map[options.label])
                sys.exit(0)
            else:
                # turn the concepts into a Valuation
                if options.lex:
                    if options.verbose:
                        print("Writing out lexical rules")
                    make_valuation(concepts, lexicon=True)
                else:
                    valuation = make_valuation(concepts, read=True)
                    print(valuation)
+ """ + print() + print("Using SQL to extract rows from 'city.db' RDB.") + for row in sql_query("corpora/city_database/city.db", "SELECT * FROM city_table"): + print(row) + + +if __name__ == "__main__": + main() + sql_demo() diff --git a/lib/python3.10/site-packages/nltk/sem/cooper_storage.py b/lib/python3.10/site-packages/nltk/sem/cooper_storage.py new file mode 100644 index 0000000000000000000000000000000000000000..a41502187ed1dfbfae5bc21bdf7c29624cab1e0f --- /dev/null +++ b/lib/python3.10/site-packages/nltk/sem/cooper_storage.py @@ -0,0 +1,124 @@ +# Natural Language Toolkit: Cooper storage for Quantifier Ambiguity +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# URL: +# For license information, see LICENSE.TXT + +from nltk.parse import load_parser +from nltk.parse.featurechart import InstantiateVarsChart +from nltk.sem.logic import ApplicationExpression, LambdaExpression, Variable + + +class CooperStore: + """ + A container for handling quantifier ambiguity via Cooper storage. + """ + + def __init__(self, featstruct): + """ + :param featstruct: The value of the ``sem`` node in a tree from + ``parse_with_bindops()`` + :type featstruct: FeatStruct (with features ``core`` and ``store``) + + """ + self.featstruct = featstruct + self.readings = [] + try: + self.core = featstruct["CORE"] + self.store = featstruct["STORE"] + except KeyError: + print("%s is not a Cooper storage structure" % featstruct) + + def _permute(self, lst): + """ + :return: An iterator over the permutations of the input list + :type lst: list + :rtype: iter + """ + remove = lambda lst0, index: lst0[:index] + lst0[index + 1 :] + if lst: + for index, x in enumerate(lst): + for y in self._permute(remove(lst, index)): + yield (x,) + y + else: + yield () + + def s_retrieve(self, trace=False): + r""" + Carry out S-Retrieval of binding operators in store. If hack=True, + serialize the bindop and core as strings and reparse. Ugh. + + Each permutation of the store (i.e. 
list of binding operators) is + taken to be a possible scoping of quantifiers. We iterate through the + binding operators in each permutation, and successively apply them to + the current term, starting with the core semantic representation, + working from the inside out. + + Binding operators are of the form:: + + bo(\P.all x.(man(x) -> P(x)),z1) + """ + for perm, store_perm in enumerate(self._permute(self.store)): + if trace: + print("Permutation %s" % (perm + 1)) + term = self.core + for bindop in store_perm: + # we just want the arguments that are wrapped by the 'bo' predicate + quant, varex = tuple(bindop.args) + # use var to make an abstraction over the current term and then + # apply the quantifier to it + term = ApplicationExpression( + quant, LambdaExpression(varex.variable, term) + ) + if trace: + print(" ", term) + term = term.simplify() + self.readings.append(term) + + +def parse_with_bindops(sentence, grammar=None, trace=0): + """ + Use a grammar with Binding Operators to parse a sentence. + """ + if not grammar: + grammar = "grammars/book_grammars/storage.fcfg" + parser = load_parser(grammar, trace=trace, chart_class=InstantiateVarsChart) + # Parse the sentence. 
def demo():
    """Demonstrate Cooper storage on a short example sentence."""
    from nltk.sem import cooper_storage as cs

    sentence = "every girl chases a dog"
    # sentence = "a man gives a bone to every dog"
    print()
    print("Analysis of sentence '%s'" % sentence)
    print("=" * 50)
    parses = cs.parse_with_bindops(sentence, trace=0)
    for parse_tree in parses:
        storage = cs.CooperStore(parse_tree.label()["SEM"])
        print()
        print("Binding operators:")
        print("-" * 15)
        for bindop in storage.store:
            print(bindop)
        print()
        print("Core:")
        print("-" * 15)
        print(storage.core)
        print()
        print("S-Retrieval:")
        print("-" * 15)
        storage.s_retrieve(trace=True)
        print("Readings:")
        print("-" * 15)
        for number, reading in enumerate(storage.readings, start=1):
            print(f"{number}: {reading}")
    def handle(self, tok, context):
        """This method is intended to be overridden for logics that
        use different operators or expressions

        Dispatch on the current token: negation, lambda, a parenthesized
        expression (which may open a bracket-style DRS), an explicit
        'DRS(...)' form, or a variable (possibly a proposition 'var:drs').
        """
        if tok in DrtTokens.NOT_LIST:
            return self.handle_negation(tok, context)

        elif tok in DrtTokens.LAMBDA_LIST:
            return self.handle_lambda(tok, context)

        elif tok == DrtTokens.OPEN:
            # '([' introduces the bracketed DRS notation; otherwise an
            # ordinary parenthesized expression
            if self.inRange(0) and self.token(0) == DrtTokens.OPEN_BRACKET:
                return self.handle_DRS(tok, context)
            else:
                return self.handle_open(tok, context)

        elif tok.upper() == DrtTokens.DRS:
            # the keyword form: DRS([refs],[conds])
            self.assertNextToken(DrtTokens.OPEN)
            return self.handle_DRS(tok, context)

        elif self.isvariable(tok):
            # 'var:' introduces a proposition condition
            if self.inRange(0) and self.token(0) == DrtTokens.COLON:
                return self.handle_prop(tok, context)
            else:
                return self.handle_variable(tok, context)
context): + # a DRS + refs = self.handle_refs() + if ( + self.inRange(0) and self.token(0) == DrtTokens.COMMA + ): # if there is a comma (it's optional) + self.token() # swallow the comma + conds = self.handle_conds(context) + self.assertNextToken(DrtTokens.CLOSE) + return DRS(refs, conds, None) + + def handle_refs(self): + self.assertNextToken(DrtTokens.OPEN_BRACKET) + refs = [] + while self.inRange(0) and self.token(0) != DrtTokens.CLOSE_BRACKET: + # Support expressions like: DRS([x y],C) == DRS([x,y],C) + if refs and self.token(0) == DrtTokens.COMMA: + self.token() # swallow the comma + refs.append(self.get_next_token_variable("quantified")) + self.assertNextToken(DrtTokens.CLOSE_BRACKET) + return refs + + def handle_conds(self, context): + self.assertNextToken(DrtTokens.OPEN_BRACKET) + conds = [] + while self.inRange(0) and self.token(0) != DrtTokens.CLOSE_BRACKET: + # Support expressions like: DRS([x y],C) == DRS([x, y],C) + if conds and self.token(0) == DrtTokens.COMMA: + self.token() # swallow the comma + conds.append(self.process_next_expression(context)) + self.assertNextToken(DrtTokens.CLOSE_BRACKET) + return conds + + def handle_prop(self, tok, context): + variable = self.make_VariableExpression(tok) + self.assertNextToken(":") + drs = self.process_next_expression(DrtTokens.COLON) + return DrtProposition(variable, drs) + + def make_EqualityExpression(self, first, second): + """This method serves as a hook for other logic parsers that + have different equality expression classes""" + return DrtEqualityExpression(first, second) + + def get_BooleanExpression_factory(self, tok): + """This method serves as a hook for other logic parsers that + have different boolean operators""" + if tok == DrtTokens.DRS_CONC: + return lambda first, second: DrtConcatenation(first, second, None) + elif tok in DrtTokens.OR_LIST: + return DrtOrExpression + elif tok in DrtTokens.IMP_LIST: + + def make_imp_expression(first, second): + if isinstance(first, DRS): + return 
DRS(first.refs, first.conds, second) + if isinstance(first, DrtConcatenation): + return DrtConcatenation(first.first, first.second, second) + raise Exception("Antecedent of implication must be a DRS") + + return make_imp_expression + else: + return None + + def make_BooleanExpression(self, factory, first, second): + return factory(first, second) + + def make_ApplicationExpression(self, function, argument): + return DrtApplicationExpression(function, argument) + + def make_VariableExpression(self, name): + return DrtVariableExpression(Variable(name)) + + def make_LambdaExpression(self, variables, term): + return DrtLambdaExpression(variables, term) + + +class DrtExpression: + """ + This is the base abstract DRT Expression from which every DRT + Expression extends. + """ + + _drt_parser = DrtParser() + + @classmethod + def fromstring(cls, s): + return cls._drt_parser.parse(s) + + def applyto(self, other): + return DrtApplicationExpression(self, other) + + def __neg__(self): + return DrtNegatedExpression(self) + + def __and__(self, other): + return NotImplemented + + def __or__(self, other): + assert isinstance(other, DrtExpression) + return DrtOrExpression(self, other) + + def __gt__(self, other): + assert isinstance(other, DrtExpression) + if isinstance(self, DRS): + return DRS(self.refs, self.conds, other) + if isinstance(self, DrtConcatenation): + return DrtConcatenation(self.first, self.second, other) + raise Exception("Antecedent of implication must be a DRS") + + def equiv(self, other, prover=None): + """ + Check for logical equivalence. + Pass the expression (self <-> other) to the theorem prover. + If the prover says it is valid, then the self and other are equal. 
+ + :param other: an ``DrtExpression`` to check equality against + :param prover: a ``nltk.inference.api.Prover`` + """ + assert isinstance(other, DrtExpression) + + f1 = self.simplify().fol() + f2 = other.simplify().fol() + return f1.equiv(f2, prover) + + @property + def type(self): + raise AttributeError( + "'%s' object has no attribute 'type'" % self.__class__.__name__ + ) + + def typecheck(self, signature=None): + raise NotImplementedError() + + def __add__(self, other): + return DrtConcatenation(self, other, None) + + def get_refs(self, recursive=False): + """ + Return the set of discourse referents in this DRS. + :param recursive: bool Also find discourse referents in subterms? + :return: list of ``Variable`` objects + """ + raise NotImplementedError() + + def is_pronoun_function(self): + """Is self of the form "PRO(x)"?""" + return ( + isinstance(self, DrtApplicationExpression) + and isinstance(self.function, DrtAbstractVariableExpression) + and self.function.variable.name == DrtTokens.PRONOUN + and isinstance(self.argument, DrtIndividualVariableExpression) + ) + + def make_EqualityExpression(self, first, second): + return DrtEqualityExpression(first, second) + + def make_VariableExpression(self, variable): + return DrtVariableExpression(variable) + + def resolve_anaphora(self): + return resolve_anaphora(self) + + def eliminate_equality(self): + return self.visit_structured(lambda e: e.eliminate_equality(), self.__class__) + + def pretty_format(self): + """ + Draw the DRS + :return: the pretty print string + """ + return "\n".join(self._pretty()) + + def pretty_print(self): + print(self.pretty_format()) + + def draw(self): + DrsDrawer(self).draw() + + +class DRS(DrtExpression, Expression): + """A Discourse Representation Structure.""" + + def __init__(self, refs, conds, consequent=None): + """ + :param refs: list of ``DrtIndividualVariableExpression`` for the + discourse referents + :param conds: list of ``Expression`` for the conditions + """ + self.refs = 
    def replace(self, variable, expression, replace_bound=False, alpha_convert=True):
        """Replace all instances of variable v with expression E in self,
        where v is free in self.

        :param variable: ``Variable`` to be replaced
        :param expression: the replacement expression
        :param replace_bound: bool, also replace occurrences bound by this
            DRS's referent list (and the referent itself)?
        :param alpha_convert: bool, rename referents of this DRS that clash
            with free variables of ``expression`` before substituting?
        """
        if variable in self.refs:
            # if a bound variable is the thing being replaced
            if not replace_bound:
                return self
            else:
                i = self.refs.index(variable)
                if self.consequent:
                    consequent = self.consequent.replace(
                        variable, expression, True, alpha_convert
                    )
                else:
                    consequent = None
                # Swap the referent itself as well as every occurrence in
                # the conditions and the consequent.
                return DRS(
                    self.refs[:i] + [expression.variable] + self.refs[i + 1 :],
                    [
                        cond.replace(variable, expression, True, alpha_convert)
                        for cond in self.conds
                    ],
                    consequent,
                )
        else:
            if alpha_convert:
                # any bound variable that appears in the expression must
                # be alpha converted to avoid a conflict
                for ref in set(self.refs) & expression.free():
                    newvar = unique_variable(ref)
                    newvarex = DrtVariableExpression(newvar)
                    i = self.refs.index(ref)
                    if self.consequent:
                        consequent = self.consequent.replace(
                            ref, newvarex, True, alpha_convert
                        )
                    else:
                        consequent = None
                    # Rebind 'self' to the alpha-converted copy; the loop
                    # continues over the pre-computed clash set.
                    self = DRS(
                        self.refs[:i] + [newvar] + self.refs[i + 1 :],
                        [
                            cond.replace(ref, newvarex, True, alpha_convert)
                            for cond in self.conds
                        ],
                        consequent,
                    )

            # replace in the conditions
            if self.consequent:
                consequent = self.consequent.replace(
                    variable, expression, replace_bound, alpha_convert
                )
            else:
                consequent = None
            return DRS(
                self.refs,
                [
                    cond.replace(variable, expression, replace_bound, alpha_convert)
                    for cond in self.conds
                ],
                consequent,
            )
    def eliminate_equality(self):
        """Remove ``x = y`` conditions between simple variables.

        Each equality between two variable expressions is deleted, the
        second variable is dropped from the referent list, and remaining
        occurrences of it are rewritten to the first variable.  Conditions
        are then cleaned recursively, and any condition that simplifies to
        a completely empty DRS is discarded.
        """
        drs = self
        i = 0
        while i < len(drs.conds):
            cond = drs.conds[i]
            if (
                isinstance(cond, EqualityExpression)
                and isinstance(cond.first, AbstractVariableExpression)
                and isinstance(cond.second, AbstractVariableExpression)
            ):
                # Drop the equality condition and the now-redundant referent.
                drs = DRS(
                    list(set(drs.refs) - {cond.second.variable}),
                    drs.conds[:i] + drs.conds[i + 1 :],
                    drs.consequent,
                )
                if cond.second.variable != cond.first.variable:
                    drs = drs.replace(cond.second.variable, cond.first, False, False)
                # Reset so that, after the unconditional increment below,
                # the scan restarts at index 0: the rewrite may have
                # produced new variable equalities earlier in the list.
                i = 0
                i -= 1
            i += 1

        conds = []
        for cond in drs.conds:
            new_cond = cond.eliminate_equality()
            new_cond_simp = new_cond.simplify()
            # Keep the condition unless it simplified to an empty DRS
            # (no refs, no conds, no consequent).
            if (
                not isinstance(new_cond_simp, DRS)
                or new_cond_simp.refs
                or new_cond_simp.conds
                or new_cond_simp.consequent
            ):
                conds.append(new_cond)

        consequent = drs.consequent.eliminate_equality() if drs.consequent else None
        return DRS(drs.refs, conds, consequent)
= reduce(AndExpression, [c.fol() for c in self.conds]) + for ref in map(Variable, self._order_ref_strings(self.refs)[::-1]): + accum = ExistsExpression(ref, accum) + return accum + + def _pretty(self): + refs_line = " ".join(self._order_ref_strings(self.refs)) + + cond_lines = [ + cond + for cond_line in [ + filter(lambda s: s.strip(), cond._pretty()) for cond in self.conds + ] + for cond in cond_line + ] + length = max([len(refs_line)] + list(map(len, cond_lines))) + drs = ( + [ + " _" + "_" * length + "_ ", + "| " + refs_line.ljust(length) + " |", + "|-" + "-" * length + "-|", + ] + + ["| " + line.ljust(length) + " |" for line in cond_lines] + + ["|_" + "_" * length + "_|"] + ) + if self.consequent: + return DrtBinaryExpression._assemble_pretty( + drs, DrtTokens.IMP, self.consequent._pretty() + ) + return drs + + def _order_ref_strings(self, refs): + strings = ["%s" % ref for ref in refs] + ind_vars = [] + func_vars = [] + event_vars = [] + other_vars = [] + for s in strings: + if is_indvar(s): + ind_vars.append(s) + elif is_funcvar(s): + func_vars.append(s) + elif is_eventvar(s): + event_vars.append(s) + else: + other_vars.append(s) + return ( + sorted(other_vars) + + sorted(event_vars, key=lambda v: int([v[2:], -1][len(v[2:]) == 0])) + + sorted(func_vars, key=lambda v: (v[0], int([v[1:], -1][len(v[1:]) == 0]))) + + sorted(ind_vars, key=lambda v: (v[0], int([v[1:], -1][len(v[1:]) == 0]))) + ) + + def __eq__(self, other): + r"""Defines equality modulo alphabetic variance. 
def DrtVariableExpression(variable):
    """
    This is a factory method that instantiates and returns a subtype of
    ``DrtAbstractVariableExpression`` appropriate for the given variable.
    """
    # Try the specific classifiers in order; fall back to a constant.
    for matches, expression_class in (
        (is_indvar, DrtIndividualVariableExpression),
        (is_funcvar, DrtFunctionVariableExpression),
        (is_eventvar, DrtEventVariableExpression),
    ):
        if matches(variable.name):
            return expression_class(variable)
    return DrtConstantExpression(variable)
    def visit(self, function, combinator):
        """:see: Expression.visit()

        A proposition has a single sub-expression (its embedded DRS), so
        ``function`` is applied to it and the singleton list is handed to
        ``combinator``.  The proposition's label variable is not visited.
        """
        return combinator([function(self.drs)])
+ :param newvar: ``Variable``, for the new variable + """ + return self.__class__( + newvar, + self.term.replace(self.variable, DrtVariableExpression(newvar), True), + ) + + def fol(self): + return LambdaExpression(self.variable, self.term.fol()) + + def _pretty(self): + variables = [self.variable] + term = self.term + while term.__class__ == self.__class__: + variables.append(term.variable) + term = term.term + var_string = " ".join("%s" % v for v in variables) + DrtTokens.DOT + term_lines = term._pretty() + blank = " " * len(var_string) + return ( + [" " + blank + line for line in term_lines[:1]] + + [r" \ " + blank + line for line in term_lines[1:2]] + + [r" /\ " + var_string + line for line in term_lines[2:3]] + + [" " + blank + line for line in term_lines[3:]] + ) + + def get_refs(self, recursive=False): + """:see: AbstractExpression.get_refs()""" + return ( + [self.variable] + self.term.get_refs(True) if recursive else [self.variable] + ) + + +class DrtBinaryExpression(DrtExpression, BinaryExpression): + def get_refs(self, recursive=False): + """:see: AbstractExpression.get_refs()""" + return ( + self.first.get_refs(True) + self.second.get_refs(True) if recursive else [] + ) + + def _pretty(self): + return DrtBinaryExpression._assemble_pretty( + self._pretty_subex(self.first), + self.getOp(), + self._pretty_subex(self.second), + ) + + @staticmethod + def _assemble_pretty(first_lines, op, second_lines): + max_lines = max(len(first_lines), len(second_lines)) + first_lines = _pad_vertically(first_lines, max_lines) + second_lines = _pad_vertically(second_lines, max_lines) + blank = " " * len(op) + first_second_lines = list(zip(first_lines, second_lines)) + return ( + [ + " " + first_line + " " + blank + " " + second_line + " " + for first_line, second_line in first_second_lines[:2] + ] + + [ + "(" + first_line + " " + op + " " + second_line + ")" + for first_line, second_line in first_second_lines[2:3] + ] + + [ + " " + first_line + " " + blank + " " + 
class DrtOrExpression(DrtBooleanExpression, OrExpression):
    """Disjunction of two DRT expressions."""

    def fol(self):
        """Translate to first-order logic by translating both disjuncts."""
        left = self.first.fol()
        right = self.second.fol()
        return OrExpression(left, right)

    def _pretty_subex(self, subex):
        # A nested disjunction drops its outermost parentheses so that
        # chained '|' renders flat.
        if not isinstance(subex, DrtOrExpression):
            return DrtBooleanExpression._pretty_subex(self, subex)
        return [line[1:-1] for line in subex._pretty()]
    def simplify(self):
        """Merge the concatenation into a single ``DRS`` when possible.

        Both halves (and any attached consequent) are simplified first; if
        the halves each reduce to a ``DRS``, their referents and conditions
        are merged, alpha-converting referents of the second half that
        collide with the first.  Otherwise a concatenation of the
        simplified parts is returned unchanged in structure.
        """
        first = self.first.simplify()
        second = self.second.simplify()
        consequent = self.consequent.simplify() if self.consequent else None

        if isinstance(first, DRS) and isinstance(second, DRS):
            # For any ref that is in both 'first' and 'second'
            for ref in set(first.get_refs(True)) & set(second.get_refs(True)):
                # alpha convert the ref in 'second' to prevent collision
                newvar = DrtVariableExpression(unique_variable(ref))
                second = second.replace(ref, newvar, True)

            return DRS(first.refs + second.refs, first.conds + second.conds, consequent)
        else:
            return self.__class__(first, second, consequent)
+ If we are comparing \x.M and \y.N, then check equality of M and N[x/y].""" + if isinstance(other, DrtConcatenation): + self_refs = self.get_refs() + other_refs = other.get_refs() + if len(self_refs) == len(other_refs): + converted_other = other + for (r1, r2) in zip(self_refs, other_refs): + varex = self.make_VariableExpression(r1) + converted_other = converted_other.replace(r2, varex, True) + return ( + self.first == converted_other.first + and self.second == converted_other.second + and self.consequent == converted_other.consequent + ) + return False + + def __ne__(self, other): + return not self == other + + __hash__ = DrtBooleanExpression.__hash__ + + def fol(self): + e = AndExpression(self.first.fol(), self.second.fol()) + if self.consequent: + e = ImpExpression(e, self.consequent.fol()) + return e + + def _pretty(self): + drs = DrtBinaryExpression._assemble_pretty( + self._pretty_subex(self.first), + self.getOp(), + self._pretty_subex(self.second), + ) + if self.consequent: + drs = DrtBinaryExpression._assemble_pretty( + drs, DrtTokens.IMP, self.consequent._pretty() + ) + return drs + + def _pretty_subex(self, subex): + if isinstance(subex, DrtConcatenation): + return [line[1:-1] for line in subex._pretty()] + return DrtBooleanExpression._pretty_subex(self, subex) + + def visit(self, function, combinator): + """:see: Expression.visit()""" + if self.consequent: + return combinator( + [function(self.first), function(self.second), function(self.consequent)] + ) + else: + return combinator([function(self.first), function(self.second)]) + + def __str__(self): + first = self._str_subex(self.first) + second = self._str_subex(self.second) + drs = Tokens.OPEN + first + " " + self.getOp() + " " + second + Tokens.CLOSE + if self.consequent: + return ( + DrtTokens.OPEN + + drs + + " " + + DrtTokens.IMP + + " " + + "%s" % self.consequent + + DrtTokens.CLOSE + ) + return drs + + def _str_subex(self, subex): + s = "%s" % subex + if isinstance(subex, DrtConcatenation) and 
class PossibleAntecedents(list, DrtExpression, Expression):
    """A list of candidate referent expressions a pronoun may resolve to."""

    def free(self):
        """Set of free variables."""
        return set(self)

    def replace(self, variable, expression, replace_bound=False, alpha_convert=True):
        """Replace all instances of variable v with expression E in self,
        where v is free in self."""
        result = PossibleAntecedents()
        for item in self:
            if item == variable:
                # BUG FIX: append into the new 'result' list.  The old code
                # appended to 'self', so the method mutated the receiver
                # and always returned an empty PossibleAntecedents.
                result.append(expression)
            else:
                result.append(item)
        return result

    def _pretty(self):
        s = "%s" % self
        blank = " " * len(s)
        return [blank, blank, s]

    def __str__(self):
        return "[" + ",".join("%s" % it for it in self) + "]"
def resolve_anaphora(expression, trail=None):
    """
    Resolve pronoun conditions (``PRO(x)``) in ``expression`` to equality
    conditions over their possible antecedents.

    :param expression: ``DrtExpression`` to resolve
    :param trail: list of enclosing expressions already descended through
        (internal; used to collect accessible referents)
    :raises AnaphoraResolutionException: if a pronoun has no candidate
        antecedent at all
    """
    # FIX: avoid the mutable-default-argument pitfall ('trail=[]').  The
    # behavior is unchanged: the recursion only ever passes fresh
    # 'trail + [expression]' lists and never mutates 'trail' in place.
    if trail is None:
        trail = []
    if isinstance(expression, ApplicationExpression):
        if expression.is_pronoun_function():
            possible_antecedents = PossibleAntecedents()
            for ancestor in trail:
                for ref in ancestor.get_refs():
                    refex = expression.make_VariableExpression(ref)

                    # ==========================================================
                    # Don't allow resolution to itself or other types
                    # ==========================================================
                    if refex.__class__ == expression.argument.__class__ and not (
                        refex == expression.argument
                    ):
                        possible_antecedents.append(refex)

            # A unique candidate resolves immediately; otherwise keep the
            # whole candidate list in the equality condition.
            if len(possible_antecedents) == 1:
                resolution = possible_antecedents[0]
            else:
                resolution = possible_antecedents
            return expression.make_EqualityExpression(expression.argument, resolution)
        else:
            r_function = resolve_anaphora(expression.function, trail + [expression])
            r_argument = resolve_anaphora(expression.argument, trail + [expression])
            return expression.__class__(r_function, r_argument)

    elif isinstance(expression, DRS):
        r_conds = []
        for cond in expression.conds:
            r_cond = resolve_anaphora(cond, trail + [expression])

            # if the condition is of the form '(x = [])' then raise exception
            if isinstance(r_cond, EqualityExpression):
                if isinstance(r_cond.first, PossibleAntecedents):
                    # Reverse the order so that the variable is on the left
                    temp = r_cond.first
                    r_cond.first = r_cond.second
                    r_cond.second = temp
                if isinstance(r_cond.second, PossibleAntecedents):
                    if not r_cond.second:
                        raise AnaphoraResolutionException(
                            "Variable '%s' does not "
                            "resolve to anything." % r_cond.first
                        )

            r_conds.append(r_cond)
        if expression.consequent:
            consequent = resolve_anaphora(expression.consequent, trail + [expression])
        else:
            consequent = None
        return expression.__class__(expression.refs, r_conds, consequent)

    elif isinstance(expression, AbstractVariableExpression):
        return expression

    elif isinstance(expression, NegatedExpression):
        return expression.__class__(
            resolve_anaphora(expression.term, trail + [expression])
        )

    elif isinstance(expression, DrtConcatenation):
        if expression.consequent:
            consequent = resolve_anaphora(expression.consequent, trail + [expression])
        else:
            consequent = None
        return expression.__class__(
            resolve_anaphora(expression.first, trail + [expression]),
            resolve_anaphora(expression.second, trail + [expression]),
            consequent,
        )

    elif isinstance(expression, BinaryExpression):
        return expression.__class__(
            resolve_anaphora(expression.first, trail + [expression]),
            resolve_anaphora(expression.second, trail + [expression]),
        )

    elif isinstance(expression, LambdaExpression):
        return expression.__class__(
            expression.variable, resolve_anaphora(expression.term, trail + [expression])
        )
    def draw(self, x=OUTERSPACE, y=TOPSPACE):
        """Draw the DRS

        :param x: left edge at which drawing starts
        :param y: top edge at which drawing starts
        :return: the bottom-rightmost point when drawing onto a
            caller-supplied canvas; ``None`` after showing an interactive
            window of our own
        """
        self._handle(self.drs, self._draw_command, x, y)

        # Only enter an event loop if we created our own root window and
        # are not already running inside IDLE's Tk instance.
        if self.master and not in_idle():
            self.master.mainloop()
        else:
            return self._visit(self.drs, x, y)
horiz_line_y, right, horiz_line_y) + + return self._visit_command(item, x, y) + + def _visit_command(self, item, x, y): + """ + Return the bottom-rightmost point without actually drawing the item + + :param item: the item to visit + :param x: the top of the current drawing area + :param y: the left side of the current drawing area + :return: the bottom-rightmost point + """ + if isinstance(item, str): + return (x + self.canvas.font.measure(item), y + self._get_text_height()) + elif isinstance(item, tuple): + return item + + def _handle(self, expression, command, x=0, y=0): + """ + :param expression: the expression to handle + :param command: the function to apply, either _draw_command or _visit_command + :param x: the top of the current drawing area + :param y: the left side of the current drawing area + :return: the bottom-rightmost point + """ + if command == self._visit_command: + # if we don't need to draw the item, then we can use the cached values + try: + # attempt to retrieve cached values + right = expression._drawing_width + x + bottom = expression._drawing_height + y + return (right, bottom) + except AttributeError: + # the values have not been cached yet, so compute them + pass + + if isinstance(expression, DrtAbstractVariableExpression): + factory = self._handle_VariableExpression + elif isinstance(expression, DRS): + factory = self._handle_DRS + elif isinstance(expression, DrtNegatedExpression): + factory = self._handle_NegatedExpression + elif isinstance(expression, DrtLambdaExpression): + factory = self._handle_LambdaExpression + elif isinstance(expression, BinaryExpression): + factory = self._handle_BinaryExpression + elif isinstance(expression, DrtApplicationExpression): + factory = self._handle_ApplicationExpression + elif isinstance(expression, PossibleAntecedents): + factory = self._handle_VariableExpression + elif isinstance(expression, DrtProposition): + factory = self._handle_DrtProposition + else: + raise 
    def _handle_DRS(self, expression, command, x, y):
        """Lay out / draw a DRS box: referent row, divider, conditions.

        :param expression: ``DRS`` to render
        :param command: ``_draw_command`` or ``_visit_command``
        :param x: left edge of the drawing area
        :param y: top edge of the drawing area
        :return: the bottom-rightmost point of the box
        """
        left = x + self.BUFFER  # indent the left side
        bottom = y + self.BUFFER  # indent the top

        # Handle Discourse Referents
        if expression.refs:
            refs = " ".join("%s" % r for r in expression.refs)
        else:
            # Placeholder so an empty referent row still takes up space.
            refs = " "
        (max_right, bottom) = command(refs, left, bottom)
        bottom += self.BUFFER * 2

        # Handle Conditions
        if expression.conds:
            for cond in expression.conds:
                (right, bottom) = self._handle(cond, command, left, bottom)
                max_right = max(max_right, right)
                bottom += self.BUFFER
        else:
            bottom += self._get_text_height() + self.BUFFER

        # Handle Box
        max_right += self.BUFFER
        return command((max_right, bottom), x, y)
    def _handle_LambdaExpression(self, expression, command, x, y):
        """Lay out / draw a lambda abstraction: '\\vars.' prefix then term.

        :param expression: ``DrtLambdaExpression`` to render
        :param command: ``_draw_command`` or ``_visit_command``
        :param x: left edge of the drawing area
        :param y: top edge of the drawing area
        :return: the bottom-rightmost point
        """
        # Find the width of the lambda symbol and abstracted variables
        variables = DrtTokens.LAMBDA + "%s" % expression.variable + DrtTokens.DOT
        right = self._visit_command(variables, x, y)[0]

        # Handle term
        (right, bottom) = self._handle(expression.term, command, right, y)

        # Handle variables now that we know the y-coordinate (the prefix is
        # vertically centered against the term's height).
        command(
            variables, x, self._get_centered_top(y, bottom - y, self._get_text_height())
        )

        return (right, bottom)
+ command, + right, + self._get_centered_top(y, line_height, first_height), + ) + + # Handle the operator + right = command(" %s " % expression.getOp(), right, centred_string_top)[0] + + # Handle the second operand + second_height = expression.second._drawing_height + (right, second_bottom) = self._handle( + expression.second, + command, + right, + self._get_centered_top(y, line_height, second_height), + ) + + # Handle close paren + right = command(DrtTokens.CLOSE, right, centred_string_top)[0] + + return (right, max(first_bottom, second_bottom)) + + def _handle_DrtProposition(self, expression, command, x, y): + # Find the width of the negation symbol + right = command(expression.variable, x, y)[0] + + # Handle term + (right, bottom) = self._handle(expression.term, command, right, y) + + return (right, bottom) + + def _get_centered_top(self, top, full_height, item_height): + """Get the y-coordinate of the point that a figure should start at if + its height is 'item_height' and it needs to be centered in an area that + starts at 'top' and is 'full_height' tall.""" + return top + (full_height - item_height) / 2 + + +def demo(): + print("=" * 20 + "TEST PARSE" + "=" * 20) + dexpr = DrtExpression.fromstring + print(dexpr(r"([x,y],[sees(x,y)])")) + print(dexpr(r"([x],[man(x), walks(x)])")) + print(dexpr(r"\x.\y.([],[sees(x,y)])")) + print(dexpr(r"\x.([],[walks(x)])(john)")) + print(dexpr(r"(([x],[walks(x)]) + ([y],[runs(y)]))")) + print(dexpr(r"(([],[walks(x)]) -> ([],[runs(x)]))")) + print(dexpr(r"([x],[PRO(x), sees(John,x)])")) + print(dexpr(r"([x],[man(x), -([],[walks(x)])])")) + print(dexpr(r"([],[(([x],[man(x)]) -> ([],[walks(x)]))])")) + + print("=" * 20 + "Test fol()" + "=" * 20) + print(dexpr(r"([x,y],[sees(x,y)])").fol()) + + print("=" * 20 + "Test alpha conversion and lambda expression equality" + "=" * 20) + e1 = dexpr(r"\x.([],[P(x)])") + print(e1) + e2 = e1.alpha_convert(Variable("z")) + print(e2) + print(e1 == e2) + + print("=" * 20 + "Test 
resolve_anaphora()" + "=" * 20) + print(resolve_anaphora(dexpr(r"([x,y,z],[dog(x), cat(y), walks(z), PRO(z)])"))) + print( + resolve_anaphora(dexpr(r"([],[(([x],[dog(x)]) -> ([y],[walks(y), PRO(y)]))])")) + ) + print(resolve_anaphora(dexpr(r"(([x,y],[]) + ([],[PRO(x)]))"))) + + print("=" * 20 + "Test pretty_print()" + "=" * 20) + dexpr(r"([],[])").pretty_print() + dexpr( + r"([],[([x],[big(x), dog(x)]) -> ([],[bark(x)]) -([x],[walk(x)])])" + ).pretty_print() + dexpr(r"([x,y],[x=y]) + ([z],[dog(z), walk(z)])").pretty_print() + dexpr(r"([],[([x],[]) | ([y],[]) | ([z],[dog(z), walk(z)])])").pretty_print() + dexpr(r"\P.\Q.(([x],[]) + P(x) + Q(x))(\x.([],[dog(x)]))").pretty_print() + + +def test_draw(): + try: + from tkinter import Tk + except ImportError as e: + raise ValueError("tkinter is required, but it's not available.") + + expressions = [ + r"x", + r"([],[])", + r"([x],[])", + r"([x],[man(x)])", + r"([x,y],[sees(x,y)])", + r"([x],[man(x), walks(x)])", + r"\x.([],[man(x), walks(x)])", + r"\x y.([],[sees(x,y)])", + r"([],[(([],[walks(x)]) + ([],[runs(x)]))])", + r"([x],[man(x), -([],[walks(x)])])", + r"([],[(([x],[man(x)]) -> ([],[walks(x)]))])", + ] + + for e in expressions: + d = DrtExpression.fromstring(e) + d.draw() + + +if __name__ == "__main__": + demo() diff --git a/lib/python3.10/site-packages/nltk/sem/drt_glue_demo.py b/lib/python3.10/site-packages/nltk/sem/drt_glue_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..fe27c9fc66f92600ebdcb13eb622d3d07db36985 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/sem/drt_glue_demo.py @@ -0,0 +1,553 @@ +# Natural Language Toolkit: GUI Demo for Glue Semantics with Discourse +# Representation Theory (DRT) as meaning language +# +# Author: Dan Garrette +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +try: + from tkinter import Button, Frame, IntVar, Label, Listbox, Menu, Scrollbar, Tk + from tkinter.font import Font + + from nltk.draw.util 
class DrtGlueDemo:
    """Tk GUI that parses example sentences with DRT Glue semantics and
    draws the resulting DRSs. Readings are cached per example; parse errors
    are cached (and displayed) as DRT variable expressions."""

    def __init__(self, examples):
        # Set up the main window.
        self._top = Tk()
        self._top.title("DRT Glue Demo")

        # Set up key bindings.
        self._init_bindings()

        # Initialize the fonts.
        self._init_fonts(self._top)

        self._examples = examples
        self._readingCache = [None for example in examples]

        # The user can hide the grammar.
        self._show_grammar = IntVar(self._top)
        self._show_grammar.set(1)

        # Set the data to None
        self._curExample = -1
        self._readings = []
        self._drs = None
        self._drsWidget = None
        self._error = None

        self._init_glue()

        # Create the basic frames.
        self._init_menubar(self._top)
        self._init_buttons(self._top)
        self._init_exampleListbox(self._top)
        self._init_readingListbox(self._top)
        self._init_canvas(self._top)

        # Resize callback
        # NOTE(review): the event-name string appears stripped by the
        # extraction (likely "<Configure>" originally) — confirm.
        self._canvas.bind("", self._configure)

    #########################################
    ## Initialization Helpers
    #########################################

    def _init_glue(self):
        # Small regexp tagger covering exactly the demo vocabulary.
        tagger = RegexpTagger(
            [
                ("^(David|Mary|John)$", "NNP"),
                (
                    "^(walks|sees|eats|chases|believes|gives|sleeps|chases|persuades|tries|seems|leaves)$",
                    "VB",
                ),
                ("^(go|order|vanish|find|approach)$", "VB"),
                ("^(a)$", "ex_quant"),
                ("^(every)$", "univ_quant"),
                ("^(sandwich|man|dog|pizza|unicorn|cat|senator)$", "NN"),
                ("^(big|gray|former)$", "JJ"),
                ("^(him|himself)$", "PRP"),
            ]
        )

        depparser = MaltParser(tagger=tagger)
        self._glue = DrtGlue(depparser=depparser, remove_duplicates=False)

    def _init_fonts(self, root):
        # See:
        # NOTE(review): the URL this comment pointed at was lost in extraction.
        self._sysfont = Font(font=Button()["font"])
        root.option_add("*Font", self._sysfont)

        # What's our font size (default=same as sysfont)
        self._size = IntVar(root)
        self._size.set(self._sysfont.cget("size"))

        self._boldfont = Font(family="helvetica", weight="bold", size=self._size.get())
        self._font = Font(family="helvetica", size=self._size.get())
        if self._size.get() < 0:
            big = self._size.get() - 2
        else:
            big = self._size.get() + 2
        self._bigfont = Font(family="helvetica", weight="bold", size=big)

    def _init_exampleListbox(self, parent):
        self._exampleFrame = listframe = Frame(parent)
        self._exampleFrame.pack(fill="both", side="left", padx=2)
        self._exampleList_label = Label(
            self._exampleFrame, font=self._boldfont, text="Examples"
        )
        self._exampleList_label.pack()
        self._exampleList = Listbox(
            self._exampleFrame,
            selectmode="single",
            relief="groove",
            background="white",
            foreground="#909090",
            font=self._font,
            selectforeground="#004040",
            selectbackground="#c0f0c0",
        )

        self._exampleList.pack(side="right", fill="both", expand=1)

        for example in self._examples:
            self._exampleList.insert("end", ("  %s" % example))
        self._exampleList.config(height=min(len(self._examples), 25), width=40)

        # Add a scrollbar if there are more than 25 examples.
        if len(self._examples) > 25:
            listscroll = Scrollbar(self._exampleFrame, orient="vertical")
            self._exampleList.config(yscrollcommand=listscroll.set)
            listscroll.config(command=self._exampleList.yview)
            listscroll.pack(side="left", fill="y")

        # If they select a example, apply it.
        # NOTE(review): virtual-event name stripped (likely "<<ListboxSelect>>").
        self._exampleList.bind("<>", self._exampleList_select)

    def _init_readingListbox(self, parent):
        self._readingFrame = listframe = Frame(parent)
        self._readingFrame.pack(fill="both", side="left", padx=2)
        self._readingList_label = Label(
            self._readingFrame, font=self._boldfont, text="Readings"
        )
        self._readingList_label.pack()
        self._readingList = Listbox(
            self._readingFrame,
            selectmode="single",
            relief="groove",
            background="white",
            foreground="#909090",
            font=self._font,
            selectforeground="#004040",
            selectbackground="#c0f0c0",
        )

        self._readingList.pack(side="right", fill="both", expand=1)

        # Add a scrollbar if there are more than 25 examples.
        listscroll = Scrollbar(self._readingFrame, orient="vertical")
        self._readingList.config(yscrollcommand=listscroll.set)
        listscroll.config(command=self._readingList.yview)
        listscroll.pack(side="right", fill="y")

        self._populate_readingListbox()

    def _populate_readingListbox(self):
        # Populate the listbox with integers
        self._readingList.delete(0, "end")
        for i in range(len(self._readings)):
            self._readingList.insert("end", ("  %s" % (i + 1)))
        self._readingList.config(height=min(len(self._readings), 25), width=5)

        # If they select a example, apply it.
        self._readingList.bind("<>", self._readingList_select)

    def _init_bindings(self):
        # Key bindings are a good thing.
        # NOTE(review): several event-name strings below were stripped by the
        # extraction (empty ""); originally likely "<Control-q>", "<space>",
        # "<BackSpace>", etc. — confirm against upstream before relying on them.
        self._top.bind("", self.destroy)
        self._top.bind("", self.destroy)
        self._top.bind("", self.destroy)
        self._top.bind("n", self.next)
        self._top.bind("", self.next)
        self._top.bind("p", self.prev)
        self._top.bind("", self.prev)

    def _init_buttons(self, parent):
        # Set up the frames.
        self._buttonframe = buttonframe = Frame(parent)
        buttonframe.pack(fill="none", side="bottom", padx=3, pady=2)
        Button(
            buttonframe,
            text="Prev",
            background="#90c0d0",
            foreground="black",
            command=self.prev,
        ).pack(side="left")
        Button(
            buttonframe,
            text="Next",
            background="#90c0d0",
            foreground="black",
            command=self.next,
        ).pack(side="left")

    def _configure(self, event):
        self._autostep = 0
        (x1, y1, x2, y2) = self._cframe.scrollregion()
        y2 = event.height - 6
        self._canvas["scrollregion"] = "%d %d %d %d" % (x1, y1, x2, y2)
        self._redraw()

    def _init_canvas(self, parent):
        self._cframe = CanvasFrame(
            parent,
            background="white",
            # width=525, height=250,
            closeenough=10,
            border=2,
            relief="sunken",
        )
        self._cframe.pack(expand=1, fill="both", side="top", pady=2)
        canvas = self._canvas = self._cframe.canvas()

        # Initially, there's no tree or text
        self._tree = None
        self._textwidgets = []
        self._textline = None

    def _init_menubar(self, parent):
        menubar = Menu(parent)

        filemenu = Menu(menubar, tearoff=0)
        filemenu.add_command(
            label="Exit", underline=1, command=self.destroy, accelerator="q"
        )
        menubar.add_cascade(label="File", underline=0, menu=filemenu)

        actionmenu = Menu(menubar, tearoff=0)
        actionmenu.add_command(
            label="Next", underline=0, command=self.next, accelerator="n, Space"
        )
        actionmenu.add_command(
            label="Previous", underline=0, command=self.prev, accelerator="p, Backspace"
        )
        menubar.add_cascade(label="Action", underline=0, menu=actionmenu)

        optionmenu = Menu(menubar, tearoff=0)
        # NOTE(review): 'variable=' normally expects a Tk variable, but
        # self._glue.remove_duplicates is toggled as a plain bool in
        # _toggle_remove_duplicates — confirm this checkbutton tracks state.
        optionmenu.add_checkbutton(
            label="Remove Duplicates",
            underline=0,
            variable=self._glue.remove_duplicates,
            command=self._toggle_remove_duplicates,
            accelerator="r",
        )
        menubar.add_cascade(label="Options", underline=0, menu=optionmenu)

        viewmenu = Menu(menubar, tearoff=0)
        viewmenu.add_radiobutton(
            label="Tiny",
            variable=self._size,
            underline=0,
            value=10,
            command=self.resize,
        )
        viewmenu.add_radiobutton(
            label="Small",
            variable=self._size,
            underline=0,
            value=12,
            command=self.resize,
        )
        viewmenu.add_radiobutton(
            label="Medium",
            variable=self._size,
            underline=0,
            value=14,
            command=self.resize,
        )
        viewmenu.add_radiobutton(
            label="Large",
            variable=self._size,
            underline=0,
            value=18,
            command=self.resize,
        )
        viewmenu.add_radiobutton(
            label="Huge",
            variable=self._size,
            underline=0,
            value=24,
            command=self.resize,
        )
        menubar.add_cascade(label="View", underline=0, menu=viewmenu)

        helpmenu = Menu(menubar, tearoff=0)
        helpmenu.add_command(label="About", underline=0, command=self.about)
        menubar.add_cascade(label="Help", underline=0, menu=helpmenu)

        parent.config(menu=menubar)

    #########################################
    ## Main draw procedure
    #########################################

    def _redraw(self):
        canvas = self._canvas

        # Delete the old DRS, widgets, etc.
        if self._drsWidget is not None:
            self._drsWidget.clear()

        if self._drs:
            self._drsWidget = DrsWidget(self._canvas, self._drs)
            self._drsWidget.draw()

        if self._error:
            self._drsWidget = DrsWidget(self._canvas, self._error)
            self._drsWidget.draw()

    #########################################
    ## Button Callbacks
    #########################################

    def destroy(self, *e):
        self._autostep = 0
        if self._top is None:
            return
        self._top.destroy()
        self._top = None

    def prev(self, *e):
        """Step to the previous reading, or wrap to the previous example."""
        selection = self._readingList.curselection()
        readingListSize = self._readingList.size()

        # there are readings
        if readingListSize > 0:
            # if one reading is currently selected
            if len(selection) == 1:
                index = int(selection[0])

                # if it's on (or before) the first item
                if index <= 0:
                    self._select_previous_example()
                else:
                    self._readingList_store_selection(index - 1)

            else:
                # select its first reading
                self._readingList_store_selection(readingListSize - 1)

        else:
            self._select_previous_example()

    def _select_previous_example(self):
        # if the current example is not the first example
        if self._curExample > 0:
            self._exampleList_store_selection(self._curExample - 1)
        else:
            # go to the last example
            self._exampleList_store_selection(len(self._examples) - 1)

    def next(self, *e):
        """Step to the next reading, or wrap to the next example."""
        selection = self._readingList.curselection()
        readingListSize = self._readingList.size()

        # if there are readings
        if readingListSize > 0:
            # if one reading is currently selected
            if len(selection) == 1:
                index = int(selection[0])

                # if it's on (or past) the last item
                if index >= (readingListSize - 1):
                    self._select_next_example()
                else:
                    self._readingList_store_selection(index + 1)

            else:
                # select its first reading
                self._readingList_store_selection(0)

        else:
            self._select_next_example()

    def _select_next_example(self):
        # if the current example is not the last example
        if self._curExample < len(self._examples) - 1:
            self._exampleList_store_selection(self._curExample + 1)
        else:
            # go to the first example
            self._exampleList_store_selection(0)

    def about(self, *e):
        ABOUT = (
            "NLTK Discourse Representation Theory (DRT) Glue Semantics Demo\n"
            + "Written by Daniel H. Garrette"
        )
        TITLE = "About: NLTK DRT Glue Demo"
        # NOTE(review): bare 'except' — falls back to ShowText on *any*
        # failure, not just a missing tkinter.messagebox; consider narrowing.
        try:
            from tkinter.messagebox import Message

            Message(message=ABOUT, title=TITLE).show()
        except:
            ShowText(self._top, TITLE, ABOUT)

    def postscript(self, *e):
        self._autostep = 0
        self._cframe.print_to_file()

    def mainloop(self, *args, **kwargs):
        """
        Enter the Tkinter mainloop.  This function must be called if
        this demo is created from a non-interactive program (e.g.
        from a script); otherwise, the demo will close as soon as
        the script completes.
        """
        if in_idle():
            return
        self._top.mainloop(*args, **kwargs)

    def resize(self, size=None):
        if size is not None:
            self._size.set(size)
        size = self._size.get()
        self._font.configure(size=-(abs(size)))
        self._boldfont.configure(size=-(abs(size)))
        self._sysfont.configure(size=-(abs(size)))
        self._bigfont.configure(size=-(abs(size + 2)))
        self._redraw()

    def _toggle_remove_duplicates(self):
        self._glue.remove_duplicates = not self._glue.remove_duplicates

        # Invalidate all cached readings: the parse results depend on the flag.
        self._exampleList.selection_clear(0, "end")
        self._readings = []
        self._populate_readingListbox()
        self._readingCache = [None for ex in self._examples]
        self._curExample = -1
        self._error = None

        self._drs = None
        self._redraw()

    def _exampleList_select(self, event):
        selection = self._exampleList.curselection()
        if len(selection) != 1:
            return
        self._exampleList_store_selection(int(selection[0]))

    def _exampleList_store_selection(self, index):
        """Select example 'index': reuse cached readings (or cached error),
        otherwise parse the sentence and cache the result."""
        self._curExample = index
        example = self._examples[index]

        self._exampleList.selection_clear(0, "end")
        if example:
            cache = self._readingCache[index]
            if cache:
                if isinstance(cache, list):
                    self._readings = cache
                    self._error = None
                else:
                    # cached value is an error expression, not a reading list
                    self._readings = []
                    self._error = cache
            else:
                try:
                    self._readings = self._glue.parse_to_meaning(example)
                    self._error = None
                    self._readingCache[index] = self._readings
                except Exception as e:
                    self._readings = []
                    self._error = DrtVariableExpression(Variable("Error: " + str(e)))
                    self._readingCache[index] = self._error

                    # add a star to the end of the example
                    self._exampleList.delete(index)
                    self._exampleList.insert(index, ("  %s *" % example))
                    self._exampleList.config(
                        height=min(len(self._examples), 25), width=40
                    )

            self._populate_readingListbox()

            self._exampleList.selection_set(index)

            self._drs = None
            self._redraw()

    def _readingList_select(self, event):
        selection = self._readingList.curselection()
        if len(selection) != 1:
            return
        self._readingList_store_selection(int(selection[0]))

    def _readingList_store_selection(self, index):
        """Select reading 'index' and draw its simplified, normalized,
        anaphora-resolved DRS."""
        reading = self._readings[index]

        self._readingList.selection_clear(0, "end")
        if reading:
            self._readingList.selection_set(index)

            self._drs = reading.simplify().normalize().resolve_anaphora()

            self._redraw()


class DrsWidget:
    """Thin wrapper that draws a DRS onto a Tk canvas and can blank its
    own bounding box."""

    def __init__(self, canvas, drs, **attribs):
        self._drs = drs
        self._canvas = canvas
        canvas.font = Font(
            font=canvas.itemcget(canvas.create_text(0, 0, text=""), "font")
        )
        canvas._BUFFER = 3
        self.bbox = (0, 0, 0, 0)

    def draw(self):
        (right, bottom) = DrsDrawer(self._drs, canvas=self._canvas).draw()
        self.bbox = (0, 0, right + 1, bottom + 1)

    def clear(self):
        # Paint a white rectangle over the last drawn area.
        self._canvas.create_rectangle(self.bbox, fill="white", width="0")


def demo():
    examples = [
        "John walks",
        "David sees Mary",
        "David eats a sandwich",
        "every man chases a dog",
        # 'every man believes a dog yawns',
        # 'John gives David a sandwich',
        "John chases himself",
        # 'John persuades David to order a pizza',
        # 'John tries to go',
        # 'John tries to find a unicorn',
        # 'John seems to vanish',
        # 'a unicorn seems to approach',
        # 'every big cat leaves',
        # 'every gray cat leaves',
        # 'every big gray cat leaves',
        # 'a former senator leaves',
        # 'John likes a cat',
        # 'John likes every cat',
        # 'he walks',
        # 'John walks and he leaves'
    ]
    DrtGlueDemo(examples).mainloop()


if __name__ == "__main__":
    demo()
class Error(Exception):
    """Base class for errors raised by this module."""

    pass


class Undefined(Error):
    """Raised when an expression has no defined interpretation."""

    pass


def trace(f, *args, **kw):
    """Call ``f(*args, **kw)``; if a truthy ``trace`` argument was passed,
    first print the remaining argument bindings."""
    argspec = inspect.getfullargspec(f)
    d = dict(zip(argspec[0], args))
    if d.pop("trace", None):
        print()
        for item in d.items():
            print("%s => %s" % item)
    return f(*args, **kw)


def is_rel(s):
    """
    Check whether a set represents a relation (of any arity).

    :param s: a set containing tuples of str elements
    :type s: set
    :rtype: bool
    :raises ValueError: if the tuples in ``s`` do not all have the same length
    """
    # we have the empty relation, i.e. set()
    if len(s) == 0:
        return True
    # all the elements are tuples of the same length
    # (fixed: the original compared len(max(s)) == len(min(s)), but max/min
    # are lexicographic, so ragged sets such as {("a",), ("b",), ("a", "z")}
    # slipped through)
    if all(isinstance(el, tuple) for el in s) and len({len(el) for el in s}) == 1:
        return True
    raise ValueError("Set %r contains sequences of different lengths" % s)


def set2rel(s):
    """
    Convert a set containing individuals (strings or numbers) into a set of
    unary tuples. Any tuples of strings already in the set are passed through
    unchanged.

    For example:
      - set(['a', 'b']) => set([('a',), ('b',)])
      - set([3, 27]) => set([('3',), ('27',)])

    :type s: set
    :rtype: set of tuple of str
    """
    new = set()
    for elem in s:
        if isinstance(elem, str):
            new.add((elem,))
        elif isinstance(elem, int):
            # fixed: the original added the bare string str(elem), producing a
            # mixed set that violated the documented unary-tuple invariant
            new.add((str(elem),))
        else:
            new.add(elem)
    return new


def arity(rel):
    """
    Check the arity of a relation.

    :type rel: set of tuples
    :rtype: int
    """
    if len(rel) == 0:
        return 0
    return len(list(rel)[0])


class Valuation(dict):
    """
    A dictionary which represents a model-theoretic Valuation of non-logical constants.
    Keys are strings representing the constants to be interpreted, and values correspond
    to individuals (represented as strings) and n-ary relations (represented as sets of tuples
    of strings).

    An instance of ``Valuation`` will raise a KeyError exception (i.e.,
    just behave like a standard dictionary) if indexed with an expression that
    is not in its list of symbols.
    """

    def __init__(self, xs):
        """
        :param xs: a list of (symbol, value) pairs.
        :raises ValueError: if a value is neither a string, bool, nor set.
        """
        super().__init__()
        for (sym, val) in xs:
            if isinstance(val, str) or isinstance(val, bool):
                self[sym] = val
            elif isinstance(val, set):
                # normalize sets of individuals into sets of unary tuples
                self[sym] = set2rel(val)
            else:
                msg = textwrap.fill(
                    "Error in initializing Valuation. "
                    "Unrecognized value for symbol '%s':\n%s" % (sym, val),
                    width=66,
                )

                raise ValueError(msg)

    def __getitem__(self, key):
        if key in self:
            return dict.__getitem__(self, key)
        else:
            raise Undefined("Unknown expression: '%s'" % key)

    def __str__(self):
        return pformat(self)

    @property
    def domain(self):
        """Set-theoretic domain of the value-space of a Valuation."""
        dom = []
        for val in self.values():
            if isinstance(val, str):
                dom.append(val)
            elif not isinstance(val, bool):
                dom.extend(
                    [elem for tuple_ in val for elem in tuple_ if elem is not None]
                )
        return set(dom)

    @property
    def symbols(self):
        """The non-logical constants which the Valuation recognizes."""
        return sorted(self.keys())

    @classmethod
    def fromstring(cls, s):
        """Parse a valuation from its string representation."""
        return read_valuation(s)
+ if tuple_strings: + set_elements = [] + for ts in tuple_strings: + ts = ts[1:-1] + element = tuple(_ELEMENT_SPLIT_RE.split(ts)) + set_elements.append(element) + else: + set_elements = _ELEMENT_SPLIT_RE.split(value) + value = set(set_elements) + return symbol, value + + +def read_valuation(s, encoding=None): + """ + Convert a valuation string into a valuation. + + :param s: a valuation string + :type s: str + :param encoding: the encoding of the input string, if it is binary + :type encoding: str + :return: a ``nltk.sem`` valuation + :rtype: Valuation + """ + if encoding is not None: + s = s.decode(encoding) + statements = [] + for linenum, line in enumerate(s.splitlines()): + line = line.strip() + if line.startswith("#") or line == "": + continue + try: + statements.append(_read_valuation_line(line)) + except ValueError as e: + raise ValueError(f"Unable to parse line {linenum}: {line}") from e + return Valuation(statements) + + +class Assignment(dict): + r""" + A dictionary which represents an assignment of values to variables. + + An assignment can only assign values from its domain. + + If an unknown expression *a* is passed to a model *M*\ 's + interpretation function *i*, *i* will first check whether *M*\ 's + valuation assigns an interpretation to *a* as a constant, and if + this fails, *i* will delegate the interpretation of *a* to + *g*. *g* only assigns values to individual variables (i.e., + members of the class ``IndividualVariableExpression`` in the ``logic`` + module. If a variable is not assigned a value by *g*, it will raise + an ``Undefined`` exception. + + A variable *Assignment* is a mapping from individual variables to + entities in the domain. Individual variables are usually indicated + with the letters ``'x'``, ``'y'``, ``'w'`` and ``'z'``, optionally + followed by an integer (e.g., ``'x0'``, ``'y332'``). Assignments are + created using the ``Assignment`` constructor, which also takes the + domain as a parameter. 
    def __init__(self, domain, assign=None):
        """
        :param domain: the domain of discourse
        :type domain: set
        :param assign: a list of (varname, value) associations
        :type assign: list
        """
        super().__init__()
        self.domain = domain
        if assign:
            for (var, val) in assign:
                # every assigned value must come from the domain,
                # and every key must look like an individual variable
                assert val in self.domain, "'{}' is not in the domain: {}".format(
                    val,
                    self.domain,
                )
                assert is_indvar(var), (
                    "Wrong format for an Individual Variable: '%s'" % var
                )
                self[var] = val
        self.variant = None
        self._addvariant()

    def __getitem__(self, key):
        # Undefined (not KeyError) signals an unassigned variable to callers.
        if key in self:
            return dict.__getitem__(self, key)
        else:
            raise Undefined("Not recognized as a variable: '%s'" % key)

    def copy(self):
        """Return a shallow copy sharing the same domain."""
        new = Assignment(self.domain)
        new.update(self)
        return new

    def purge(self, var=None):
        """
        Remove one or all keys (i.e. logic variables) from an
        assignment, and update ``self.variant``.

        :param var: a Variable acting as a key for the assignment.
        """
        if var:
            del self[var]
        else:
            self.clear()
        self._addvariant()
        return None

    def __str__(self):
        """
        Pretty printing for assignments. {'x', 'u'} appears as 'g[u/x]'
        """
        gstring = "g"
        # Deterministic output for unit testing.
        variant = sorted(self.variant)
        for (val, var) in variant:
            gstring += f"[{val}/{var}]"
        return gstring

    def _addvariant(self):
        """
        Create a more pretty-printable version of the assignment.
        """
        # self.variant mirrors the dict as (value, variable) pairs
        list_ = []
        for item in self.items():
            pair = (item[1], item[0])
            list_.append(pair)
        self.variant = list_
        return None

    def add(self, var, val):
        """
        Add a new variable-value pair to the assignment, and update
        ``self.variant``.

        """
        assert val in self.domain, f"{val} is not in the domain {self.domain}"
        assert is_indvar(var), "Wrong format for an Individual Variable: '%s'" % var
        self[var] = val
        self._addvariant()
        # returns self so callers can chain, e.g. g.add('x', u) passed inline
        return self
    def evaluate(self, expr, g, trace=None):
        """
        Read input expressions, and provide a handler for ``satisfy``
        that blocks further propagation of the ``Undefined`` error.

        :param expr: An ``Expression`` of ``logic``.
        :type g: Assignment
        :param g: an assignment to individual variables.
        :rtype: bool or 'Undefined'
        """
        try:
            parsed = Expression.fromstring(expr)
            value = self.satisfy(parsed, g, trace=trace)
            if trace:
                print()
                print(f"'{expr}' evaluates to {value} under M, {g}")
            return value
        except Undefined:
            if trace:
                print()
                print(f"'{expr}' is undefined under M, {g}")
            return "Undefined"

    def satisfy(self, parsed, g, trace=None):
        """
        Recursive interpretation function for a formula of first-order logic.

        Raises an ``Undefined`` error when ``parsed`` is an atomic string
        but is not a symbol or an individual variable.

        :return: Returns a truth value or ``Undefined`` if ``parsed`` is\
        complex, and calls the interpretation function ``i`` if ``parsed``\
        is atomic.

        :param parsed: An expression of ``logic``.
        :type g: Assignment
        :param g: an assignment to individual variables.
        """

        if isinstance(parsed, ApplicationExpression):
            function, arguments = parsed.uncurry()
            if isinstance(function, AbstractVariableExpression):
                # It's a predicate expression ("P(x,y)"), so used uncurried arguments
                funval = self.satisfy(function, g)
                argvals = tuple(self.satisfy(arg, g) for arg in arguments)
                return argvals in funval
            else:
                # It must be a lambda expression, so use curried form
                funval = self.satisfy(parsed.function, g)
                argval = self.satisfy(parsed.argument, g)
                return funval[argval]
        elif isinstance(parsed, NegatedExpression):
            return not self.satisfy(parsed.term, g)
        elif isinstance(parsed, AndExpression):
            return self.satisfy(parsed.first, g) and self.satisfy(parsed.second, g)
        elif isinstance(parsed, OrExpression):
            return self.satisfy(parsed.first, g) or self.satisfy(parsed.second, g)
        elif isinstance(parsed, ImpExpression):
            # material implication: (not first) or second
            return (not self.satisfy(parsed.first, g)) or self.satisfy(parsed.second, g)
        elif isinstance(parsed, IffExpression):
            return self.satisfy(parsed.first, g) == self.satisfy(parsed.second, g)
        elif isinstance(parsed, EqualityExpression):
            return self.satisfy(parsed.first, g) == self.satisfy(parsed.second, g)
        elif isinstance(parsed, AllExpression):
            # universal: every domain element must satisfy the body
            new_g = g.copy()
            for u in self.domain:
                new_g.add(parsed.variable.name, u)
                if not self.satisfy(parsed.term, new_g):
                    return False
            return True
        elif isinstance(parsed, ExistsExpression):
            # existential: some domain element must satisfy the body
            new_g = g.copy()
            for u in self.domain:
                new_g.add(parsed.variable.name, u)
                if self.satisfy(parsed.term, new_g):
                    return True
            return False
        elif isinstance(parsed, IotaExpression):
            # NOTE(review): this branch is byte-identical to the Exists branch
            # and returns a bool, not the unique satisfying individual that
            # iota ("the x such that ...") usually denotes; uniqueness is not
            # checked either — confirm this is intended.
            new_g = g.copy()
            for u in self.domain:
                new_g.add(parsed.variable.name, u)
                if self.satisfy(parsed.term, new_g):
                    return True
            return False
        elif isinstance(parsed, LambdaExpression):
            # build the characteristic function as an explicit dict over the domain
            cf = {}
            var = parsed.variable.name
            for u in self.domain:
                val = self.satisfy(parsed.term, g.add(var, u))
                # NB the dict would be a lot smaller if we do this:
                # if val: cf[u] = val
                # But then need to deal with cases where f(a) should yield
                # a function rather than just False.
                cf[u] = val
            return cf
        else:
            # atomic expression: delegate to the interpretation function
            return self.i(parsed, g, trace)

    # @decorator(trace_eval)
    def i(self, parsed, g, trace=False):
        """
        An interpretation function.

        Assuming that ``parsed`` is atomic:

        - if ``parsed`` is a non-logical constant, calls the valuation *V*
        - else if ``parsed`` is an individual variable, calls assignment *g*
        - else returns ``Undefined``.

        :param parsed: an ``Expression`` of ``logic``.
        :type g: Assignment
        :param g: an assignment to individual variables.
        :return: a semantic value
        """
        # If parsed is a propositional letter 'p', 'q', etc, it could be in valuation.symbols
        # and also be an IndividualVariableExpression. We want to catch this first case.
        # So there is a procedural consequence to the ordering of clauses here:
        if parsed.variable.name in self.valuation.symbols:
            return self.valuation[parsed.variable.name]
        elif isinstance(parsed, IndividualVariableExpression):
            return g[parsed.variable.name]

        else:
            raise Undefined("Can't find a value for %s" % parsed)

    def satisfiers(self, parsed, varex, g, trace=None, nesting=0):
        """
        Generate the entities from the model's domain that satisfy an open formula.

        :param parsed: an open formula
        :type parsed: Expression
        :param varex: the relevant free individual variable in ``parsed``.
        :type varex: VariableExpression or str
        :param g: a variable assignment
        :type g: Assignment
        :return: a set of the entities that satisfy ``parsed``.
        :raises Undefined: if ``varex`` is not free in ``parsed``.
        """

        spacer = "   "
        indent = spacer + (spacer * nesting)
        candidates = []

        if isinstance(varex, str):
            var = Variable(varex)
        else:
            var = varex

        if var in parsed.free():
            if trace:
                print()
                print(
                    (spacer * nesting)
                    + f"Open formula is '{parsed}' with assignment {g}"
                )
            for u in self.domain:
                new_g = g.copy()
                new_g.add(var.name, u)
                if trace and trace > 1:
                    lowtrace = trace - 1
                else:
                    lowtrace = 0
                value = self.satisfy(parsed, new_g, lowtrace)

                if trace:
                    print(indent + "(trying assignment %s)" % new_g)

                # parsed == False under g[u/var]?
                # ('== False' rather than 'not value' keeps non-bool values
                # such as characteristic-function dicts in the candidate set)
                if value == False:
                    if trace:
                        print(indent + f"value of '{parsed}' under {new_g} is False")

                # so g[u/var] is a satisfying assignment
                else:
                    candidates.append(u)
                    if trace:
                        print(indent + f"value of '{parsed}' under {new_g} is {value}")

            result = {c for c in candidates}
        # var isn't free in parsed
        else:
            raise Undefined(f"{var.name} is not free in {parsed}")

        return result
+# ////////////////////////////////////////////////////////////////////// +# number of spacer chars +mult = 30 + +# Demo 1: Propositional Logic +################# +def propdemo(trace=None): + """Example of a propositional model.""" + + global val1, dom1, m1, g1 + val1 = Valuation([("P", True), ("Q", True), ("R", False)]) + dom1 = set() + m1 = Model(dom1, val1) + g1 = Assignment(dom1) + + print() + print("*" * mult) + print("Propositional Formulas Demo") + print("*" * mult) + print("(Propositional constants treated as nullary predicates)") + print() + print("Model m1:\n", m1) + print("*" * mult) + sentences = [ + "(P & Q)", + "(P & R)", + "- P", + "- R", + "- - P", + "- (P & R)", + "(P | R)", + "(R | P)", + "(R | R)", + "(- P | R)", + "(P | - P)", + "(P -> Q)", + "(P -> R)", + "(R -> P)", + "(P <-> P)", + "(R <-> R)", + "(P <-> R)", + ] + + for sent in sentences: + if trace: + print() + m1.evaluate(sent, g1, trace) + else: + print(f"The value of '{sent}' is: {m1.evaluate(sent, g1)}") + + +# Demo 2: FOL Model +############# + + +def folmodel(quiet=False, trace=None): + """Example of a first-order model.""" + + global val2, v2, dom2, m2, g2 + + v2 = [ + ("adam", "b1"), + ("betty", "g1"), + ("fido", "d1"), + ("girl", {"g1", "g2"}), + ("boy", {"b1", "b2"}), + ("dog", {"d1"}), + ("love", {("b1", "g1"), ("b2", "g2"), ("g1", "b1"), ("g2", "b1")}), + ] + val2 = Valuation(v2) + dom2 = val2.domain + m2 = Model(dom2, val2) + g2 = Assignment(dom2, [("x", "b1"), ("y", "g2")]) + + if not quiet: + print() + print("*" * mult) + print("Models Demo") + print("*" * mult) + print("Model m2:\n", "-" * 14, "\n", m2) + print("Variable assignment = ", g2) + + exprs = ["adam", "boy", "love", "walks", "x", "y", "z"] + parsed_exprs = [Expression.fromstring(e) for e in exprs] + + print() + for parsed in parsed_exprs: + try: + print( + "The interpretation of '%s' in m2 is %s" + % (parsed, m2.i(parsed, g2)) + ) + except Undefined: + print("The interpretation of '%s' in m2 is Undefined" % parsed) 
+ + applications = [ + ("boy", ("adam")), + ("walks", ("adam",)), + ("love", ("adam", "y")), + ("love", ("y", "adam")), + ] + + for (fun, args) in applications: + try: + funval = m2.i(Expression.fromstring(fun), g2) + argsval = tuple(m2.i(Expression.fromstring(arg), g2) for arg in args) + print(f"{fun}({args}) evaluates to {argsval in funval}") + except Undefined: + print(f"{fun}({args}) evaluates to Undefined") + + +# Demo 3: FOL +######### + + +def foldemo(trace=None): + """ + Interpretation of closed expressions in a first-order model. + """ + folmodel(quiet=True) + + print() + print("*" * mult) + print("FOL Formulas Demo") + print("*" * mult) + + formulas = [ + "love (adam, betty)", + "(adam = mia)", + "\\x. (boy(x) | girl(x))", + "\\x. boy(x)(adam)", + "\\x y. love(x, y)", + "\\x y. love(x, y)(adam)(betty)", + "\\x y. love(x, y)(adam, betty)", + "\\x y. (boy(x) & love(x, y))", + "\\x. exists y. (boy(x) & love(x, y))", + "exists z1. boy(z1)", + "exists x. (boy(x) & -(x = adam))", + "exists x. (boy(x) & all y. love(y, x))", + "all x. (boy(x) | girl(x))", + "all x. (girl(x) -> exists y. boy(y) & love(x, y))", # Every girl loves exists boy. + "exists x. (boy(x) & all y. (girl(y) -> love(y, x)))", # There is exists boy that every girl loves. + "exists x. (boy(x) & all y. (girl(y) -> love(x, y)))", # exists boy loves every girl. + "all x. (dog(x) -> - girl(x))", + "exists x. exists y. (love(x, y) & love(x, y))", + ] + + for fmla in formulas: + g2.purge() + if trace: + m2.evaluate(fmla, g2, trace) + else: + print(f"The value of '{fmla}' is: {m2.evaluate(fmla, g2)}") + + +# Demo 3: Satisfaction +############# + + +def satdemo(trace=None): + """Satisfiers of an open formula in a first order model.""" + + print() + print("*" * mult) + print("Satisfiers Demo") + print("*" * mult) + + folmodel(quiet=True) + + formulas = [ + "boy(x)", + "(x = x)", + "(boy(x) | girl(x))", + "(boy(x) & girl(x))", + "love(adam, x)", + "love(x, adam)", + "-(x = adam)", + "exists z22. 
love(x, z22)", + "exists y. love(y, x)", + "all y. (girl(y) -> love(x, y))", + "all y. (girl(y) -> love(y, x))", + "all y. (girl(y) -> (boy(x) & love(y, x)))", + "(boy(x) & all y. (girl(y) -> love(x, y)))", + "(boy(x) & all y. (girl(y) -> love(y, x)))", + "(boy(x) & exists y. (girl(y) & love(y, x)))", + "(girl(x) -> dog(x))", + "all y. (dog(y) -> (x = y))", + "exists y. love(y, x)", + "exists y. (love(adam, y) & love(y, x))", + ] + + if trace: + print(m2) + + for fmla in formulas: + print(fmla) + Expression.fromstring(fmla) + + parsed = [Expression.fromstring(fmla) for fmla in formulas] + + for p in parsed: + g2.purge() + print( + "The satisfiers of '{}' are: {}".format(p, m2.satisfiers(p, "x", g2, trace)) + ) + + +def demo(num=0, trace=None): + """ + Run exists demos. + + - num = 1: propositional logic demo + - num = 2: first order model demo (only if trace is set) + - num = 3: first order sentences demo + - num = 4: satisfaction of open formulas demo + - any other value: run all the demos + + :param trace: trace = 1, or trace = 2 for more verbose tracing + """ + demos = {1: propdemo, 2: folmodel, 3: foldemo, 4: satdemo} + + try: + demos[num](trace=trace) + except KeyError: + for num in demos: + demos[num](trace=trace) + + +if __name__ == "__main__": + demo(2, trace=0) diff --git a/lib/python3.10/site-packages/nltk/sem/glue.py b/lib/python3.10/site-packages/nltk/sem/glue.py new file mode 100644 index 0000000000000000000000000000000000000000..1098c83bec71cee14b6c06e93ba3f15366c0ada2 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/sem/glue.py @@ -0,0 +1,835 @@ +# Natural Language Toolkit: Glue Semantics +# +# Author: Dan Garrette +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +import os +from itertools import chain + +import nltk +from nltk.internals import Counter +from nltk.sem import drt, linearlogic +from nltk.sem.logic import ( + AbstractVariableExpression, + Expression, + LambdaExpression, + Variable, + 
VariableExpression, +) +from nltk.tag import BigramTagger, RegexpTagger, TrigramTagger, UnigramTagger + +SPEC_SEMTYPES = { + "a": "ex_quant", + "an": "ex_quant", + "every": "univ_quant", + "the": "def_art", + "no": "no_quant", + "default": "ex_quant", +} + +OPTIONAL_RELATIONSHIPS = ["nmod", "vmod", "punct"] + + +class GlueFormula: + def __init__(self, meaning, glue, indices=None): + if not indices: + indices = set() + + if isinstance(meaning, str): + self.meaning = Expression.fromstring(meaning) + elif isinstance(meaning, Expression): + self.meaning = meaning + else: + raise RuntimeError( + "Meaning term neither string or expression: %s, %s" + % (meaning, meaning.__class__) + ) + + if isinstance(glue, str): + self.glue = linearlogic.LinearLogicParser().parse(glue) + elif isinstance(glue, linearlogic.Expression): + self.glue = glue + else: + raise RuntimeError( + "Glue term neither string or expression: %s, %s" + % (glue, glue.__class__) + ) + + self.indices = indices + + def applyto(self, arg): + """self = (\\x.(walk x), (subj -o f)) + arg = (john , subj) + returns ((walk john), f) + """ + if self.indices & arg.indices: # if the sets are NOT disjoint + raise linearlogic.LinearLogicApplicationException( + f"'{self}' applied to '{arg}'. Indices are not disjoint." 
+ ) + else: # if the sets ARE disjoint + return_indices = self.indices | arg.indices + + try: + return_glue = linearlogic.ApplicationExpression( + self.glue, arg.glue, arg.indices + ) + except linearlogic.LinearLogicApplicationException as e: + raise linearlogic.LinearLogicApplicationException( + f"'{self.simplify()}' applied to '{arg.simplify()}'" + ) from e + + arg_meaning_abstracted = arg.meaning + if return_indices: + for dep in self.glue.simplify().antecedent.dependencies[ + ::-1 + ]: # if self.glue is (A -o B), dep is in A.dependencies + arg_meaning_abstracted = self.make_LambdaExpression( + Variable("v%s" % dep), arg_meaning_abstracted + ) + return_meaning = self.meaning.applyto(arg_meaning_abstracted) + + return self.__class__(return_meaning, return_glue, return_indices) + + def make_VariableExpression(self, name): + return VariableExpression(name) + + def make_LambdaExpression(self, variable, term): + return LambdaExpression(variable, term) + + def lambda_abstract(self, other): + assert isinstance(other, GlueFormula) + assert isinstance(other.meaning, AbstractVariableExpression) + return self.__class__( + self.make_LambdaExpression(other.meaning.variable, self.meaning), + linearlogic.ImpExpression(other.glue, self.glue), + ) + + def compile(self, counter=None): + """From Iddo Lev's PhD Dissertation p108-109""" + if not counter: + counter = Counter() + (compiled_glue, new_forms) = self.glue.simplify().compile_pos( + counter, self.__class__ + ) + return new_forms + [ + self.__class__(self.meaning, compiled_glue, {counter.get()}) + ] + + def simplify(self): + return self.__class__( + self.meaning.simplify(), self.glue.simplify(), self.indices + ) + + def __eq__(self, other): + return ( + self.__class__ == other.__class__ + and self.meaning == other.meaning + and self.glue == other.glue + ) + + def __ne__(self, other): + return not self == other + + # sorting for use in doctests which must be deterministic + def __lt__(self, other): + return str(self) < 
str(other) + + def __str__(self): + assert isinstance(self.indices, set) + accum = f"{self.meaning} : {self.glue}" + if self.indices: + accum += ( + " : {" + ", ".join(str(index) for index in sorted(self.indices)) + "}" + ) + return accum + + def __repr__(self): + return "%s" % self + + +class GlueDict(dict): + def __init__(self, filename, encoding=None): + self.filename = filename + self.file_encoding = encoding + self.read_file() + + def read_file(self, empty_first=True): + if empty_first: + self.clear() + + try: + contents = nltk.data.load( + self.filename, format="text", encoding=self.file_encoding + ) + # TODO: the above can't handle zip files, but this should anyway be fixed in nltk.data.load() + except LookupError as e: + try: + contents = nltk.data.load( + "file:" + self.filename, format="text", encoding=self.file_encoding + ) + except LookupError: + raise e + lines = contents.splitlines() + + for line in lines: # example: 'n : (\\x.( x), (v-or))' + # lambdacalc -^ linear logic -^ + line = line.strip() # remove trailing newline + if not len(line): + continue # skip empty lines + if line[0] == "#": + continue # skip commented out lines + + parts = line.split( + " : ", 2 + ) # ['verb', '(\\x.( x), ( subj -o f ))', '[subj]'] + + glue_formulas = [] + paren_count = 0 + tuple_start = 0 + tuple_comma = 0 + + relationships = None + + if len(parts) > 1: + for (i, c) in enumerate(parts[1]): + if c == "(": + if paren_count == 0: # if it's the first '(' of a tuple + tuple_start = i + 1 # then save the index + paren_count += 1 + elif c == ")": + paren_count -= 1 + if paren_count == 0: # if it's the last ')' of a tuple + meaning_term = parts[1][ + tuple_start:tuple_comma + ] # '\\x.( x)' + glue_term = parts[1][tuple_comma + 1 : i] # '(v-r)' + glue_formulas.append( + [meaning_term, glue_term] + ) # add the GlueFormula to the list + elif c == ",": + if ( + paren_count == 1 + ): # if it's a comma separating the parts of the tuple + tuple_comma = i # then save the index + 
elif c == "#": # skip comments at the ends of lines + if ( + paren_count != 0 + ): # if the line hasn't parsed correctly so far + raise RuntimeError( + "Formula syntax is incorrect for entry " + line + ) + break # break to the next line + + if len(parts) > 2: # if there is a relationship entry at the end + rel_start = parts[2].index("[") + 1 + rel_end = parts[2].index("]") + if rel_start == rel_end: + relationships = frozenset() + else: + relationships = frozenset( + r.strip() for r in parts[2][rel_start:rel_end].split(",") + ) + + try: + start_inheritance = parts[0].index("(") + end_inheritance = parts[0].index(")") + sem = parts[0][:start_inheritance].strip() + supertype = parts[0][start_inheritance + 1 : end_inheritance] + except: + sem = parts[0].strip() + supertype = None + + if sem not in self: + self[sem] = {} + + if ( + relationships is None + ): # if not specified for a specific relationship set + # add all relationship entries for parents + if supertype: + for rels in self[supertype]: + if rels not in self[sem]: + self[sem][rels] = [] + glue = self[supertype][rels] + self[sem][rels].extend(glue) + self[sem][rels].extend( + glue_formulas + ) # add the glue formulas to every rel entry + else: + if None not in self[sem]: + self[sem][None] = [] + self[sem][None].extend( + glue_formulas + ) # add the glue formulas to every rel entry + else: + if relationships not in self[sem]: + self[sem][relationships] = [] + if supertype: + self[sem][relationships].extend(self[supertype][relationships]) + self[sem][relationships].extend( + glue_formulas + ) # add the glue entry to the dictionary + + def __str__(self): + accum = "" + for pos in self: + str_pos = "%s" % pos + for relset in self[pos]: + i = 1 + for gf in self[pos][relset]: + if i == 1: + accum += str_pos + ": " + else: + accum += " " * (len(str_pos) + 2) + accum += "%s" % gf + if relset and i == len(self[pos][relset]): + accum += " : %s" % relset + accum += "\n" + i += 1 + return accum + + def 
to_glueformula_list(self, depgraph, node=None, counter=None, verbose=False): + if node is None: + # TODO: should it be depgraph.root? Is this code tested? + top = depgraph.nodes[0] + depList = list(chain.from_iterable(top["deps"].values())) + root = depgraph.nodes[depList[0]] + + return self.to_glueformula_list(depgraph, root, Counter(), verbose) + + glueformulas = self.lookup(node, depgraph, counter) + for dep_idx in chain.from_iterable(node["deps"].values()): + dep = depgraph.nodes[dep_idx] + glueformulas.extend( + self.to_glueformula_list(depgraph, dep, counter, verbose) + ) + return glueformulas + + def lookup(self, node, depgraph, counter): + semtype_names = self.get_semtypes(node) + + semtype = None + for name in semtype_names: + if name in self: + semtype = self[name] + break + if semtype is None: + # raise KeyError, "There is no GlueDict entry for sem type '%s' (for '%s')" % (sem, word) + return [] + + self.add_missing_dependencies(node, depgraph) + + lookup = self._lookup_semtype_option(semtype, node, depgraph) + + if not len(lookup): + raise KeyError( + "There is no GlueDict entry for sem type of '%s' " + "with tag '%s', and rel '%s'" % (node["word"], node["tag"], node["rel"]) + ) + + return self.get_glueformulas_from_semtype_entry( + lookup, node["word"], node, depgraph, counter + ) + + def add_missing_dependencies(self, node, depgraph): + rel = node["rel"].lower() + + if rel == "main": + headnode = depgraph.nodes[node["head"]] + subj = self.lookup_unique("subj", headnode, depgraph) + relation = subj["rel"] + node["deps"].setdefault(relation, []) + node["deps"][relation].append(subj["address"]) + # node['deps'].append(subj['address']) + + def _lookup_semtype_option(self, semtype, node, depgraph): + relationships = frozenset( + depgraph.nodes[dep]["rel"].lower() + for dep in chain.from_iterable(node["deps"].values()) + if depgraph.nodes[dep]["rel"].lower() not in OPTIONAL_RELATIONSHIPS + ) + + try: + lookup = semtype[relationships] + except KeyError: + # 
An exact match is not found, so find the best match where + # 'best' is defined as the glue entry whose relationship set has the + # most relations of any possible relationship set that is a subset + # of the actual depgraph + best_match = frozenset() + for relset_option in set(semtype) - {None}: + if ( + len(relset_option) > len(best_match) + and relset_option < relationships + ): + best_match = relset_option + if not best_match: + if None in semtype: + best_match = None + else: + return None + lookup = semtype[best_match] + + return lookup + + def get_semtypes(self, node): + """ + Based on the node, return a list of plausible semtypes in order of + plausibility. + """ + rel = node["rel"].lower() + word = node["word"].lower() + + if rel == "spec": + if word in SPEC_SEMTYPES: + return [SPEC_SEMTYPES[word]] + else: + return [SPEC_SEMTYPES["default"]] + elif rel in ["nmod", "vmod"]: + return [node["tag"], rel] + else: + return [node["tag"]] + + def get_glueformulas_from_semtype_entry( + self, lookup, word, node, depgraph, counter + ): + glueformulas = [] + + glueFormulaFactory = self.get_GlueFormula_factory() + for meaning, glue in lookup: + gf = glueFormulaFactory(self.get_meaning_formula(meaning, word), glue) + if not len(glueformulas): + gf.word = word + else: + gf.word = f"{word}{len(glueformulas) + 1}" + + gf.glue = self.initialize_labels(gf.glue, node, depgraph, counter.get()) + + glueformulas.append(gf) + return glueformulas + + def get_meaning_formula(self, generic, word): + """ + :param generic: A meaning formula string containing the + parameter "" + :param word: The actual word to be replace "" + """ + word = word.replace(".", "") + return generic.replace("", word) + + def initialize_labels(self, expr, node, depgraph, unique_index): + if isinstance(expr, linearlogic.AtomicExpression): + name = self.find_label_name(expr.name.lower(), node, depgraph, unique_index) + if name[0].isupper(): + return linearlogic.VariableExpression(name) + else: + return 
linearlogic.ConstantExpression(name) + else: + return linearlogic.ImpExpression( + self.initialize_labels(expr.antecedent, node, depgraph, unique_index), + self.initialize_labels(expr.consequent, node, depgraph, unique_index), + ) + + def find_label_name(self, name, node, depgraph, unique_index): + try: + dot = name.index(".") + + before_dot = name[:dot] + after_dot = name[dot + 1 :] + if before_dot == "super": + return self.find_label_name( + after_dot, depgraph.nodes[node["head"]], depgraph, unique_index + ) + else: + return self.find_label_name( + after_dot, + self.lookup_unique(before_dot, node, depgraph), + depgraph, + unique_index, + ) + except ValueError: + lbl = self.get_label(node) + if name == "f": + return lbl + elif name == "v": + return "%sv" % lbl + elif name == "r": + return "%sr" % lbl + elif name == "super": + return self.get_label(depgraph.nodes[node["head"]]) + elif name == "var": + return f"{lbl.upper()}{unique_index}" + elif name == "a": + return self.get_label(self.lookup_unique("conja", node, depgraph)) + elif name == "b": + return self.get_label(self.lookup_unique("conjb", node, depgraph)) + else: + return self.get_label(self.lookup_unique(name, node, depgraph)) + + def get_label(self, node): + """ + Pick an alphabetic character as identifier for an entity in the model. + + :param value: where to index into the list of characters + :type value: int + """ + value = node["address"] + + letter = [ + "f", + "g", + "h", + "i", + "j", + "k", + "l", + "m", + "n", + "o", + "p", + "q", + "r", + "s", + "t", + "u", + "v", + "w", + "x", + "y", + "z", + "a", + "b", + "c", + "d", + "e", + ][value - 1] + num = int(value) // 26 + if num > 0: + return letter + str(num) + else: + return letter + + def lookup_unique(self, rel, node, depgraph): + """ + Lookup 'key'. There should be exactly one item in the associated relation. 
+ """ + deps = [ + depgraph.nodes[dep] + for dep in chain.from_iterable(node["deps"].values()) + if depgraph.nodes[dep]["rel"].lower() == rel.lower() + ] + + if len(deps) == 0: + raise KeyError( + "'{}' doesn't contain a feature '{}'".format(node["word"], rel) + ) + elif len(deps) > 1: + raise KeyError( + "'{}' should only have one feature '{}'".format(node["word"], rel) + ) + else: + return deps[0] + + def get_GlueFormula_factory(self): + return GlueFormula + + +class Glue: + def __init__( + self, semtype_file=None, remove_duplicates=False, depparser=None, verbose=False + ): + self.verbose = verbose + self.remove_duplicates = remove_duplicates + self.depparser = depparser + + from nltk import Prover9 + + self.prover = Prover9() + + if semtype_file: + self.semtype_file = semtype_file + else: + self.semtype_file = os.path.join( + "grammars", "sample_grammars", "glue.semtype" + ) + + def train_depparser(self, depgraphs=None): + if depgraphs: + self.depparser.train(depgraphs) + else: + self.depparser.train_from_file( + nltk.data.find( + os.path.join("grammars", "sample_grammars", "glue_train.conll") + ) + ) + + def parse_to_meaning(self, sentence): + readings = [] + for agenda in self.parse_to_compiled(sentence): + readings.extend(self.get_readings(agenda)) + return readings + + def get_readings(self, agenda): + readings = [] + agenda_length = len(agenda) + atomics = dict() + nonatomics = dict() + while agenda: # is not empty + cur = agenda.pop() + glue_simp = cur.glue.simplify() + if isinstance( + glue_simp, linearlogic.ImpExpression + ): # if cur.glue is non-atomic + for key in atomics: + try: + if isinstance(cur.glue, linearlogic.ApplicationExpression): + bindings = cur.glue.bindings + else: + bindings = linearlogic.BindingDict() + glue_simp.antecedent.unify(key, bindings) + for atomic in atomics[key]: + if not ( + cur.indices & atomic.indices + ): # if the sets of indices are disjoint + try: + agenda.append(cur.applyto(atomic)) + except 
linearlogic.LinearLogicApplicationException: + pass + except linearlogic.UnificationException: + pass + try: + nonatomics[glue_simp.antecedent].append(cur) + except KeyError: + nonatomics[glue_simp.antecedent] = [cur] + + else: # else cur.glue is atomic + for key in nonatomics: + for nonatomic in nonatomics[key]: + try: + if isinstance( + nonatomic.glue, linearlogic.ApplicationExpression + ): + bindings = nonatomic.glue.bindings + else: + bindings = linearlogic.BindingDict() + glue_simp.unify(key, bindings) + if not ( + cur.indices & nonatomic.indices + ): # if the sets of indices are disjoint + try: + agenda.append(nonatomic.applyto(cur)) + except linearlogic.LinearLogicApplicationException: + pass + except linearlogic.UnificationException: + pass + try: + atomics[glue_simp].append(cur) + except KeyError: + atomics[glue_simp] = [cur] + + for entry in atomics: + for gf in atomics[entry]: + if len(gf.indices) == agenda_length: + self._add_to_reading_list(gf, readings) + for entry in nonatomics: + for gf in nonatomics[entry]: + if len(gf.indices) == agenda_length: + self._add_to_reading_list(gf, readings) + return readings + + def _add_to_reading_list(self, glueformula, reading_list): + add_reading = True + if self.remove_duplicates: + for reading in reading_list: + try: + if reading.equiv(glueformula.meaning, self.prover): + add_reading = False + break + except Exception as e: + # if there is an exception, the syntax of the formula + # may not be understandable by the prover, so don't + # throw out the reading. + print("Error when checking logical equality of statements", e) + + if add_reading: + reading_list.append(glueformula.meaning) + + def parse_to_compiled(self, sentence): + gfls = [self.depgraph_to_glue(dg) for dg in self.dep_parse(sentence)] + return [self.gfl_to_compiled(gfl) for gfl in gfls] + + def dep_parse(self, sentence): + """ + Return a dependency graph for the sentence. 
+ + :param sentence: the sentence to be parsed + :type sentence: list(str) + :rtype: DependencyGraph + """ + + # Lazy-initialize the depparser + if self.depparser is None: + from nltk.parse import MaltParser + + self.depparser = MaltParser(tagger=self.get_pos_tagger()) + if not self.depparser._trained: + self.train_depparser() + return self.depparser.parse(sentence, verbose=self.verbose) + + def depgraph_to_glue(self, depgraph): + return self.get_glue_dict().to_glueformula_list(depgraph) + + def get_glue_dict(self): + return GlueDict(self.semtype_file) + + def gfl_to_compiled(self, gfl): + index_counter = Counter() + return_list = [] + for gf in gfl: + return_list.extend(gf.compile(index_counter)) + + if self.verbose: + print("Compiled Glue Premises:") + for cgf in return_list: + print(cgf) + + return return_list + + def get_pos_tagger(self): + from nltk.corpus import brown + + regexp_tagger = RegexpTagger( + [ + (r"^-?[0-9]+(\.[0-9]+)?$", "CD"), # cardinal numbers + (r"(The|the|A|a|An|an)$", "AT"), # articles + (r".*able$", "JJ"), # adjectives + (r".*ness$", "NN"), # nouns formed from adjectives + (r".*ly$", "RB"), # adverbs + (r".*s$", "NNS"), # plural nouns + (r".*ing$", "VBG"), # gerunds + (r".*ed$", "VBD"), # past tense verbs + (r".*", "NN"), # nouns (default) + ] + ) + brown_train = brown.tagged_sents(categories="news") + unigram_tagger = UnigramTagger(brown_train, backoff=regexp_tagger) + bigram_tagger = BigramTagger(brown_train, backoff=unigram_tagger) + trigram_tagger = TrigramTagger(brown_train, backoff=bigram_tagger) + + # Override particular words + main_tagger = RegexpTagger( + [(r"(A|a|An|an)$", "ex_quant"), (r"(Every|every|All|all)$", "univ_quant")], + backoff=trigram_tagger, + ) + + return main_tagger + + +class DrtGlueFormula(GlueFormula): + def __init__(self, meaning, glue, indices=None): + if not indices: + indices = set() + + if isinstance(meaning, str): + self.meaning = drt.DrtExpression.fromstring(meaning) + elif isinstance(meaning, 
drt.DrtExpression): + self.meaning = meaning + else: + raise RuntimeError( + "Meaning term neither string or expression: %s, %s" + % (meaning, meaning.__class__) + ) + + if isinstance(glue, str): + self.glue = linearlogic.LinearLogicParser().parse(glue) + elif isinstance(glue, linearlogic.Expression): + self.glue = glue + else: + raise RuntimeError( + "Glue term neither string or expression: %s, %s" + % (glue, glue.__class__) + ) + + self.indices = indices + + def make_VariableExpression(self, name): + return drt.DrtVariableExpression(name) + + def make_LambdaExpression(self, variable, term): + return drt.DrtLambdaExpression(variable, term) + + +class DrtGlueDict(GlueDict): + def get_GlueFormula_factory(self): + return DrtGlueFormula + + +class DrtGlue(Glue): + def __init__( + self, semtype_file=None, remove_duplicates=False, depparser=None, verbose=False + ): + if not semtype_file: + semtype_file = os.path.join( + "grammars", "sample_grammars", "drt_glue.semtype" + ) + Glue.__init__(self, semtype_file, remove_duplicates, depparser, verbose) + + def get_glue_dict(self): + return DrtGlueDict(self.semtype_file) + + +def demo(show_example=-1): + from nltk.parse import MaltParser + + examples = [ + "David sees Mary", + "David eats a sandwich", + "every man chases a dog", + "every man believes a dog sleeps", + "John gives David a sandwich", + "John chases himself", + ] + # 'John persuades David to order a pizza', + # 'John tries to go', + # 'John tries to find a unicorn', + # 'John seems to vanish', + # 'a unicorn seems to approach', + # 'every big cat leaves', + # 'every gray cat leaves', + # 'every big gray cat leaves', + # 'a former senator leaves', + + print("============== DEMO ==============") + + tagger = RegexpTagger( + [ + ("^(David|Mary|John)$", "NNP"), + ( + "^(sees|eats|chases|believes|gives|sleeps|chases|persuades|tries|seems|leaves)$", + "VB", + ), + ("^(go|order|vanish|find|approach)$", "VB"), + ("^(a)$", "ex_quant"), + ("^(every)$", "univ_quant"), + 
("^(sandwich|man|dog|pizza|unicorn|cat|senator)$", "NN"), + ("^(big|gray|former)$", "JJ"), + ("^(him|himself)$", "PRP"), + ] + ) + + depparser = MaltParser(tagger=tagger) + glue = Glue(depparser=depparser, verbose=False) + + for (i, sentence) in enumerate(examples): + if i == show_example or show_example == -1: + print(f"[[[Example {i}]]] {sentence}") + for reading in glue.parse_to_meaning(sentence.split()): + print(reading.simplify()) + print("") + + +if __name__ == "__main__": + demo() diff --git a/lib/python3.10/site-packages/nltk/sem/hole.py b/lib/python3.10/site-packages/nltk/sem/hole.py new file mode 100644 index 0000000000000000000000000000000000000000..4570cb02a3bf183a73a1f9b5e78b8f0e1dac430f --- /dev/null +++ b/lib/python3.10/site-packages/nltk/sem/hole.py @@ -0,0 +1,395 @@ +# Natural Language Toolkit: Logic +# +# Author: Peter Wang +# Updated by: Dan Garrette +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +""" +An implementation of the Hole Semantics model, following Blackburn and Bos, +Representation and Inference for Natural Language (CSLI, 2005). + +The semantic representations are built by the grammar hole.fcfg. +This module contains driver code to read in sentences and parse them +according to a hole semantics grammar. + +After parsing, the semantic representation is in the form of an underspecified +representation that is not easy to read. We use a "plugging" algorithm to +convert that representation into first-order logic formulas. +""" + +from functools import reduce + +from nltk.parse import load_parser +from nltk.sem.logic import ( + AllExpression, + AndExpression, + ApplicationExpression, + ExistsExpression, + IffExpression, + ImpExpression, + LambdaExpression, + NegatedExpression, + OrExpression, +) +from nltk.sem.skolemize import skolemize + +# Note that in this code there may be multiple types of trees being referred to: +# +# 1. parse trees +# 2. the underspecified representation +# 3. 
first-order logic formula trees +# 4. the search space when plugging (search tree) +# + + +class Constants: + ALL = "ALL" + EXISTS = "EXISTS" + NOT = "NOT" + AND = "AND" + OR = "OR" + IMP = "IMP" + IFF = "IFF" + PRED = "PRED" + LEQ = "LEQ" + HOLE = "HOLE" + LABEL = "LABEL" + + MAP = { + ALL: lambda v, e: AllExpression(v.variable, e), + EXISTS: lambda v, e: ExistsExpression(v.variable, e), + NOT: NegatedExpression, + AND: AndExpression, + OR: OrExpression, + IMP: ImpExpression, + IFF: IffExpression, + PRED: ApplicationExpression, + } + + +class HoleSemantics: + """ + This class holds the broken-down components of a hole semantics, i.e. it + extracts the holes, labels, logic formula fragments and constraints out of + a big conjunction of such as produced by the hole semantics grammar. It + then provides some operations on the semantics dealing with holes, labels + and finding legal ways to plug holes with labels. + """ + + def __init__(self, usr): + """ + Constructor. `usr' is a ``sem.Expression`` representing an + Underspecified Representation Structure (USR). A USR has the following + special predicates: + ALL(l,v,n), + EXISTS(l,v,n), + AND(l,n,n), + OR(l,n,n), + IMP(l,n,n), + IFF(l,n,n), + PRED(l,v,n,v[,v]*) where the brackets and star indicate zero or more repetitions, + LEQ(n,n), + HOLE(n), + LABEL(n) + where l is the label of the node described by the predicate, n is either + a label or a hole, and v is a variable. + """ + self.holes = set() + self.labels = set() + self.fragments = {} # mapping of label -> formula fragment + self.constraints = set() # set of Constraints + self._break_down(usr) + self.top_most_labels = self._find_top_most_labels() + self.top_hole = self._find_top_hole() + + def is_node(self, x): + """ + Return true if x is a node (label or hole) in this semantic + representation. 
+ """ + return x in (self.labels | self.holes) + + def _break_down(self, usr): + """ + Extract holes, labels, formula fragments and constraints from the hole + semantics underspecified representation (USR). + """ + if isinstance(usr, AndExpression): + self._break_down(usr.first) + self._break_down(usr.second) + elif isinstance(usr, ApplicationExpression): + func, args = usr.uncurry() + if func.variable.name == Constants.LEQ: + self.constraints.add(Constraint(args[0], args[1])) + elif func.variable.name == Constants.HOLE: + self.holes.add(args[0]) + elif func.variable.name == Constants.LABEL: + self.labels.add(args[0]) + else: + label = args[0] + assert label not in self.fragments + self.fragments[label] = (func, args[1:]) + else: + raise ValueError(usr.label()) + + def _find_top_nodes(self, node_list): + top_nodes = node_list.copy() + for f in self.fragments.values(): + # the label is the first argument of the predicate + args = f[1] + for arg in args: + if arg in node_list: + top_nodes.discard(arg) + return top_nodes + + def _find_top_most_labels(self): + """ + Return the set of labels which are not referenced directly as part of + another formula fragment. These will be the top-most labels for the + subtree that they are part of. + """ + return self._find_top_nodes(self.labels) + + def _find_top_hole(self): + """ + Return the hole that will be the top of the formula tree. + """ + top_holes = self._find_top_nodes(self.holes) + assert len(top_holes) == 1 # it must be unique + return top_holes.pop() + + def pluggings(self): + """ + Calculate and return all the legal pluggings (mappings of labels to + holes) of this semantics given the constraints. + """ + record = [] + self._plug_nodes([(self.top_hole, [])], self.top_most_labels, {}, record) + return record + + def _plug_nodes(self, queue, potential_labels, plug_acc, record): + """ + Plug the nodes in `queue' with the labels in `potential_labels'. 
+ + Each element of `queue' is a tuple of the node to plug and the list of + ancestor holes from the root of the graph to that node. + + `potential_labels' is a set of the labels which are still available for + plugging. + + `plug_acc' is the incomplete mapping of holes to labels made on the + current branch of the search tree so far. + + `record' is a list of all the complete pluggings that we have found in + total so far. It is the only parameter that is destructively updated. + """ + if queue != []: + (node, ancestors) = queue[0] + if node in self.holes: + # The node is a hole, try to plug it. + self._plug_hole( + node, ancestors, queue[1:], potential_labels, plug_acc, record + ) + else: + assert node in self.labels + # The node is a label. Replace it in the queue by the holes and + # labels in the formula fragment named by that label. + args = self.fragments[node][1] + head = [(a, ancestors) for a in args if self.is_node(a)] + self._plug_nodes(head + queue[1:], potential_labels, plug_acc, record) + else: + raise Exception("queue empty") + + def _plug_hole(self, hole, ancestors0, queue, potential_labels0, plug_acc0, record): + """ + Try all possible ways of plugging a single hole. + See _plug_nodes for the meanings of the parameters. + """ + # Add the current hole we're trying to plug into the list of ancestors. + assert hole not in ancestors0 + ancestors = [hole] + ancestors0 + + # Try each potential label in this hole in turn. + for l in potential_labels0: + # Is the label valid in this hole? + if self._violates_constraints(l, ancestors): + continue + + plug_acc = plug_acc0.copy() + plug_acc[hole] = l + potential_labels = potential_labels0.copy() + potential_labels.remove(l) + + if len(potential_labels) == 0: + # No more potential labels. That must mean all the holes have + # been filled so we have found a legal plugging so remember it. 
+ # + # Note that the queue might not be empty because there might + # be labels on there that point to formula fragments with + # no holes in them. _sanity_check_plugging will make sure + # all holes are filled. + self._sanity_check_plugging(plug_acc, self.top_hole, []) + record.append(plug_acc) + else: + # Recursively try to fill in the rest of the holes in the + # queue. The label we just plugged into the hole could have + # holes of its own so at the end of the queue. Putting it on + # the end of the queue gives us a breadth-first search, so that + # all the holes at level i of the formula tree are filled + # before filling level i+1. + # A depth-first search would work as well since the trees must + # be finite but the bookkeeping would be harder. + self._plug_nodes( + queue + [(l, ancestors)], potential_labels, plug_acc, record + ) + + def _violates_constraints(self, label, ancestors): + """ + Return True if the `label' cannot be placed underneath the holes given + by the set `ancestors' because it would violate the constraints imposed + on it. + """ + for c in self.constraints: + if c.lhs == label: + if c.rhs not in ancestors: + return True + return False + + def _sanity_check_plugging(self, plugging, node, ancestors): + """ + Make sure that a given plugging is legal. We recursively go through + each node and make sure that no constraints are violated. + We also check that all holes have been filled. + """ + if node in self.holes: + ancestors = [node] + ancestors + label = plugging[node] + else: + label = node + assert label in self.labels + for c in self.constraints: + if c.lhs == label: + assert c.rhs in ancestors + args = self.fragments[label][1] + for arg in args: + if self.is_node(arg): + self._sanity_check_plugging(plugging, arg, [label] + ancestors) + + def formula_tree(self, plugging): + """ + Return the first-order logic formula tree for this underspecified + representation using the plugging given. 
+ """ + return self._formula_tree(plugging, self.top_hole) + + def _formula_tree(self, plugging, node): + if node in plugging: + return self._formula_tree(plugging, plugging[node]) + elif node in self.fragments: + pred, args = self.fragments[node] + children = [self._formula_tree(plugging, arg) for arg in args] + return reduce(Constants.MAP[pred.variable.name], children) + else: + return node + + +class Constraint: + """ + This class represents a constraint of the form (L =< N), + where L is a label and N is a node (a label or a hole). + """ + + def __init__(self, lhs, rhs): + self.lhs = lhs + self.rhs = rhs + + def __eq__(self, other): + if self.__class__ == other.__class__: + return self.lhs == other.lhs and self.rhs == other.rhs + else: + return False + + def __ne__(self, other): + return not (self == other) + + def __hash__(self): + return hash(repr(self)) + + def __repr__(self): + return f"({self.lhs} < {self.rhs})" + + +def hole_readings(sentence, grammar_filename=None, verbose=False): + if not grammar_filename: + grammar_filename = "grammars/sample_grammars/hole.fcfg" + + if verbose: + print("Reading grammar file", grammar_filename) + + parser = load_parser(grammar_filename) + + # Parse the sentence. + tokens = sentence.split() + trees = list(parser.parse(tokens)) + if verbose: + print("Got %d different parses" % len(trees)) + + all_readings = [] + for tree in trees: + # Get the semantic feature from the top of the parse tree. + sem = tree.label()["SEM"].simplify() + + # Print the raw semantic representation. + if verbose: + print("Raw: ", sem) + + # Skolemize away all quantifiers. All variables become unique. + while isinstance(sem, LambdaExpression): + sem = sem.term + skolemized = skolemize(sem) + + if verbose: + print("Skolemized:", skolemized) + + # Break the hole semantics representation down into its components + # i.e. holes, labels, formula fragments and constraints. 
+ hole_sem = HoleSemantics(skolemized) + + # Maybe show the details of the semantic representation. + if verbose: + print("Holes: ", hole_sem.holes) + print("Labels: ", hole_sem.labels) + print("Constraints: ", hole_sem.constraints) + print("Top hole: ", hole_sem.top_hole) + print("Top labels: ", hole_sem.top_most_labels) + print("Fragments:") + for l, f in hole_sem.fragments.items(): + print(f"\t{l}: {f}") + + # Find all the possible ways to plug the formulas together. + pluggings = hole_sem.pluggings() + + # Build FOL formula trees using the pluggings. + readings = list(map(hole_sem.formula_tree, pluggings)) + + # Print out the formulas in a textual format. + if verbose: + for i, r in enumerate(readings): + print() + print("%d. %s" % (i, r)) + print() + + all_readings.extend(readings) + + return all_readings + + +if __name__ == "__main__": + for r in hole_readings("a dog barks"): + print(r) + print() + for r in hole_readings("every girl chases a dog"): + print(r) diff --git a/lib/python3.10/site-packages/nltk/sem/lfg.py b/lib/python3.10/site-packages/nltk/sem/lfg.py new file mode 100644 index 0000000000000000000000000000000000000000..13473b0087940c9b42cc4c36d5f442bb0f78eafe --- /dev/null +++ b/lib/python3.10/site-packages/nltk/sem/lfg.py @@ -0,0 +1,261 @@ +# Natural Language Toolkit: Lexical Functional Grammar +# +# Author: Dan Garrette +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +from itertools import chain + +from nltk.internals import Counter + + +class FStructure(dict): + def safeappend(self, key, item): + """ + Append 'item' to the list at 'key'. If no list exists for 'key', then + construct one. 
+ """ + if key not in self: + self[key] = [] + self[key].append(item) + + def __setitem__(self, key, value): + dict.__setitem__(self, key.lower(), value) + + def __getitem__(self, key): + return dict.__getitem__(self, key.lower()) + + def __contains__(self, key): + return dict.__contains__(self, key.lower()) + + def to_glueformula_list(self, glue_dict): + depgraph = self.to_depgraph() + return glue_dict.to_glueformula_list(depgraph) + + def to_depgraph(self, rel=None): + from nltk.parse.dependencygraph import DependencyGraph + + depgraph = DependencyGraph() + nodes = depgraph.nodes + + self._to_depgraph(nodes, 0, "ROOT") + + # Add all the dependencies for all the nodes + for address, node in nodes.items(): + for n2 in (n for n in nodes.values() if n["rel"] != "TOP"): + if n2["head"] == address: + relation = n2["rel"] + node["deps"].setdefault(relation, []) + node["deps"][relation].append(n2["address"]) + + depgraph.root = nodes[1] + + return depgraph + + def _to_depgraph(self, nodes, head, rel): + index = len(nodes) + + nodes[index].update( + { + "address": index, + "word": self.pred[0], + "tag": self.pred[1], + "head": head, + "rel": rel, + } + ) + + for feature in sorted(self): + for item in sorted(self[feature]): + if isinstance(item, FStructure): + item._to_depgraph(nodes, index, feature) + elif isinstance(item, tuple): + new_index = len(nodes) + nodes[new_index].update( + { + "address": new_index, + "word": item[0], + "tag": item[1], + "head": index, + "rel": feature, + } + ) + elif isinstance(item, list): + for n in item: + n._to_depgraph(nodes, index, feature) + else: + raise Exception( + "feature %s is not an FStruct, a list, or a tuple" % feature + ) + + @staticmethod + def read_depgraph(depgraph): + return FStructure._read_depgraph(depgraph.root, depgraph) + + @staticmethod + def _read_depgraph(node, depgraph, label_counter=None, parent=None): + if not label_counter: + label_counter = Counter() + + if node["rel"].lower() in ["spec", "punct"]: + # the 
value of a 'spec' entry is a word, not an FStructure + return (node["word"], node["tag"]) + + else: + fstruct = FStructure() + fstruct.pred = None + fstruct.label = FStructure._make_label(label_counter.get()) + + fstruct.parent = parent + + word, tag = node["word"], node["tag"] + if tag[:2] == "VB": + if tag[2:3] == "D": + fstruct.safeappend("tense", ("PAST", "tense")) + fstruct.pred = (word, tag[:2]) + + if not fstruct.pred: + fstruct.pred = (word, tag) + + children = [ + depgraph.nodes[idx] + for idx in chain.from_iterable(node["deps"].values()) + ] + for child in children: + fstruct.safeappend( + child["rel"], + FStructure._read_depgraph(child, depgraph, label_counter, fstruct), + ) + + return fstruct + + @staticmethod + def _make_label(value): + """ + Pick an alphabetic character as identifier for an entity in the model. + + :param value: where to index into the list of characters + :type value: int + """ + letter = [ + "f", + "g", + "h", + "i", + "j", + "k", + "l", + "m", + "n", + "o", + "p", + "q", + "r", + "s", + "t", + "u", + "v", + "w", + "x", + "y", + "z", + "a", + "b", + "c", + "d", + "e", + ][value - 1] + num = int(value) // 26 + if num > 0: + return letter + str(num) + else: + return letter + + def __repr__(self): + return self.__str__().replace("\n", "") + + def __str__(self): + return self.pretty_format() + + def pretty_format(self, indent=3): + try: + accum = "%s:[" % self.label + except NameError: + accum = "[" + try: + accum += "pred '%s'" % (self.pred[0]) + except NameError: + pass + + for feature in sorted(self): + for item in self[feature]: + if isinstance(item, FStructure): + next_indent = indent + len(feature) + 3 + len(self.label) + accum += "\n{}{} {}".format( + " " * (indent), + feature, + item.pretty_format(next_indent), + ) + elif isinstance(item, tuple): + accum += "\n{}{} '{}'".format(" " * (indent), feature, item[0]) + elif isinstance(item, list): + accum += "\n{}{} {{{}}}".format( + " " * (indent), + feature, + ("\n%s" % (" " * 
(indent + len(feature) + 2))).join(item), + ) + else: # ERROR + raise Exception( + "feature %s is not an FStruct, a list, or a tuple" % feature + ) + return accum + "]" + + +def demo_read_depgraph(): + from nltk.parse.dependencygraph import DependencyGraph + + dg1 = DependencyGraph( + """\ +Esso NNP 2 SUB +said VBD 0 ROOT +the DT 5 NMOD +Whiting NNP 5 NMOD +field NN 6 SUB +started VBD 2 VMOD +production NN 6 OBJ +Tuesday NNP 6 VMOD +""" + ) + dg2 = DependencyGraph( + """\ +John NNP 2 SUB +sees VBP 0 ROOT +Mary NNP 2 OBJ +""" + ) + dg3 = DependencyGraph( + """\ +a DT 2 SPEC +man NN 3 SUBJ +walks VB 0 ROOT +""" + ) + dg4 = DependencyGraph( + """\ +every DT 2 SPEC +girl NN 3 SUBJ +chases VB 0 ROOT +a DT 5 SPEC +dog NN 3 OBJ +""" + ) + + depgraphs = [dg1, dg2, dg3, dg4] + for dg in depgraphs: + print(FStructure.read_depgraph(dg)) + + +if __name__ == "__main__": + demo_read_depgraph() diff --git a/lib/python3.10/site-packages/nltk/sem/linearlogic.py b/lib/python3.10/site-packages/nltk/sem/linearlogic.py new file mode 100644 index 0000000000000000000000000000000000000000..474f835e2f1bbe19fe2486e259bea2d08fa473b1 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/sem/linearlogic.py @@ -0,0 +1,482 @@ +# Natural Language Toolkit: Linear Logic +# +# Author: Dan Garrette +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +from nltk.internals import Counter +from nltk.sem.logic import APP, LogicParser + +_counter = Counter() + + +class Tokens: + # Punctuation + OPEN = "(" + CLOSE = ")" + + # Operations + IMP = "-o" + + PUNCT = [OPEN, CLOSE] + TOKENS = PUNCT + [IMP] + + +class LinearLogicParser(LogicParser): + """A linear logic expression parser.""" + + def __init__(self): + LogicParser.__init__(self) + + self.operator_precedence = {APP: 1, Tokens.IMP: 2, None: 3} + self.right_associated_operations += [Tokens.IMP] + + def get_all_symbols(self): + return Tokens.TOKENS + + def handle(self, tok, context): + if tok not in 
Tokens.TOKENS: + return self.handle_variable(tok, context) + elif tok == Tokens.OPEN: + return self.handle_open(tok, context) + + def get_BooleanExpression_factory(self, tok): + if tok == Tokens.IMP: + return ImpExpression + else: + return None + + def make_BooleanExpression(self, factory, first, second): + return factory(first, second) + + def attempt_ApplicationExpression(self, expression, context): + """Attempt to make an application expression. If the next tokens + are an argument in parens, then the argument expression is a + function being applied to the arguments. Otherwise, return the + argument expression.""" + if self.has_priority(APP, context): + if self.inRange(0) and self.token(0) == Tokens.OPEN: + self.token() # swallow then open paren + argument = self.process_next_expression(APP) + self.assertNextToken(Tokens.CLOSE) + expression = ApplicationExpression(expression, argument, None) + return expression + + def make_VariableExpression(self, name): + if name[0].isupper(): + return VariableExpression(name) + else: + return ConstantExpression(name) + + +class Expression: + + _linear_logic_parser = LinearLogicParser() + + @classmethod + def fromstring(cls, s): + return cls._linear_logic_parser.parse(s) + + def applyto(self, other, other_indices=None): + return ApplicationExpression(self, other, other_indices) + + def __call__(self, other): + return self.applyto(other) + + def __repr__(self): + return f"<{self.__class__.__name__} {self}>" + + +class AtomicExpression(Expression): + def __init__(self, name, dependencies=None): + """ + :param name: str for the constant name + :param dependencies: list of int for the indices on which this atom is dependent + """ + assert isinstance(name, str) + self.name = name + + if not dependencies: + dependencies = [] + self.dependencies = dependencies + + def simplify(self, bindings=None): + """ + If 'self' is bound by 'bindings', return the atomic to which it is bound. + Otherwise, return self. 
+ + :param bindings: ``BindingDict`` A dictionary of bindings used to simplify + :return: ``AtomicExpression`` + """ + if bindings and self in bindings: + return bindings[self] + else: + return self + + def compile_pos(self, index_counter, glueFormulaFactory): + """ + From Iddo Lev's PhD Dissertation p108-109 + + :param index_counter: ``Counter`` for unique indices + :param glueFormulaFactory: ``GlueFormula`` for creating new glue formulas + :return: (``Expression``,set) for the compiled linear logic and any newly created glue formulas + """ + self.dependencies = [] + return (self, []) + + def compile_neg(self, index_counter, glueFormulaFactory): + """ + From Iddo Lev's PhD Dissertation p108-109 + + :param index_counter: ``Counter`` for unique indices + :param glueFormulaFactory: ``GlueFormula`` for creating new glue formulas + :return: (``Expression``,set) for the compiled linear logic and any newly created glue formulas + """ + self.dependencies = [] + return (self, []) + + def initialize_labels(self, fstruct): + self.name = fstruct.initialize_label(self.name.lower()) + + def __eq__(self, other): + return self.__class__ == other.__class__ and self.name == other.name + + def __ne__(self, other): + return not self == other + + def __str__(self): + accum = self.name + if self.dependencies: + accum += "%s" % self.dependencies + return accum + + def __hash__(self): + return hash(self.name) + + +class ConstantExpression(AtomicExpression): + def unify(self, other, bindings): + """ + If 'other' is a constant, then it must be equal to 'self'. If 'other' is a variable, + then it must not be bound to anything other than 'self'. 
+ + :param other: ``Expression`` + :param bindings: ``BindingDict`` A dictionary of all current bindings + :return: ``BindingDict`` A new combined dictionary of of 'bindings' and any new binding + :raise UnificationException: If 'self' and 'other' cannot be unified in the context of 'bindings' + """ + assert isinstance(other, Expression) + if isinstance(other, VariableExpression): + try: + return bindings + BindingDict([(other, self)]) + except VariableBindingException: + pass + elif self == other: + return bindings + raise UnificationException(self, other, bindings) + + +class VariableExpression(AtomicExpression): + def unify(self, other, bindings): + """ + 'self' must not be bound to anything other than 'other'. + + :param other: ``Expression`` + :param bindings: ``BindingDict`` A dictionary of all current bindings + :return: ``BindingDict`` A new combined dictionary of of 'bindings' and the new binding + :raise UnificationException: If 'self' and 'other' cannot be unified in the context of 'bindings' + """ + assert isinstance(other, Expression) + try: + if self == other: + return bindings + else: + return bindings + BindingDict([(self, other)]) + except VariableBindingException as e: + raise UnificationException(self, other, bindings) from e + + +class ImpExpression(Expression): + def __init__(self, antecedent, consequent): + """ + :param antecedent: ``Expression`` for the antecedent + :param consequent: ``Expression`` for the consequent + """ + assert isinstance(antecedent, Expression) + assert isinstance(consequent, Expression) + self.antecedent = antecedent + self.consequent = consequent + + def simplify(self, bindings=None): + return self.__class__( + self.antecedent.simplify(bindings), self.consequent.simplify(bindings) + ) + + def unify(self, other, bindings): + """ + Both the antecedent and consequent of 'self' and 'other' must unify. 
+ + :param other: ``ImpExpression`` + :param bindings: ``BindingDict`` A dictionary of all current bindings + :return: ``BindingDict`` A new combined dictionary of of 'bindings' and any new bindings + :raise UnificationException: If 'self' and 'other' cannot be unified in the context of 'bindings' + """ + assert isinstance(other, ImpExpression) + try: + return ( + bindings + + self.antecedent.unify(other.antecedent, bindings) + + self.consequent.unify(other.consequent, bindings) + ) + except VariableBindingException as e: + raise UnificationException(self, other, bindings) from e + + def compile_pos(self, index_counter, glueFormulaFactory): + """ + From Iddo Lev's PhD Dissertation p108-109 + + :param index_counter: ``Counter`` for unique indices + :param glueFormulaFactory: ``GlueFormula`` for creating new glue formulas + :return: (``Expression``,set) for the compiled linear logic and any newly created glue formulas + """ + (a, a_new) = self.antecedent.compile_neg(index_counter, glueFormulaFactory) + (c, c_new) = self.consequent.compile_pos(index_counter, glueFormulaFactory) + return (ImpExpression(a, c), a_new + c_new) + + def compile_neg(self, index_counter, glueFormulaFactory): + """ + From Iddo Lev's PhD Dissertation p108-109 + + :param index_counter: ``Counter`` for unique indices + :param glueFormulaFactory: ``GlueFormula`` for creating new glue formulas + :return: (``Expression``,list of ``GlueFormula``) for the compiled linear logic and any newly created glue formulas + """ + (a, a_new) = self.antecedent.compile_pos(index_counter, glueFormulaFactory) + (c, c_new) = self.consequent.compile_neg(index_counter, glueFormulaFactory) + fresh_index = index_counter.get() + c.dependencies.append(fresh_index) + new_v = glueFormulaFactory("v%s" % fresh_index, a, {fresh_index}) + return (c, a_new + c_new + [new_v]) + + def initialize_labels(self, fstruct): + self.antecedent.initialize_labels(fstruct) + self.consequent.initialize_labels(fstruct) + + def __eq__(self, 
other): + return ( + self.__class__ == other.__class__ + and self.antecedent == other.antecedent + and self.consequent == other.consequent + ) + + def __ne__(self, other): + return not self == other + + def __str__(self): + return "{}{} {} {}{}".format( + Tokens.OPEN, + self.antecedent, + Tokens.IMP, + self.consequent, + Tokens.CLOSE, + ) + + def __hash__(self): + return hash(f"{hash(self.antecedent)}{Tokens.IMP}{hash(self.consequent)}") + + +class ApplicationExpression(Expression): + def __init__(self, function, argument, argument_indices=None): + """ + :param function: ``Expression`` for the function + :param argument: ``Expression`` for the argument + :param argument_indices: set for the indices of the glue formula from which the argument came + :raise LinearLogicApplicationException: If 'function' cannot be applied to 'argument' given 'argument_indices'. + """ + function_simp = function.simplify() + argument_simp = argument.simplify() + + assert isinstance(function_simp, ImpExpression) + assert isinstance(argument_simp, Expression) + + bindings = BindingDict() + + try: + if isinstance(function, ApplicationExpression): + bindings += function.bindings + if isinstance(argument, ApplicationExpression): + bindings += argument.bindings + bindings += function_simp.antecedent.unify(argument_simp, bindings) + except UnificationException as e: + raise LinearLogicApplicationException( + f"Cannot apply {function_simp} to {argument_simp}. 
{e}" + ) from e + + # If you are running it on complied premises, more conditions apply + if argument_indices: + # A.dependencies of (A -o (B -o C)) must be a proper subset of argument_indices + if not set(function_simp.antecedent.dependencies) < argument_indices: + raise LinearLogicApplicationException( + "Dependencies unfulfilled when attempting to apply Linear Logic formula %s to %s" + % (function_simp, argument_simp) + ) + if set(function_simp.antecedent.dependencies) == argument_indices: + raise LinearLogicApplicationException( + "Dependencies not a proper subset of indices when attempting to apply Linear Logic formula %s to %s" + % (function_simp, argument_simp) + ) + + self.function = function + self.argument = argument + self.bindings = bindings + + def simplify(self, bindings=None): + """ + Since function is an implication, return its consequent. There should be + no need to check that the application is valid since the checking is done + by the constructor. + + :param bindings: ``BindingDict`` A dictionary of bindings used to simplify + :return: ``Expression`` + """ + if not bindings: + bindings = self.bindings + + return self.function.simplify(bindings).consequent + + def __eq__(self, other): + return ( + self.__class__ == other.__class__ + and self.function == other.function + and self.argument == other.argument + ) + + def __ne__(self, other): + return not self == other + + def __str__(self): + return "%s" % self.function + Tokens.OPEN + "%s" % self.argument + Tokens.CLOSE + + def __hash__(self): + return hash(f"{hash(self.antecedent)}{Tokens.OPEN}{hash(self.consequent)}") + + +class BindingDict: + def __init__(self, bindings=None): + """ + :param bindings: + list [(``VariableExpression``, ``AtomicExpression``)] to initialize the dictionary + dict {``VariableExpression``: ``AtomicExpression``} to initialize the dictionary + """ + self.d = {} + + if isinstance(bindings, dict): + bindings = bindings.items() + + if bindings: + for (v, b) in bindings: + 
self[v] = b + + def __setitem__(self, variable, binding): + """ + A binding is consistent with the dict if its variable is not already bound, OR if its + variable is already bound to its argument. + + :param variable: ``VariableExpression`` The variable bind + :param binding: ``Expression`` The expression to which 'variable' should be bound + :raise VariableBindingException: If the variable cannot be bound in this dictionary + """ + assert isinstance(variable, VariableExpression) + assert isinstance(binding, Expression) + + assert variable != binding + + existing = self.d.get(variable, None) + + if not existing or binding == existing: + self.d[variable] = binding + else: + raise VariableBindingException( + "Variable %s already bound to another value" % (variable) + ) + + def __getitem__(self, variable): + """ + Return the expression to which 'variable' is bound + """ + assert isinstance(variable, VariableExpression) + + intermediate = self.d[variable] + while intermediate: + try: + intermediate = self.d[intermediate] + except KeyError: + return intermediate + + def __contains__(self, item): + return item in self.d + + def __add__(self, other): + """ + :param other: ``BindingDict`` The dict with which to combine self + :return: ``BindingDict`` A new dict containing all the elements of both parameters + :raise VariableBindingException: If the parameter dictionaries are not consistent with each other + """ + try: + combined = BindingDict() + for v in self.d: + combined[v] = self.d[v] + for v in other.d: + combined[v] = other.d[v] + return combined + except VariableBindingException as e: + raise VariableBindingException( + "Attempting to add two contradicting" + " VariableBindingsLists: %s, %s" % (self, other) + ) from e + + def __ne__(self, other): + return not self == other + + def __eq__(self, other): + if not isinstance(other, BindingDict): + raise TypeError + return self.d == other.d + + def __str__(self): + return "{" + ", ".join(f"{v}: {self.d[v]}" for v in 
sorted(self.d.keys())) + "}" + + def __repr__(self): + return "BindingDict: %s" % self + + +class VariableBindingException(Exception): + pass + + +class UnificationException(Exception): + def __init__(self, a, b, bindings): + Exception.__init__(self, f"Cannot unify {a} with {b} given {bindings}") + + +class LinearLogicApplicationException(Exception): + pass + + +def demo(): + lexpr = Expression.fromstring + + print(lexpr(r"f")) + print(lexpr(r"(g -o f)")) + print(lexpr(r"((g -o G) -o G)")) + print(lexpr(r"g -o h -o f")) + print(lexpr(r"(g -o f)(g)").simplify()) + print(lexpr(r"(H -o f)(g)").simplify()) + print(lexpr(r"((g -o G) -o G)((g -o f))").simplify()) + print(lexpr(r"(H -o H)((g -o f))").simplify()) + + +if __name__ == "__main__": + demo() diff --git a/lib/python3.10/site-packages/nltk/sem/logic.py b/lib/python3.10/site-packages/nltk/sem/logic.py new file mode 100644 index 0000000000000000000000000000000000000000..aed3a118760b0a9111fc0445df870231f943e1e3 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/sem/logic.py @@ -0,0 +1,2065 @@ +# Natural Language Toolkit: Logic +# +# Author: Dan Garrette +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +""" +A version of first order predicate logic, built on +top of the typed lambda calculus. +""" + +import operator +import re +from collections import defaultdict +from functools import reduce, total_ordering + +from nltk.internals import Counter +from nltk.util import Trie + +APP = "APP" + +_counter = Counter() + + +class Tokens: + LAMBDA = "\\" + LAMBDA_LIST = ["\\"] + + # Quantifiers + EXISTS = "exists" + EXISTS_LIST = ["some", "exists", "exist"] + ALL = "all" + ALL_LIST = ["all", "forall"] + IOTA = "iota" + IOTA_LIST = ["iota"] + + # Punctuation + DOT = "." 
+ OPEN = "(" + CLOSE = ")" + COMMA = "," + + # Operations + NOT = "-" + NOT_LIST = ["not", "-", "!"] + AND = "&" + AND_LIST = ["and", "&", "^"] + OR = "|" + OR_LIST = ["or", "|"] + IMP = "->" + IMP_LIST = ["implies", "->", "=>"] + IFF = "<->" + IFF_LIST = ["iff", "<->", "<=>"] + EQ = "=" + EQ_LIST = ["=", "=="] + NEQ = "!=" + NEQ_LIST = ["!="] + + # Collections of tokens + BINOPS = AND_LIST + OR_LIST + IMP_LIST + IFF_LIST + QUANTS = EXISTS_LIST + ALL_LIST + IOTA_LIST + PUNCT = [DOT, OPEN, CLOSE, COMMA] + + TOKENS = BINOPS + EQ_LIST + NEQ_LIST + QUANTS + LAMBDA_LIST + PUNCT + NOT_LIST + + # Special + SYMBOLS = [x for x in TOKENS if re.match(r"^[-\\.(),!&^|>=<]*$", x)] + + +def boolean_ops(): + """ + Boolean operators + """ + names = ["negation", "conjunction", "disjunction", "implication", "equivalence"] + for pair in zip(names, [Tokens.NOT, Tokens.AND, Tokens.OR, Tokens.IMP, Tokens.IFF]): + print("%-15s\t%s" % pair) + + +def equality_preds(): + """ + Equality predicates + """ + names = ["equality", "inequality"] + for pair in zip(names, [Tokens.EQ, Tokens.NEQ]): + print("%-15s\t%s" % pair) + + +def binding_ops(): + """ + Binding operators + """ + names = ["existential", "universal", "lambda"] + for pair in zip(names, [Tokens.EXISTS, Tokens.ALL, Tokens.LAMBDA, Tokens.IOTA]): + print("%-15s\t%s" % pair) + + +class LogicParser: + """A lambda calculus expression parser.""" + + def __init__(self, type_check=False): + """ + :param type_check: should type checking be performed + to their types? + :type type_check: bool + """ + assert isinstance(type_check, bool) + + self._currentIndex = 0 + self._buffer = [] + self.type_check = type_check + + """A list of tuples of quote characters. The 4-tuple is comprised + of the start character, the end character, the escape character, and + a boolean indicating whether the quotes should be included in the + result. Quotes are used to signify that a token should be treated as + atomic, ignoring any special characters within the token. 
The escape + character allows the quote end character to be used within the quote. + If True, the boolean indicates that the final token should contain the + quote and escape characters. + This method exists to be overridden""" + self.quote_chars = [] + + self.operator_precedence = dict( + [(x, 1) for x in Tokens.LAMBDA_LIST] + + [(x, 2) for x in Tokens.NOT_LIST] + + [(APP, 3)] + + [(x, 4) for x in Tokens.EQ_LIST + Tokens.NEQ_LIST] + + [(x, 5) for x in Tokens.QUANTS] + + [(x, 6) for x in Tokens.AND_LIST] + + [(x, 7) for x in Tokens.OR_LIST] + + [(x, 8) for x in Tokens.IMP_LIST] + + [(x, 9) for x in Tokens.IFF_LIST] + + [(None, 10)] + ) + self.right_associated_operations = [APP] + + def parse(self, data, signature=None): + """ + Parse the expression. + + :param data: str for the input to be parsed + :param signature: ``dict`` that maps variable names to type + strings + :returns: a parsed Expression + """ + data = data.rstrip() + + self._currentIndex = 0 + self._buffer, mapping = self.process(data) + + try: + result = self.process_next_expression(None) + if self.inRange(0): + raise UnexpectedTokenException(self._currentIndex + 1, self.token(0)) + except LogicalExpressionException as e: + msg = "{}\n{}\n{}^".format(e, data, " " * mapping[e.index - 1]) + raise LogicalExpressionException(None, msg) from e + + if self.type_check: + result.typecheck(signature) + + return result + + def process(self, data): + """Split the data into tokens""" + out = [] + mapping = {} + tokenTrie = Trie(self.get_all_symbols()) + token = "" + data_idx = 0 + token_start_idx = data_idx + while data_idx < len(data): + cur_data_idx = data_idx + quoted_token, data_idx = self.process_quoted_token(data_idx, data) + if quoted_token: + if not token: + token_start_idx = cur_data_idx + token += quoted_token + continue + + st = tokenTrie + c = data[data_idx] + symbol = "" + while c in st: + symbol += c + st = st[c] + if len(data) - data_idx > len(symbol): + c = data[data_idx + len(symbol)] + else: + 
break + if Trie.LEAF in st: + # token is a complete symbol + if token: + mapping[len(out)] = token_start_idx + out.append(token) + token = "" + mapping[len(out)] = data_idx + out.append(symbol) + data_idx += len(symbol) + else: + if data[data_idx] in " \t\n": # any whitespace + if token: + mapping[len(out)] = token_start_idx + out.append(token) + token = "" + else: + if not token: + token_start_idx = data_idx + token += data[data_idx] + data_idx += 1 + if token: + mapping[len(out)] = token_start_idx + out.append(token) + mapping[len(out)] = len(data) + mapping[len(out) + 1] = len(data) + 1 + return out, mapping + + def process_quoted_token(self, data_idx, data): + token = "" + c = data[data_idx] + i = data_idx + for start, end, escape, incl_quotes in self.quote_chars: + if c == start: + if incl_quotes: + token += c + i += 1 + while data[i] != end: + if data[i] == escape: + if incl_quotes: + token += data[i] + i += 1 + if len(data) == i: # if there are no more chars + raise LogicalExpressionException( + None, + "End of input reached. " + "Escape character [%s] found at end." % escape, + ) + token += data[i] + else: + token += data[i] + i += 1 + if len(data) == i: + raise LogicalExpressionException( + None, "End of input reached. " "Expected: [%s]" % end + ) + if incl_quotes: + token += data[i] + i += 1 + if not token: + raise LogicalExpressionException(None, "Empty quoted token found") + break + return token, i + + def get_all_symbols(self): + """This method exists to be overridden""" + return Tokens.SYMBOLS + + def inRange(self, location): + """Return TRUE if the given location is within the buffer""" + return self._currentIndex + location < len(self._buffer) + + def token(self, location=None): + """Get the next waiting token. 
If a location is given, then + return the token at currentIndex+location without advancing + currentIndex; setting it gives lookahead/lookback capability.""" + try: + if location is None: + tok = self._buffer[self._currentIndex] + self._currentIndex += 1 + else: + tok = self._buffer[self._currentIndex + location] + return tok + except IndexError as e: + raise ExpectedMoreTokensException(self._currentIndex + 1) from e + + def isvariable(self, tok): + return tok not in Tokens.TOKENS + + def process_next_expression(self, context): + """Parse the next complete expression from the stream and return it.""" + try: + tok = self.token() + except ExpectedMoreTokensException as e: + raise ExpectedMoreTokensException( + self._currentIndex + 1, message="Expression expected." + ) from e + + accum = self.handle(tok, context) + + if not accum: + raise UnexpectedTokenException( + self._currentIndex, tok, message="Expression expected." + ) + + return self.attempt_adjuncts(accum, context) + + def handle(self, tok, context): + """This method is intended to be overridden for logics that + use different operators or expressions""" + if self.isvariable(tok): + return self.handle_variable(tok, context) + + elif tok in Tokens.NOT_LIST: + return self.handle_negation(tok, context) + + elif tok in Tokens.LAMBDA_LIST: + return self.handle_lambda(tok, context) + + elif tok in Tokens.QUANTS: + return self.handle_quant(tok, context) + + elif tok == Tokens.OPEN: + return self.handle_open(tok, context) + + def attempt_adjuncts(self, expression, context): + cur_idx = None + while cur_idx != self._currentIndex: # while adjuncts are added + cur_idx = self._currentIndex + expression = self.attempt_EqualityExpression(expression, context) + expression = self.attempt_ApplicationExpression(expression, context) + expression = self.attempt_BooleanExpression(expression, context) + return expression + + def handle_negation(self, tok, context): + return 
self.make_NegatedExpression(self.process_next_expression(Tokens.NOT)) + + def make_NegatedExpression(self, expression): + return NegatedExpression(expression) + + def handle_variable(self, tok, context): + # It's either: 1) a predicate expression: sees(x,y) + # 2) an application expression: P(x) + # 3) a solo variable: john OR x + accum = self.make_VariableExpression(tok) + if self.inRange(0) and self.token(0) == Tokens.OPEN: + # The predicate has arguments + if not isinstance(accum, FunctionVariableExpression) and not isinstance( + accum, ConstantExpression + ): + raise LogicalExpressionException( + self._currentIndex, + "'%s' is an illegal predicate name. " + "Individual variables may not be used as " + "predicates." % tok, + ) + self.token() # swallow the Open Paren + + # curry the arguments + accum = self.make_ApplicationExpression( + accum, self.process_next_expression(APP) + ) + while self.inRange(0) and self.token(0) == Tokens.COMMA: + self.token() # swallow the comma + accum = self.make_ApplicationExpression( + accum, self.process_next_expression(APP) + ) + self.assertNextToken(Tokens.CLOSE) + return accum + + def get_next_token_variable(self, description): + try: + tok = self.token() + except ExpectedMoreTokensException as e: + raise ExpectedMoreTokensException(e.index, "Variable expected.") from e + if isinstance(self.make_VariableExpression(tok), ConstantExpression): + raise LogicalExpressionException( + self._currentIndex, + "'%s' is an illegal variable name. " + "Constants may not be %s." 
% (tok, description), + ) + return Variable(tok) + + def handle_lambda(self, tok, context): + # Expression is a lambda expression + if not self.inRange(0): + raise ExpectedMoreTokensException( + self._currentIndex + 2, + message="Variable and Expression expected following lambda operator.", + ) + vars = [self.get_next_token_variable("abstracted")] + while True: + if not self.inRange(0) or ( + self.token(0) == Tokens.DOT and not self.inRange(1) + ): + raise ExpectedMoreTokensException( + self._currentIndex + 2, message="Expression expected." + ) + if not self.isvariable(self.token(0)): + break + # Support expressions like: \x y.M == \x.\y.M + vars.append(self.get_next_token_variable("abstracted")) + if self.inRange(0) and self.token(0) == Tokens.DOT: + self.token() # swallow the dot + + accum = self.process_next_expression(tok) + while vars: + accum = self.make_LambdaExpression(vars.pop(), accum) + return accum + + def handle_quant(self, tok, context): + # Expression is a quantified expression: some x.M + factory = self.get_QuantifiedExpression_factory(tok) + + if not self.inRange(0): + raise ExpectedMoreTokensException( + self._currentIndex + 2, + message="Variable and Expression expected following quantifier '%s'." + % tok, + ) + vars = [self.get_next_token_variable("quantified")] + while True: + if not self.inRange(0) or ( + self.token(0) == Tokens.DOT and not self.inRange(1) + ): + raise ExpectedMoreTokensException( + self._currentIndex + 2, message="Expression expected." 
+ ) + if not self.isvariable(self.token(0)): + break + # Support expressions like: some x y.M == some x.some y.M + vars.append(self.get_next_token_variable("quantified")) + if self.inRange(0) and self.token(0) == Tokens.DOT: + self.token() # swallow the dot + + accum = self.process_next_expression(tok) + while vars: + accum = self.make_QuanifiedExpression(factory, vars.pop(), accum) + return accum + + def get_QuantifiedExpression_factory(self, tok): + """This method serves as a hook for other logic parsers that + have different quantifiers""" + if tok in Tokens.EXISTS_LIST: + return ExistsExpression + elif tok in Tokens.ALL_LIST: + return AllExpression + elif tok in Tokens.IOTA_LIST: + return IotaExpression + else: + self.assertToken(tok, Tokens.QUANTS) + + def make_QuanifiedExpression(self, factory, variable, term): + return factory(variable, term) + + def handle_open(self, tok, context): + # Expression is in parens + accum = self.process_next_expression(None) + self.assertNextToken(Tokens.CLOSE) + return accum + + def attempt_EqualityExpression(self, expression, context): + """Attempt to make an equality expression. If the next token is an + equality operator, then an EqualityExpression will be returned. + Otherwise, the parameter will be returned.""" + if self.inRange(0): + tok = self.token(0) + if tok in Tokens.EQ_LIST + Tokens.NEQ_LIST and self.has_priority( + tok, context + ): + self.token() # swallow the "=" or "!=" + expression = self.make_EqualityExpression( + expression, self.process_next_expression(tok) + ) + if tok in Tokens.NEQ_LIST: + expression = self.make_NegatedExpression(expression) + return expression + + def make_EqualityExpression(self, first, second): + """This method serves as a hook for other logic parsers that + have different equality expression classes""" + return EqualityExpression(first, second) + + def attempt_BooleanExpression(self, expression, context): + """Attempt to make a boolean expression. 
If the next token is a boolean + operator, then a BooleanExpression will be returned. Otherwise, the + parameter will be returned.""" + while self.inRange(0): + tok = self.token(0) + factory = self.get_BooleanExpression_factory(tok) + if factory and self.has_priority(tok, context): + self.token() # swallow the operator + expression = self.make_BooleanExpression( + factory, expression, self.process_next_expression(tok) + ) + else: + break + return expression + + def get_BooleanExpression_factory(self, tok): + """This method serves as a hook for other logic parsers that + have different boolean operators""" + if tok in Tokens.AND_LIST: + return AndExpression + elif tok in Tokens.OR_LIST: + return OrExpression + elif tok in Tokens.IMP_LIST: + return ImpExpression + elif tok in Tokens.IFF_LIST: + return IffExpression + else: + return None + + def make_BooleanExpression(self, factory, first, second): + return factory(first, second) + + def attempt_ApplicationExpression(self, expression, context): + """Attempt to make an application expression. The next tokens are + a list of arguments in parens, then the argument expression is a + function being applied to the arguments. 
Otherwise, return the + argument expression.""" + if self.has_priority(APP, context): + if self.inRange(0) and self.token(0) == Tokens.OPEN: + if ( + not isinstance(expression, LambdaExpression) + and not isinstance(expression, ApplicationExpression) + and not isinstance(expression, FunctionVariableExpression) + and not isinstance(expression, ConstantExpression) + ): + raise LogicalExpressionException( + self._currentIndex, + ("The function '%s" % expression) + + "' is not a Lambda Expression, an " + "Application Expression, or a " + "functional predicate, so it may " + "not take arguments.", + ) + self.token() # swallow then open paren + # curry the arguments + accum = self.make_ApplicationExpression( + expression, self.process_next_expression(APP) + ) + while self.inRange(0) and self.token(0) == Tokens.COMMA: + self.token() # swallow the comma + accum = self.make_ApplicationExpression( + accum, self.process_next_expression(APP) + ) + self.assertNextToken(Tokens.CLOSE) + return accum + return expression + + def make_ApplicationExpression(self, function, argument): + return ApplicationExpression(function, argument) + + def make_VariableExpression(self, name): + return VariableExpression(Variable(name)) + + def make_LambdaExpression(self, variable, term): + return LambdaExpression(variable, term) + + def has_priority(self, operation, context): + return self.operator_precedence[operation] < self.operator_precedence[ + context + ] or ( + operation in self.right_associated_operations + and self.operator_precedence[operation] == self.operator_precedence[context] + ) + + def assertNextToken(self, expected): + try: + tok = self.token() + except ExpectedMoreTokensException as e: + raise ExpectedMoreTokensException( + e.index, message="Expected token '%s'." 
% expected + ) from e + + if isinstance(expected, list): + if tok not in expected: + raise UnexpectedTokenException(self._currentIndex, tok, expected) + else: + if tok != expected: + raise UnexpectedTokenException(self._currentIndex, tok, expected) + + def assertToken(self, tok, expected): + if isinstance(expected, list): + if tok not in expected: + raise UnexpectedTokenException(self._currentIndex, tok, expected) + else: + if tok != expected: + raise UnexpectedTokenException(self._currentIndex, tok, expected) + + def __repr__(self): + if self.inRange(0): + msg = "Next token: " + self.token(0) + else: + msg = "No more tokens" + return "<" + self.__class__.__name__ + ": " + msg + ">" + + +def read_logic(s, logic_parser=None, encoding=None): + """ + Convert a file of First Order Formulas into a list of {Expression}s. + + :param s: the contents of the file + :type s: str + :param logic_parser: The parser to be used to parse the logical expression + :type logic_parser: LogicParser + :param encoding: the encoding of the input string, if it is binary + :type encoding: str + :return: a list of parsed formulas. 
+ :rtype: list(Expression) + """ + if encoding is not None: + s = s.decode(encoding) + if logic_parser is None: + logic_parser = LogicParser() + + statements = [] + for linenum, line in enumerate(s.splitlines()): + line = line.strip() + if line.startswith("#") or line == "": + continue + try: + statements.append(logic_parser.parse(line)) + except LogicalExpressionException as e: + raise ValueError(f"Unable to parse line {linenum}: {line}") from e + return statements + + +@total_ordering +class Variable: + def __init__(self, name): + """ + :param name: the name of the variable + """ + assert isinstance(name, str), "%s is not a string" % name + self.name = name + + def __eq__(self, other): + return isinstance(other, Variable) and self.name == other.name + + def __ne__(self, other): + return not self == other + + def __lt__(self, other): + if not isinstance(other, Variable): + raise TypeError + return self.name < other.name + + def substitute_bindings(self, bindings): + return bindings.get(self, self) + + def __hash__(self): + return hash(self.name) + + def __str__(self): + return self.name + + def __repr__(self): + return "Variable('%s')" % self.name + + +def unique_variable(pattern=None, ignore=None): + """ + Return a new, unique variable. + + :param pattern: ``Variable`` that is being replaced. The new variable must + be the same type. + :param term: a set of ``Variable`` objects that should not be returned from + this function. 
+ :rtype: Variable + """ + if pattern is not None: + if is_indvar(pattern.name): + prefix = "z" + elif is_funcvar(pattern.name): + prefix = "F" + elif is_eventvar(pattern.name): + prefix = "e0" + else: + assert False, "Cannot generate a unique constant" + else: + prefix = "z" + + v = Variable(f"{prefix}{_counter.get()}") + while ignore is not None and v in ignore: + v = Variable(f"{prefix}{_counter.get()}") + return v + + +def skolem_function(univ_scope=None): + """ + Return a skolem function over the variables in univ_scope + param univ_scope + """ + skolem = VariableExpression(Variable("F%s" % _counter.get())) + if univ_scope: + for v in list(univ_scope): + skolem = skolem(VariableExpression(v)) + return skolem + + +class Type: + def __repr__(self): + return "%s" % self + + def __hash__(self): + return hash("%s" % self) + + @classmethod + def fromstring(cls, s): + return read_type(s) + + +class ComplexType(Type): + def __init__(self, first, second): + assert isinstance(first, Type), "%s is not a Type" % first + assert isinstance(second, Type), "%s is not a Type" % second + self.first = first + self.second = second + + def __eq__(self, other): + return ( + isinstance(other, ComplexType) + and self.first == other.first + and self.second == other.second + ) + + def __ne__(self, other): + return not self == other + + __hash__ = Type.__hash__ + + def matches(self, other): + if isinstance(other, ComplexType): + return self.first.matches(other.first) and self.second.matches(other.second) + else: + return self == ANY_TYPE + + def resolve(self, other): + if other == ANY_TYPE: + return self + elif isinstance(other, ComplexType): + f = self.first.resolve(other.first) + s = self.second.resolve(other.second) + if f and s: + return ComplexType(f, s) + else: + return None + elif self == ANY_TYPE: + return other + else: + return None + + def __str__(self): + if self == ANY_TYPE: + return "%s" % ANY_TYPE + else: + return f"<{self.first},{self.second}>" + + def str(self): + if 
self == ANY_TYPE: + return ANY_TYPE.str() + else: + return f"({self.first.str()} -> {self.second.str()})" + + +class BasicType(Type): + def __eq__(self, other): + return isinstance(other, BasicType) and ("%s" % self) == ("%s" % other) + + def __ne__(self, other): + return not self == other + + __hash__ = Type.__hash__ + + def matches(self, other): + return other == ANY_TYPE or self == other + + def resolve(self, other): + if self.matches(other): + return self + else: + return None + + +class EntityType(BasicType): + def __str__(self): + return "e" + + def str(self): + return "IND" + + +class TruthValueType(BasicType): + def __str__(self): + return "t" + + def str(self): + return "BOOL" + + +class EventType(BasicType): + def __str__(self): + return "v" + + def str(self): + return "EVENT" + + +class AnyType(BasicType, ComplexType): + def __init__(self): + pass + + @property + def first(self): + return self + + @property + def second(self): + return self + + def __eq__(self, other): + return isinstance(other, AnyType) or other.__eq__(self) + + def __ne__(self, other): + return not self == other + + __hash__ = Type.__hash__ + + def matches(self, other): + return True + + def resolve(self, other): + return other + + def __str__(self): + return "?" 
+ + def str(self): + return "ANY" + + +TRUTH_TYPE = TruthValueType() +ENTITY_TYPE = EntityType() +EVENT_TYPE = EventType() +ANY_TYPE = AnyType() + + +def read_type(type_string): + assert isinstance(type_string, str) + type_string = type_string.replace(" ", "") # remove spaces + + if type_string[0] == "<": + assert type_string[-1] == ">" + paren_count = 0 + for i, char in enumerate(type_string): + if char == "<": + paren_count += 1 + elif char == ">": + paren_count -= 1 + assert paren_count > 0 + elif char == ",": + if paren_count == 1: + break + return ComplexType( + read_type(type_string[1:i]), read_type(type_string[i + 1 : -1]) + ) + elif type_string[0] == "%s" % ENTITY_TYPE: + return ENTITY_TYPE + elif type_string[0] == "%s" % TRUTH_TYPE: + return TRUTH_TYPE + elif type_string[0] == "%s" % ANY_TYPE: + return ANY_TYPE + else: + raise LogicalExpressionException( + None, "Unexpected character: '%s'." % type_string[0] + ) + + +class TypeException(Exception): + def __init__(self, msg): + super().__init__(msg) + + +class InconsistentTypeHierarchyException(TypeException): + def __init__(self, variable, expression=None): + if expression: + msg = ( + "The variable '%s' was found in multiple places with different" + " types in '%s'." % (variable, expression) + ) + else: + msg = ( + "The variable '%s' was found in multiple places with different" + " types." % (variable) + ) + super().__init__(msg) + + +class TypeResolutionException(TypeException): + def __init__(self, expression, other_type): + super().__init__( + "The type of '%s', '%s', cannot be resolved with type '%s'" + % (expression, expression.type, other_type) + ) + + +class IllegalTypeException(TypeException): + def __init__(self, expression, other_type, allowed_type): + super().__init__( + "Cannot set type of %s '%s' to '%s'; must match type '%s'." 
+ % (expression.__class__.__name__, expression, other_type, allowed_type) + ) + + +def typecheck(expressions, signature=None): + """ + Ensure correct typing across a collection of ``Expression`` objects. + :param expressions: a collection of expressions + :param signature: dict that maps variable names to types (or string + representations of types) + """ + # typecheck and create master signature + for expression in expressions: + signature = expression.typecheck(signature) + # apply master signature to all expressions + for expression in expressions[:-1]: + expression.typecheck(signature) + return signature + + +class SubstituteBindingsI: + """ + An interface for classes that can perform substitutions for + variables. + """ + + def substitute_bindings(self, bindings): + """ + :return: The object that is obtained by replacing + each variable bound by ``bindings`` with its values. + Aliases are already resolved. (maybe?) + :rtype: (any) + """ + raise NotImplementedError() + + def variables(self): + """ + :return: A list of all variables in this object. + """ + raise NotImplementedError() + + +class Expression(SubstituteBindingsI): + """This is the base abstract object for all logical expressions""" + + _logic_parser = LogicParser() + _type_checking_logic_parser = LogicParser(type_check=True) + + @classmethod + def fromstring(cls, s, type_check=False, signature=None): + if type_check: + return cls._type_checking_logic_parser.parse(s, signature) + else: + return cls._logic_parser.parse(s, signature) + + def __call__(self, other, *additional): + accum = self.applyto(other) + for a in additional: + accum = accum(a) + return accum + + def applyto(self, other): + assert isinstance(other, Expression), "%s is not an Expression" % other + return ApplicationExpression(self, other) + + def __neg__(self): + return NegatedExpression(self) + + def negate(self): + """If this is a negated expression, remove the negation. 
+ Otherwise add a negation.""" + return -self + + def __and__(self, other): + if not isinstance(other, Expression): + raise TypeError("%s is not an Expression" % other) + return AndExpression(self, other) + + def __or__(self, other): + if not isinstance(other, Expression): + raise TypeError("%s is not an Expression" % other) + return OrExpression(self, other) + + def __gt__(self, other): + if not isinstance(other, Expression): + raise TypeError("%s is not an Expression" % other) + return ImpExpression(self, other) + + def __lt__(self, other): + if not isinstance(other, Expression): + raise TypeError("%s is not an Expression" % other) + return IffExpression(self, other) + + def __eq__(self, other): + return NotImplemented + + def __ne__(self, other): + return not self == other + + def equiv(self, other, prover=None): + """ + Check for logical equivalence. + Pass the expression (self <-> other) to the theorem prover. + If the prover says it is valid, then the self and other are equal. + + :param other: an ``Expression`` to check equality against + :param prover: a ``nltk.inference.api.Prover`` + """ + assert isinstance(other, Expression), "%s is not an Expression" % other + + if prover is None: + from nltk.inference import Prover9 + + prover = Prover9() + bicond = IffExpression(self.simplify(), other.simplify()) + return prover.prove(bicond) + + def __hash__(self): + return hash(repr(self)) + + def substitute_bindings(self, bindings): + expr = self + for var in expr.variables(): + if var in bindings: + val = bindings[var] + if isinstance(val, Variable): + val = self.make_VariableExpression(val) + elif not isinstance(val, Expression): + raise ValueError( + "Can not substitute a non-expression " + "value into an expression: %r" % (val,) + ) + # Substitute bindings in the target value. + val = val.substitute_bindings(bindings) + # Replace var w/ the target value. 
+ expr = expr.replace(var, val) + return expr.simplify() + + def typecheck(self, signature=None): + """ + Infer and check types. Raise exceptions if necessary. + + :param signature: dict that maps variable names to types (or string + representations of types) + :return: the signature, plus any additional type mappings + """ + sig = defaultdict(list) + if signature: + for key in signature: + val = signature[key] + varEx = VariableExpression(Variable(key)) + if isinstance(val, Type): + varEx.type = val + else: + varEx.type = read_type(val) + sig[key].append(varEx) + + self._set_type(signature=sig) + + return {key: sig[key][0].type for key in sig} + + def findtype(self, variable): + """ + Find the type of the given variable as it is used in this expression. + For example, finding the type of "P" in "P(x) & Q(x,y)" yields "" + + :param variable: Variable + """ + raise NotImplementedError() + + def _set_type(self, other_type=ANY_TYPE, signature=None): + """ + Set the type of this expression to be the given type. Raise type + exceptions where applicable. + + :param other_type: Type + :param signature: dict(str -> list(AbstractVariableExpression)) + """ + raise NotImplementedError() + + def replace(self, variable, expression, replace_bound=False, alpha_convert=True): + """ + Replace every instance of 'variable' with 'expression' + :param variable: ``Variable`` The variable to replace + :param expression: ``Expression`` The expression with which to replace it + :param replace_bound: bool Should bound variables be replaced? + :param alpha_convert: bool Alpha convert automatically to avoid name clashes? 
+ """ + assert isinstance(variable, Variable), "%s is not a Variable" % variable + assert isinstance(expression, Expression), ( + "%s is not an Expression" % expression + ) + + return self.visit_structured( + lambda e: e.replace(variable, expression, replace_bound, alpha_convert), + self.__class__, + ) + + def normalize(self, newvars=None): + """Rename auto-generated unique variables""" + + def get_indiv_vars(e): + if isinstance(e, IndividualVariableExpression): + return {e} + elif isinstance(e, AbstractVariableExpression): + return set() + else: + return e.visit( + get_indiv_vars, lambda parts: reduce(operator.or_, parts, set()) + ) + + result = self + for i, e in enumerate(sorted(get_indiv_vars(self), key=lambda e: e.variable)): + if isinstance(e, EventVariableExpression): + newVar = e.__class__(Variable("e0%s" % (i + 1))) + elif isinstance(e, IndividualVariableExpression): + newVar = e.__class__(Variable("z%s" % (i + 1))) + else: + newVar = e + result = result.replace(e.variable, newVar, True) + return result + + def visit(self, function, combinator): + """ + Recursively visit subexpressions. Apply 'function' to each + subexpression and pass the result of each function application + to the 'combinator' for aggregation: + + return combinator(map(function, self.subexpressions)) + + Bound variables are neither applied upon by the function nor given to + the combinator. + :param function: ``Function`` to call on each subexpression + :param combinator: ``Function,R>`` to combine the results of the + function calls + :return: result of combination ``R`` + """ + raise NotImplementedError() + + def visit_structured(self, function, combinator): + """ + Recursively visit subexpressions. Apply 'function' to each + subexpression and pass the result of each function application + to the 'combinator' for aggregation. The combinator must have + the same signature as the constructor. The function is not + applied to bound variables, but they are passed to the + combinator. 
+ :param function: ``Function`` to call on each subexpression + :param combinator: ``Function`` with the same signature as the + constructor, to combine the results of the function calls + :return: result of combination + """ + return self.visit(function, lambda parts: combinator(*parts)) + + def __repr__(self): + return f"<{self.__class__.__name__} {self}>" + + def __str__(self): + return self.str() + + def variables(self): + """ + Return a set of all the variables for binding substitution. + The variables returned include all free (non-bound) individual + variables and any variable starting with '?' or '@'. + :return: set of ``Variable`` objects + """ + return self.free() | { + p for p in self.predicates() | self.constants() if re.match("^[?@]", p.name) + } + + def free(self): + """ + Return a set of all the free (non-bound) variables. This includes + both individual and predicate variables, but not constants. + :return: set of ``Variable`` objects + """ + return self.visit( + lambda e: e.free(), lambda parts: reduce(operator.or_, parts, set()) + ) + + def constants(self): + """ + Return a set of individual constants (non-predicates). + :return: set of ``Variable`` objects + """ + return self.visit( + lambda e: e.constants(), lambda parts: reduce(operator.or_, parts, set()) + ) + + def predicates(self): + """ + Return a set of predicates (constants, not variables). + :return: set of ``Variable`` objects + """ + return self.visit( + lambda e: e.predicates(), lambda parts: reduce(operator.or_, parts, set()) + ) + + def simplify(self): + """ + :return: beta-converted version of this expression + """ + return self.visit_structured(lambda e: e.simplify(), self.__class__) + + def make_VariableExpression(self, variable): + return VariableExpression(variable) + + +class ApplicationExpression(Expression): + r""" + This class is used to represent two related types of logical expressions. + + The first is a Predicate Expression, such as "P(x,y)". 
A predicate + expression is comprised of a ``FunctionVariableExpression`` or + ``ConstantExpression`` as the predicate and a list of Expressions as the + arguments. + + The second is a an application of one expression to another, such as + "(\x.dog(x))(fido)". + + The reason Predicate Expressions are treated as Application Expressions is + that the Variable Expression predicate of the expression may be replaced + with another Expression, such as a LambdaExpression, which would mean that + the Predicate should be thought of as being applied to the arguments. + + The logical expression reader will always curry arguments in a application expression. + So, "\x y.see(x,y)(john,mary)" will be represented internally as + "((\x y.(see(x))(y))(john))(mary)". This simplifies the internals since + there will always be exactly one argument in an application. + + The str() method will usually print the curried forms of application + expressions. The one exception is when the the application expression is + really a predicate expression (ie, underlying function is an + ``AbstractVariableExpression``). This means that the example from above + will be returned as "(\x y.see(x,y)(john))(mary)". 
+ """ + + def __init__(self, function, argument): + """ + :param function: ``Expression``, for the function expression + :param argument: ``Expression``, for the argument + """ + assert isinstance(function, Expression), "%s is not an Expression" % function + assert isinstance(argument, Expression), "%s is not an Expression" % argument + self.function = function + self.argument = argument + + def simplify(self): + function = self.function.simplify() + argument = self.argument.simplify() + if isinstance(function, LambdaExpression): + return function.term.replace(function.variable, argument).simplify() + else: + return self.__class__(function, argument) + + @property + def type(self): + if isinstance(self.function.type, ComplexType): + return self.function.type.second + else: + return ANY_TYPE + + def _set_type(self, other_type=ANY_TYPE, signature=None): + """:see Expression._set_type()""" + assert isinstance(other_type, Type) + + if signature is None: + signature = defaultdict(list) + + self.argument._set_type(ANY_TYPE, signature) + try: + self.function._set_type( + ComplexType(self.argument.type, other_type), signature + ) + except TypeResolutionException as e: + raise TypeException( + "The function '%s' is of type '%s' and cannot be applied " + "to '%s' of type '%s'. Its argument must match type '%s'." 
+ % ( + self.function, + self.function.type, + self.argument, + self.argument.type, + self.function.type.first, + ) + ) from e + + def findtype(self, variable): + """:see Expression.findtype()""" + assert isinstance(variable, Variable), "%s is not a Variable" % variable + if self.is_atom(): + function, args = self.uncurry() + else: + # It's not a predicate expression ("P(x,y)"), so leave args curried + function = self.function + args = [self.argument] + + found = [arg.findtype(variable) for arg in [function] + args] + + unique = [] + for f in found: + if f != ANY_TYPE: + if unique: + for u in unique: + if f.matches(u): + break + else: + unique.append(f) + + if len(unique) == 1: + return list(unique)[0] + else: + return ANY_TYPE + + def constants(self): + """:see: Expression.constants()""" + if isinstance(self.function, AbstractVariableExpression): + function_constants = set() + else: + function_constants = self.function.constants() + return function_constants | self.argument.constants() + + def predicates(self): + """:see: Expression.predicates()""" + if isinstance(self.function, ConstantExpression): + function_preds = {self.function.variable} + else: + function_preds = self.function.predicates() + return function_preds | self.argument.predicates() + + def visit(self, function, combinator): + """:see: Expression.visit()""" + return combinator([function(self.function), function(self.argument)]) + + def __eq__(self, other): + return ( + isinstance(other, ApplicationExpression) + and self.function == other.function + and self.argument == other.argument + ) + + def __ne__(self, other): + return not self == other + + __hash__ = Expression.__hash__ + + def __str__(self): + # uncurry the arguments and find the base function + if self.is_atom(): + function, args = self.uncurry() + arg_str = ",".join("%s" % arg for arg in args) + else: + # Leave arguments curried + function = self.function + arg_str = "%s" % self.argument + + function_str = "%s" % function + 
        # Decide whether the function sub-expression must be wrapped in
        # parentheses so the printed form stays unambiguous.
        parenthesize_function = False
        if isinstance(function, LambdaExpression):
            if isinstance(function.term, ApplicationExpression):
                if not isinstance(function.term.function, AbstractVariableExpression):
                    parenthesize_function = True
            elif not isinstance(function.term, BooleanExpression):
                parenthesize_function = True
        elif isinstance(function, ApplicationExpression):
            # A curried application in function position always gets parens.
            parenthesize_function = True

        if parenthesize_function:
            function_str = Tokens.OPEN + function_str + Tokens.CLOSE

        return function_str + Tokens.OPEN + arg_str + Tokens.CLOSE

    def uncurry(self):
        """
        Uncurry this application expression

        :return: A tuple (base-function, arg-list)
        """
        # Walk down nested applications, collecting arguments outermost-last.
        function = self.function
        args = [self.argument]
        while isinstance(function, ApplicationExpression):
            # (\x.\y.sees(x,y)(john))(mary)
            args.insert(0, function.argument)
            function = function.function
        return (function, args)

    @property
    def pred(self):
        """
        Return uncurried base-function.
        If this is an atom, then the result will be a variable expression.
        Otherwise, it will be a lambda expression.
        """
        return self.uncurry()[0]

    @property
    def args(self):
        """
        Return uncurried arg-list
        """
        return self.uncurry()[1]

    def is_atom(self):
        """
        Is this expression an atom (as opposed to a lambda expression applied
        to a term)?
+ """ + return isinstance(self.pred, AbstractVariableExpression) + + +@total_ordering +class AbstractVariableExpression(Expression): + """This class represents a variable to be used as a predicate or entity""" + + def __init__(self, variable): + """ + :param variable: ``Variable``, for the variable + """ + assert isinstance(variable, Variable), "%s is not a Variable" % variable + self.variable = variable + + def simplify(self): + return self + + def replace(self, variable, expression, replace_bound=False, alpha_convert=True): + """:see: Expression.replace()""" + assert isinstance(variable, Variable), "%s is not an Variable" % variable + assert isinstance(expression, Expression), ( + "%s is not an Expression" % expression + ) + if self.variable == variable: + return expression + else: + return self + + def _set_type(self, other_type=ANY_TYPE, signature=None): + """:see Expression._set_type()""" + assert isinstance(other_type, Type) + + if signature is None: + signature = defaultdict(list) + + resolution = other_type + for varEx in signature[self.variable.name]: + resolution = varEx.type.resolve(resolution) + if not resolution: + raise InconsistentTypeHierarchyException(self) + + signature[self.variable.name].append(self) + for varEx in signature[self.variable.name]: + varEx.type = resolution + + def findtype(self, variable): + """:see Expression.findtype()""" + assert isinstance(variable, Variable), "%s is not a Variable" % variable + if self.variable == variable: + return self.type + else: + return ANY_TYPE + + def predicates(self): + """:see: Expression.predicates()""" + return set() + + def __eq__(self, other): + """Allow equality between instances of ``AbstractVariableExpression`` + subtypes.""" + return ( + isinstance(other, AbstractVariableExpression) + and self.variable == other.variable + ) + + def __ne__(self, other): + return not self == other + + def __lt__(self, other): + if not isinstance(other, AbstractVariableExpression): + raise TypeError + return 
self.variable < other.variable + + __hash__ = Expression.__hash__ + + def __str__(self): + return "%s" % self.variable + + +class IndividualVariableExpression(AbstractVariableExpression): + """This class represents variables that take the form of a single lowercase + character (other than 'e') followed by zero or more digits.""" + + def _set_type(self, other_type=ANY_TYPE, signature=None): + """:see Expression._set_type()""" + assert isinstance(other_type, Type) + + if signature is None: + signature = defaultdict(list) + + if not other_type.matches(ENTITY_TYPE): + raise IllegalTypeException(self, other_type, ENTITY_TYPE) + + signature[self.variable.name].append(self) + + def _get_type(self): + return ENTITY_TYPE + + type = property(_get_type, _set_type) + + def free(self): + """:see: Expression.free()""" + return {self.variable} + + def constants(self): + """:see: Expression.constants()""" + return set() + + +class FunctionVariableExpression(AbstractVariableExpression): + """This class represents variables that take the form of a single uppercase + character followed by zero or more digits.""" + + type = ANY_TYPE + + def free(self): + """:see: Expression.free()""" + return {self.variable} + + def constants(self): + """:see: Expression.constants()""" + return set() + + +class EventVariableExpression(IndividualVariableExpression): + """This class represents variables that take the form of a single lowercase + 'e' character followed by zero or more digits.""" + + type = EVENT_TYPE + + +class ConstantExpression(AbstractVariableExpression): + """This class represents variables that do not take the form of a single + character followed by zero or more digits.""" + + type = ENTITY_TYPE + + def _set_type(self, other_type=ANY_TYPE, signature=None): + """:see Expression._set_type()""" + assert isinstance(other_type, Type) + + if signature is None: + signature = defaultdict(list) + + if other_type == ANY_TYPE: + # entity type by default, for individuals + resolution = 
ENTITY_TYPE + else: + resolution = other_type + if self.type != ENTITY_TYPE: + resolution = resolution.resolve(self.type) + + for varEx in signature[self.variable.name]: + resolution = varEx.type.resolve(resolution) + if not resolution: + raise InconsistentTypeHierarchyException(self) + + signature[self.variable.name].append(self) + for varEx in signature[self.variable.name]: + varEx.type = resolution + + def free(self): + """:see: Expression.free()""" + return set() + + def constants(self): + """:see: Expression.constants()""" + return {self.variable} + + +def VariableExpression(variable): + """ + This is a factory method that instantiates and returns a subtype of + ``AbstractVariableExpression`` appropriate for the given variable. + """ + assert isinstance(variable, Variable), "%s is not a Variable" % variable + if is_indvar(variable.name): + return IndividualVariableExpression(variable) + elif is_funcvar(variable.name): + return FunctionVariableExpression(variable) + elif is_eventvar(variable.name): + return EventVariableExpression(variable) + else: + return ConstantExpression(variable) + + +class VariableBinderExpression(Expression): + """This an abstract class for any Expression that binds a variable in an + Expression. 
This includes LambdaExpressions and Quantified Expressions""" + + def __init__(self, variable, term): + """ + :param variable: ``Variable``, for the variable + :param term: ``Expression``, for the term + """ + assert isinstance(variable, Variable), "%s is not a Variable" % variable + assert isinstance(term, Expression), "%s is not an Expression" % term + self.variable = variable + self.term = term + + def replace(self, variable, expression, replace_bound=False, alpha_convert=True): + """:see: Expression.replace()""" + assert isinstance(variable, Variable), "%s is not a Variable" % variable + assert isinstance(expression, Expression), ( + "%s is not an Expression" % expression + ) + # if the bound variable is the thing being replaced + if self.variable == variable: + if replace_bound: + assert isinstance(expression, AbstractVariableExpression), ( + "%s is not a AbstractVariableExpression" % expression + ) + return self.__class__( + expression.variable, + self.term.replace(variable, expression, True, alpha_convert), + ) + else: + return self + else: + # if the bound variable appears in the expression, then it must + # be alpha converted to avoid a conflict + if alpha_convert and self.variable in expression.free(): + self = self.alpha_convert(unique_variable(pattern=self.variable)) + + # replace in the term + return self.__class__( + self.variable, + self.term.replace(variable, expression, replace_bound, alpha_convert), + ) + + def alpha_convert(self, newvar): + """Rename all occurrences of the variable introduced by this variable + binder in the expression to ``newvar``. 
+ :param newvar: ``Variable``, for the new variable + """ + assert isinstance(newvar, Variable), "%s is not a Variable" % newvar + return self.__class__( + newvar, self.term.replace(self.variable, VariableExpression(newvar), True) + ) + + def free(self): + """:see: Expression.free()""" + return self.term.free() - {self.variable} + + def findtype(self, variable): + """:see Expression.findtype()""" + assert isinstance(variable, Variable), "%s is not a Variable" % variable + if variable == self.variable: + return ANY_TYPE + else: + return self.term.findtype(variable) + + def visit(self, function, combinator): + """:see: Expression.visit()""" + return combinator([function(self.term)]) + + def visit_structured(self, function, combinator): + """:see: Expression.visit_structured()""" + return combinator(self.variable, function(self.term)) + + def __eq__(self, other): + r"""Defines equality modulo alphabetic variance. If we are comparing + \x.M and \y.N, then check equality of M and N[x/y].""" + if isinstance(self, other.__class__) or isinstance(other, self.__class__): + if self.variable == other.variable: + return self.term == other.term + else: + # Comparing \x.M and \y.N. Relabel y in N with x and continue. 
+ varex = VariableExpression(self.variable) + return self.term == other.term.replace(other.variable, varex) + else: + return False + + def __ne__(self, other): + return not self == other + + __hash__ = Expression.__hash__ + + +class LambdaExpression(VariableBinderExpression): + @property + def type(self): + return ComplexType(self.term.findtype(self.variable), self.term.type) + + def _set_type(self, other_type=ANY_TYPE, signature=None): + """:see Expression._set_type()""" + assert isinstance(other_type, Type) + + if signature is None: + signature = defaultdict(list) + + self.term._set_type(other_type.second, signature) + if not self.type.resolve(other_type): + raise TypeResolutionException(self, other_type) + + def __str__(self): + variables = [self.variable] + term = self.term + while term.__class__ == self.__class__: + variables.append(term.variable) + term = term.term + return ( + Tokens.LAMBDA + + " ".join("%s" % v for v in variables) + + Tokens.DOT + + "%s" % term + ) + + +class QuantifiedExpression(VariableBinderExpression): + @property + def type(self): + return TRUTH_TYPE + + def _set_type(self, other_type=ANY_TYPE, signature=None): + """:see Expression._set_type()""" + assert isinstance(other_type, Type) + + if signature is None: + signature = defaultdict(list) + + if not other_type.matches(TRUTH_TYPE): + raise IllegalTypeException(self, other_type, TRUTH_TYPE) + self.term._set_type(TRUTH_TYPE, signature) + + def __str__(self): + variables = [self.variable] + term = self.term + while term.__class__ == self.__class__: + variables.append(term.variable) + term = term.term + return ( + self.getQuantifier() + + " " + + " ".join("%s" % v for v in variables) + + Tokens.DOT + + "%s" % term + ) + + +class ExistsExpression(QuantifiedExpression): + def getQuantifier(self): + return Tokens.EXISTS + + +class AllExpression(QuantifiedExpression): + def getQuantifier(self): + return Tokens.ALL + + +class IotaExpression(QuantifiedExpression): + def getQuantifier(self): + 
return Tokens.IOTA + + +class NegatedExpression(Expression): + def __init__(self, term): + assert isinstance(term, Expression), "%s is not an Expression" % term + self.term = term + + @property + def type(self): + return TRUTH_TYPE + + def _set_type(self, other_type=ANY_TYPE, signature=None): + """:see Expression._set_type()""" + assert isinstance(other_type, Type) + + if signature is None: + signature = defaultdict(list) + + if not other_type.matches(TRUTH_TYPE): + raise IllegalTypeException(self, other_type, TRUTH_TYPE) + self.term._set_type(TRUTH_TYPE, signature) + + def findtype(self, variable): + assert isinstance(variable, Variable), "%s is not a Variable" % variable + return self.term.findtype(variable) + + def visit(self, function, combinator): + """:see: Expression.visit()""" + return combinator([function(self.term)]) + + def negate(self): + """:see: Expression.negate()""" + return self.term + + def __eq__(self, other): + return isinstance(other, NegatedExpression) and self.term == other.term + + def __ne__(self, other): + return not self == other + + __hash__ = Expression.__hash__ + + def __str__(self): + return Tokens.NOT + "%s" % self.term + + +class BinaryExpression(Expression): + def __init__(self, first, second): + assert isinstance(first, Expression), "%s is not an Expression" % first + assert isinstance(second, Expression), "%s is not an Expression" % second + self.first = first + self.second = second + + @property + def type(self): + return TRUTH_TYPE + + def findtype(self, variable): + """:see Expression.findtype()""" + assert isinstance(variable, Variable), "%s is not a Variable" % variable + f = self.first.findtype(variable) + s = self.second.findtype(variable) + if f == s or s == ANY_TYPE: + return f + elif f == ANY_TYPE: + return s + else: + return ANY_TYPE + + def visit(self, function, combinator): + """:see: Expression.visit()""" + return combinator([function(self.first), function(self.second)]) + + def __eq__(self, other): + return ( + 
(isinstance(self, other.__class__) or isinstance(other, self.__class__)) + and self.first == other.first + and self.second == other.second + ) + + def __ne__(self, other): + return not self == other + + __hash__ = Expression.__hash__ + + def __str__(self): + first = self._str_subex(self.first) + second = self._str_subex(self.second) + return Tokens.OPEN + first + " " + self.getOp() + " " + second + Tokens.CLOSE + + def _str_subex(self, subex): + return "%s" % subex + + +class BooleanExpression(BinaryExpression): + def _set_type(self, other_type=ANY_TYPE, signature=None): + """:see Expression._set_type()""" + assert isinstance(other_type, Type) + + if signature is None: + signature = defaultdict(list) + + if not other_type.matches(TRUTH_TYPE): + raise IllegalTypeException(self, other_type, TRUTH_TYPE) + self.first._set_type(TRUTH_TYPE, signature) + self.second._set_type(TRUTH_TYPE, signature) + + +class AndExpression(BooleanExpression): + """This class represents conjunctions""" + + def getOp(self): + return Tokens.AND + + def _str_subex(self, subex): + s = "%s" % subex + if isinstance(subex, AndExpression): + return s[1:-1] + return s + + +class OrExpression(BooleanExpression): + """This class represents disjunctions""" + + def getOp(self): + return Tokens.OR + + def _str_subex(self, subex): + s = "%s" % subex + if isinstance(subex, OrExpression): + return s[1:-1] + return s + + +class ImpExpression(BooleanExpression): + """This class represents implications""" + + def getOp(self): + return Tokens.IMP + + +class IffExpression(BooleanExpression): + """This class represents biconditionals""" + + def getOp(self): + return Tokens.IFF + + +class EqualityExpression(BinaryExpression): + """This class represents equality expressions like "(x = y)".""" + + def _set_type(self, other_type=ANY_TYPE, signature=None): + """:see Expression._set_type()""" + assert isinstance(other_type, Type) + + if signature is None: + signature = defaultdict(list) + + if not 
other_type.matches(TRUTH_TYPE): + raise IllegalTypeException(self, other_type, TRUTH_TYPE) + self.first._set_type(ENTITY_TYPE, signature) + self.second._set_type(ENTITY_TYPE, signature) + + def getOp(self): + return Tokens.EQ + + +### Utilities + + +class LogicalExpressionException(Exception): + def __init__(self, index, message): + self.index = index + Exception.__init__(self, message) + + +class UnexpectedTokenException(LogicalExpressionException): + def __init__(self, index, unexpected=None, expected=None, message=None): + if unexpected and expected: + msg = "Unexpected token: '%s'. " "Expected token '%s'." % ( + unexpected, + expected, + ) + elif unexpected: + msg = "Unexpected token: '%s'." % unexpected + if message: + msg += " " + message + else: + msg = "Expected token '%s'." % expected + LogicalExpressionException.__init__(self, index, msg) + + +class ExpectedMoreTokensException(LogicalExpressionException): + def __init__(self, index, message=None): + if not message: + message = "More tokens expected." + LogicalExpressionException.__init__( + self, index, "End of input found. " + message + ) + + +def is_indvar(expr): + """ + An individual variable must be a single lowercase character other than 'e', + followed by zero or more digits. + + :param expr: str + :return: bool True if expr is of the correct form + """ + assert isinstance(expr, str), "%s is not a string" % expr + return re.match(r"^[a-df-z]\d*$", expr) is not None + + +def is_funcvar(expr): + """ + A function variable must be a single uppercase character followed by + zero or more digits. + + :param expr: str + :return: bool True if expr is of the correct form + """ + assert isinstance(expr, str), "%s is not a string" % expr + return re.match(r"^[A-Z]\d*$", expr) is not None + + +def is_eventvar(expr): + """ + An event variable must be a single lowercase 'e' character followed by + zero or more digits. 
+ + :param expr: str + :return: bool True if expr is of the correct form + """ + assert isinstance(expr, str), "%s is not a string" % expr + return re.match(r"^e\d*$", expr) is not None + + +def demo(): + lexpr = Expression.fromstring + print("=" * 20 + "Test reader" + "=" * 20) + print(lexpr(r"john")) + print(lexpr(r"man(x)")) + print(lexpr(r"-man(x)")) + print(lexpr(r"(man(x) & tall(x) & walks(x))")) + print(lexpr(r"exists x.(man(x) & tall(x) & walks(x))")) + print(lexpr(r"\x.man(x)")) + print(lexpr(r"\x.man(x)(john)")) + print(lexpr(r"\x y.sees(x,y)")) + print(lexpr(r"\x y.sees(x,y)(a,b)")) + print(lexpr(r"(\x.exists y.walks(x,y))(x)")) + print(lexpr(r"exists x.x = y")) + print(lexpr(r"exists x.(x = y)")) + print(lexpr("P(x) & x=y & P(y)")) + print(lexpr(r"\P Q.exists x.(P(x) & Q(x))")) + print(lexpr(r"man(x) <-> tall(x)")) + + print("=" * 20 + "Test simplify" + "=" * 20) + print(lexpr(r"\x.\y.sees(x,y)(john)(mary)").simplify()) + print(lexpr(r"\x.\y.sees(x,y)(john, mary)").simplify()) + print(lexpr(r"all x.(man(x) & (\x.exists y.walks(x,y))(x))").simplify()) + print(lexpr(r"(\P.\Q.exists x.(P(x) & Q(x)))(\x.dog(x))(\x.bark(x))").simplify()) + + print("=" * 20 + "Test alpha conversion and binder expression equality" + "=" * 20) + e1 = lexpr("exists x.P(x)") + print(e1) + e2 = e1.alpha_convert(Variable("z")) + print(e2) + print(e1 == e2) + + +def demo_errors(): + print("=" * 20 + "Test reader errors" + "=" * 20) + demoException("(P(x) & Q(x)") + demoException("((P(x) &) & Q(x))") + demoException("P(x) -> ") + demoException("P(x") + demoException("P(x,") + demoException("P(x,)") + demoException("exists") + demoException("exists x.") + demoException("\\") + demoException("\\ x y.") + demoException("P(x)Q(x)") + demoException("(P(x)Q(x)") + demoException("exists x -> y") + + +def demoException(s): + try: + Expression.fromstring(s) + except LogicalExpressionException as e: + print(f"{e.__class__.__name__}: {e}") + + +def printtype(ex): + print(f"{ex.str()} : 
{ex.type}") + + +if __name__ == "__main__": + demo() +# demo_errors() diff --git a/lib/python3.10/site-packages/nltk/sem/relextract.py b/lib/python3.10/site-packages/nltk/sem/relextract.py new file mode 100644 index 0000000000000000000000000000000000000000..fcf755a3c4ab91678ae2965b96e79235a7c59120 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/sem/relextract.py @@ -0,0 +1,539 @@ +# Natural Language Toolkit: Relation Extraction +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# URL: +# For license information, see LICENSE.TXT + +""" +Code for extracting relational triples from the ieer and conll2002 corpora. + +Relations are stored internally as dictionaries ('reldicts'). + +The two serialization outputs are "rtuple" and "clause". + +- An rtuple is a tuple of the form ``(subj, filler, obj)``, + where ``subj`` and ``obj`` are pairs of Named Entity mentions, and ``filler`` is the string of words + occurring between ``sub`` and ``obj`` (with no intervening NEs). Strings are printed via ``repr()`` to + circumvent locale variations in rendering utf-8 encoded strings. +- A clause is an atom of the form ``relsym(subjsym, objsym)``, + where the relation, subject and object have been canonicalized to single strings. +""" + +# todo: get a more general solution to canonicalized symbols for clauses -- maybe use xmlcharrefs? 
+ +import html +import re +from collections import defaultdict + +# Dictionary that associates corpora with NE classes +NE_CLASSES = { + "ieer": [ + "LOCATION", + "ORGANIZATION", + "PERSON", + "DURATION", + "DATE", + "CARDINAL", + "PERCENT", + "MONEY", + "MEASURE", + ], + "conll2002": ["LOC", "PER", "ORG"], + "ace": [ + "LOCATION", + "ORGANIZATION", + "PERSON", + "DURATION", + "DATE", + "CARDINAL", + "PERCENT", + "MONEY", + "MEASURE", + "FACILITY", + "GPE", + ], +} + +# Allow abbreviated class labels +short2long = dict(LOC="LOCATION", ORG="ORGANIZATION", PER="PERSON") +long2short = dict(LOCATION="LOC", ORGANIZATION="ORG", PERSON="PER") + + +def _expand(type): + """ + Expand an NE class name. + :type type: str + :rtype: str + """ + try: + return short2long[type] + except KeyError: + return type + + +def class_abbrev(type): + """ + Abbreviate an NE class name. + :type type: str + :rtype: str + """ + try: + return long2short[type] + except KeyError: + return type + + +def _join(lst, sep=" ", untag=False): + """ + Join a list into a string, turning tags tuples into tag strings or just words. + :param untag: if ``True``, omit the tag from tagged input strings. + :type lst: list + :rtype: str + """ + try: + return sep.join(lst) + except TypeError: + if untag: + return sep.join(tup[0] for tup in lst) + from nltk.tag import tuple2str + + return sep.join(tuple2str(tup) for tup in lst) + + +def descape_entity(m, defs=html.entities.entitydefs): + """ + Translate one entity to its ISO Latin value. + Inspired by example from effbot.org + + + """ + try: + return defs[m.group(1)] + + except KeyError: + return m.group(0) # use as is + + +def list2sym(lst): + """ + Convert a list of strings into a canonical symbol. 
    :type lst: list
    :return: a Unicode string without whitespace
    :rtype: unicode
    """
    sym = _join(lst, "_", untag=True)
    sym = sym.lower()
    # Decode HTML character entities (e.g. "&amp;") via descape_entity.
    ENT = re.compile(r"&(\w+?);")
    sym = ENT.sub(descape_entity, sym)
    # Drop periods so the symbol is a single clean token.
    sym = sym.replace(".", "")
    return sym


def tree2semi_rel(tree):
    """
    Group a chunk structure into a list of 'semi-relations' of the form (list(str), ``Tree``).

    In order to facilitate the construction of (``Tree``, string, ``Tree``) triples, this
    identifies pairs whose first member is a list (possibly empty) of terminal
    strings, and whose second member is a ``Tree`` of the form (NE_label, terminals).

    :param tree: a chunk tree
    :return: a list of pairs (list(str), ``Tree``)
    :rtype: list of tuple
    """

    from nltk.tree import Tree

    semi_rels = []         # completed (words, NE-subtree) pairs
    semi_rel = [[], None]  # pair currently being accumulated

    for dtr in tree:
        if not isinstance(dtr, Tree):
            # Plain token: extend the word-list half of the current pair.
            semi_rel[0].append(dtr)
        else:
            # dtr is a Tree
            # NE subtree: complete the current pair and start a fresh one.
            # NOTE(review): trailing words after the final subtree are
            # discarded, since a pair is only emitted once its Tree slot fills.
            semi_rel[1] = dtr
            semi_rels.append(semi_rel)
            semi_rel = [[], None]
    return semi_rels


def semi_rel2reldict(pairs, window=5, trace=False):
    """
    Converts the pairs generated by ``tree2semi_rel`` into a 'reldict': a dictionary which
    stores information about the subject and object NEs plus the filler between them.
    Additionally, a left and right context of length <= window are captured (within
    a given input sentence).

    :param pairs: a list of (list(str), ``Tree``) pairs, as generated by
        ``tree2semi_rel``
    :param window: a threshold for the number of items to include in the left and right context
    :type window: int
    :return: 'relation' dictionaries whose keys are 'lcon', 'subjclass', 'subjtext', 'subjsym', 'filler', 'objclass', 'objtext', 'objsym' and 'rcon'
    :rtype: list(defaultdict)
    """
    result = []
    # Slide over consecutive triples of pairs: pairs[0] supplies the subject
    # NE and left context, pairs[1] the filler and object NE, and pairs[2]
    # the right context.
    while len(pairs) > 2:
        reldict = defaultdict(str)
        reldict["lcon"] = _join(pairs[0][0][-window:])
        reldict["subjclass"] = pairs[0][1].label()
        reldict["subjtext"] = _join(pairs[0][1].leaves())
        reldict["subjsym"] = list2sym(pairs[0][1].leaves())
        reldict["filler"] = _join(pairs[1][0])
        reldict["untagged_filler"] = _join(pairs[1][0], untag=True)
        reldict["objclass"] = pairs[1][1].label()
        reldict["objtext"] = _join(pairs[1][1].leaves())
        reldict["objsym"] = list2sym(pairs[1][1].leaves())
        reldict["rcon"] = _join(pairs[2][0][:window])
        if trace:
            print(
                "(%s(%s, %s)"
                % (
                    reldict["untagged_filler"],
                    reldict["subjclass"],
                    reldict["objclass"],
                )
            )
        result.append(reldict)
        # Advance the window by one pair.
        pairs = pairs[1:]
    return result


def extract_rels(subjclass, objclass, doc, corpus="ace", pattern=None, window=10):
    """
    Filter the output of ``semi_rel2reldict`` according to specified NE classes and a filler pattern.

    The parameters ``subjclass`` and ``objclass`` can be used to restrict the
    Named Entities to particular types (any of 'LOCATION', 'ORGANIZATION',
    'PERSON', 'DURATION', 'DATE', 'CARDINAL', 'PERCENT', 'MONEY', 'MEASURE').

    :param subjclass: the class of the subject Named Entity.
    :type subjclass: str
    :param objclass: the class of the object Named Entity.
    :type objclass: str
    :param doc: input document
    :type doc: ieer document or a list of chunk trees
    :param corpus: name of the corpus to take as input; possible values are
        'ieer' and 'conll2002'
    :type corpus: str
    :param pattern: a regular expression for filtering the fillers of
        retrieved triples.
    :type pattern: SRE_Pattern
    :param window: filters out fillers which exceed this threshold
    :type window: int
    :return: see ``mk_reldicts``
    :rtype: list(defaultdict)
    """

    # Accept abbreviated NE labels (e.g. 'ORG') by expanding them to the
    # long form used by the corpus; otherwise reject unknown labels.
    if subjclass and subjclass not in NE_CLASSES[corpus]:
        if _expand(subjclass) in NE_CLASSES[corpus]:
            subjclass = _expand(subjclass)
        else:
            raise ValueError(
                "your value for the subject type has not been recognized: %s"
                % subjclass
            )
    if objclass and objclass not in NE_CLASSES[corpus]:
        if _expand(objclass) in NE_CLASSES[corpus]:
            objclass = _expand(objclass)
        else:
            raise ValueError(
                "your value for the object type has not been recognized: %s" % objclass
            )

    # 'ace'/'conll2002' docs are chunk trees; 'ieer' docs expose .text and
    # .headline trees which are processed together.
    if corpus == "ace" or corpus == "conll2002":
        pairs = tree2semi_rel(doc)
    elif corpus == "ieer":
        pairs = tree2semi_rel(doc.text) + tree2semi_rel(doc.headline)
    else:
        raise ValueError("corpus type not recognized")

    reldicts = semi_rel2reldict(pairs)

    # Keep reldicts whose NE classes match and whose filler both fits the
    # window and matches the pattern.
    # NOTE(review): pattern.match raises AttributeError when pattern is None
    # (the default) — callers appear expected to always pass a compiled regex.
    relfilter = lambda x: (
        x["subjclass"] == subjclass
        and len(x["filler"].split()) <= window
        and pattern.match(x["filler"])
        and x["objclass"] == objclass
    )

    return list(filter(relfilter, reldicts))


def rtuple(reldict, lcon=False, rcon=False):
    """
    Pretty print the reldict as an rtuple.
    :param reldict: a relation dictionary
    :type reldict: defaultdict
    """
    items = [
        class_abbrev(reldict["subjclass"]),
        reldict["subjtext"],
        reldict["filler"],
        class_abbrev(reldict["objclass"]),
        reldict["objtext"],
    ]
    # Base shape: [CLASS: 'subj'] 'filler' [CLASS: 'obj'], optionally
    # extended with left/right context via %r to sidestep locale issues.
    format = "[%s: %r] %r [%s: %r]"
    if lcon:
        items = [reldict["lcon"]] + items
        format = "...%r)" + format
    if rcon:
        items.append(reldict["rcon"])
        format = format + "(%r..."
    printargs = tuple(items)
    return format % printargs


def clause(reldict, relsym):
    """
    Print the relation in clausal form.
+ :param reldict: a relation dictionary + :type reldict: defaultdict + :param relsym: a label for the relation + :type relsym: str + """ + items = (relsym, reldict["subjsym"], reldict["objsym"]) + return "%s(%r, %r)" % items + + +####################################################### +# Demos of relation extraction with regular expressions +####################################################### + +############################################ +# Example of in(ORG, LOC) +############################################ +def in_demo(trace=0, sql=True): + """ + Select pairs of organizations and locations whose mentions occur with an + intervening occurrence of the preposition "in". + + If the sql parameter is set to True, then the entity pairs are loaded into + an in-memory database, and subsequently pulled out using an SQL "SELECT" + query. + """ + from nltk.corpus import ieer + + if sql: + try: + import sqlite3 + + connection = sqlite3.connect(":memory:") + cur = connection.cursor() + cur.execute( + """create table Locations + (OrgName text, LocationName text, DocID text)""" + ) + except ImportError: + import warnings + + warnings.warn("Cannot import sqlite; sql flag will be ignored.") + + IN = re.compile(r".*\bin\b(?!\b.+ing)") + + print() + print("IEER: in(ORG, LOC) -- just the clauses:") + print("=" * 45) + + for file in ieer.fileids(): + for doc in ieer.parsed_docs(file): + if trace: + print(doc.docno) + print("=" * 15) + for rel in extract_rels("ORG", "LOC", doc, corpus="ieer", pattern=IN): + print(clause(rel, relsym="IN")) + if sql: + try: + rtuple = (rel["subjtext"], rel["objtext"], doc.docno) + cur.execute( + """insert into Locations + values (?, ?, ?)""", + rtuple, + ) + connection.commit() + except NameError: + pass + + if sql: + try: + cur.execute( + """select OrgName from Locations + where LocationName = 'Atlanta'""" + ) + print() + print("Extract data from SQL table: ORGs in Atlanta") + print("-" * 15) + for row in cur: + print(row) + except NameError: + 
pass + + +############################################ +# Example of has_role(PER, LOC) +############################################ + + +def roles_demo(trace=0): + from nltk.corpus import ieer + + roles = r""" + (.*( # assorted roles + analyst| + chair(wo)?man| + commissioner| + counsel| + director| + economist| + editor| + executive| + foreman| + governor| + head| + lawyer| + leader| + librarian).*)| + manager| + partner| + president| + producer| + professor| + researcher| + spokes(wo)?man| + writer| + ,\sof\sthe?\s* # "X, of (the) Y" + """ + ROLES = re.compile(roles, re.VERBOSE) + + print() + print("IEER: has_role(PER, ORG) -- raw rtuples:") + print("=" * 45) + + for file in ieer.fileids(): + for doc in ieer.parsed_docs(file): + lcon = rcon = False + if trace: + print(doc.docno) + print("=" * 15) + lcon = rcon = True + for rel in extract_rels("PER", "ORG", doc, corpus="ieer", pattern=ROLES): + print(rtuple(rel, lcon=lcon, rcon=rcon)) + + +############################################## +### Show what's in the IEER Headlines +############################################## + + +def ieer_headlines(): + + from nltk.corpus import ieer + from nltk.tree import Tree + + print("IEER: First 20 Headlines") + print("=" * 45) + + trees = [ + (doc.docno, doc.headline) + for file in ieer.fileids() + for doc in ieer.parsed_docs(file) + ] + for tree in trees[:20]: + print() + print("%s:\n%s" % tree) + + +############################################# +## Dutch CONLL2002: take_on_role(PER, ORG +############################################# + + +def conllned(trace=1): + """ + Find the copula+'van' relation ('of') in the Dutch tagged training corpus + from CoNLL 2002. 
+ """ + + from nltk.corpus import conll2002 + + vnv = """ + ( + is/V| # 3rd sing present and + was/V| # past forms of the verb zijn ('be') + werd/V| # and also present + wordt/V # past of worden ('become) + ) + .* # followed by anything + van/Prep # followed by van ('of') + """ + VAN = re.compile(vnv, re.VERBOSE) + + print() + print("Dutch CoNLL2002: van(PER, ORG) -- raw rtuples with context:") + print("=" * 45) + + for doc in conll2002.chunked_sents("ned.train"): + lcon = rcon = False + if trace: + lcon = rcon = True + for rel in extract_rels( + "PER", "ORG", doc, corpus="conll2002", pattern=VAN, window=10 + ): + print(rtuple(rel, lcon=lcon, rcon=rcon)) + + +############################################# +## Spanish CONLL2002: (PER, ORG) +############################################# + + +def conllesp(): + from nltk.corpus import conll2002 + + de = """ + .* + ( + de/SP| + del/SP + ) + """ + DE = re.compile(de, re.VERBOSE) + + print() + print("Spanish CoNLL2002: de(ORG, LOC) -- just the first 10 clauses:") + print("=" * 45) + rels = [ + rel + for doc in conll2002.chunked_sents("esp.train") + for rel in extract_rels("ORG", "LOC", doc, corpus="conll2002", pattern=DE) + ] + for r in rels[:10]: + print(clause(r, relsym="DE")) + print() + + +def ne_chunked(): + print() + print("1500 Sentences from Penn Treebank, as processed by NLTK NE Chunker") + print("=" * 45) + ROLE = re.compile( + r".*(chairman|president|trader|scientist|economist|analyst|partner).*" + ) + rels = [] + for i, sent in enumerate(nltk.corpus.treebank.tagged_sents()[:1500]): + sent = nltk.ne_chunk(sent) + rels = extract_rels("PER", "ORG", sent, corpus="ace", pattern=ROLE, window=7) + for rel in rels: + print(f"{i:<5}{rtuple(rel)}") + + +if __name__ == "__main__": + import nltk + from nltk.sem import relextract + + in_demo(trace=0) + roles_demo(trace=0) + conllned() + conllesp() + ieer_headlines() + ne_chunked() diff --git a/lib/python3.10/site-packages/nltk/sem/skolemize.py 
b/lib/python3.10/site-packages/nltk/sem/skolemize.py new file mode 100644 index 0000000000000000000000000000000000000000..6f98437cee85ecf4a023a71a3f4518e25893ef8d --- /dev/null +++ b/lib/python3.10/site-packages/nltk/sem/skolemize.py @@ -0,0 +1,148 @@
# Natural Language Toolkit: Semantic Interpretation
#
# Author: Ewan Klein
#
# Copyright (C) 2001-2023 NLTK Project
# URL:
# For license information, see LICENSE.TXT

from nltk.sem.logic import (
    AllExpression,
    AndExpression,
    ApplicationExpression,
    EqualityExpression,
    ExistsExpression,
    IffExpression,
    ImpExpression,
    NegatedExpression,
    OrExpression,
    VariableExpression,
    skolem_function,
    unique_variable,
)


def skolemize(expression, univ_scope=None, used_variables=None):
    """
    Skolemize the expression and convert to conjunctive normal form (CNF).

    The recursion simultaneously (1) pushes negations inward, (2) replaces
    existentially quantified variables with Skolem constants or Skolem
    functions of the universal variables currently in scope, and (3)
    distributes disjunction over conjunction via ``to_cnf``.

    :param expression: a first-order logic expression (``nltk.sem.logic`` type)
    :param univ_scope: set of universally quantified variables currently in
        scope; Skolem functions for existentials take these as arguments
    :type univ_scope: set or None
    :param used_variables: variables already consumed, so that freshly
        generated variable names do not collide
    :type used_variables: set or None
    :raises Exception: if the expression (or a negated subexpression) is of a
        type that cannot be skolemized
    """
    if univ_scope is None:
        univ_scope = set()
    if used_variables is None:
        used_variables = set()

    if isinstance(expression, AllExpression):
        # ALL x.P: recurse with x in scope, then rename x to a fresh variable.
        term = skolemize(
            expression.term,
            univ_scope | {expression.variable},
            used_variables | {expression.variable},
        )
        return term.replace(
            expression.variable,
            VariableExpression(unique_variable(ignore=used_variables)),
        )
    elif isinstance(expression, AndExpression):
        # Conjunction: skolemize each side independently.
        return skolemize(expression.first, univ_scope, used_variables) & skolemize(
            expression.second, univ_scope, used_variables
        )
    elif isinstance(expression, OrExpression):
        # Disjunction: distribute over any conjunctions produced below.
        return to_cnf(
            skolemize(expression.first, univ_scope, used_variables),
            skolemize(expression.second, univ_scope, used_variables),
        )
    elif isinstance(expression, ImpExpression):
        # P -> Q  ==  -P | Q
        return to_cnf(
            skolemize(-expression.first, univ_scope, used_variables),
            skolemize(expression.second, univ_scope, used_variables),
        )
    elif isinstance(expression, IffExpression):
        # P <-> Q  ==  (-P | Q) & (P | -Q)
        return to_cnf(
            skolemize(-expression.first, univ_scope, used_variables),
            skolemize(expression.second, univ_scope, used_variables),
        ) & to_cnf(
            skolemize(expression.first, univ_scope, used_variables),
            skolemize(-expression.second, univ_scope, used_variables),
        )
    elif isinstance(expression, EqualityExpression):
        # Atomic: leave unchanged.
        return expression
    elif isinstance(expression, NegatedExpression):
        # Push the negation inward over the immediate subexpression.
        negated = expression.term
        if isinstance(negated, AllExpression):
            # -ALL x.P  ==  EXISTS x.-P : Skolemize x away.
            term = skolemize(
                -negated.term, univ_scope, used_variables | {negated.variable}
            )
            if univ_scope:
                # x depends on the enclosing universals: Skolem function.
                return term.replace(negated.variable, skolem_function(univ_scope))
            else:
                # No universals in scope: a fresh Skolem constant suffices.
                skolem_constant = VariableExpression(
                    unique_variable(ignore=used_variables)
                )
                return term.replace(negated.variable, skolem_constant)
        elif isinstance(negated, AndExpression):
            # De Morgan: -(P & Q) == -P | -Q
            return to_cnf(
                skolemize(-negated.first, univ_scope, used_variables),
                skolemize(-negated.second, univ_scope, used_variables),
            )
        elif isinstance(negated, OrExpression):
            # De Morgan: -(P | Q) == -P & -Q
            return skolemize(-negated.first, univ_scope, used_variables) & skolemize(
                -negated.second, univ_scope, used_variables
            )
        elif isinstance(negated, ImpExpression):
            # -(P -> Q) == P & -Q
            return skolemize(negated.first, univ_scope, used_variables) & skolemize(
                -negated.second, univ_scope, used_variables
            )
        elif isinstance(negated, IffExpression):
            # -(P <-> Q) == (-P | -Q) & (P | Q)
            return to_cnf(
                skolemize(-negated.first, univ_scope, used_variables),
                skolemize(-negated.second, univ_scope, used_variables),
            ) & to_cnf(
                skolemize(negated.first, univ_scope, used_variables),
                skolemize(negated.second, univ_scope, used_variables),
            )
        elif isinstance(negated, EqualityExpression):
            # Negated atom: keep the outer negation intact.
            return expression
        elif isinstance(negated, NegatedExpression):
            # Double negation elimination.
            return skolemize(negated.term, univ_scope, used_variables)
        elif isinstance(negated, ExistsExpression):
            # -EXISTS x.P  ==  ALL x.-P : rename x to a fresh variable.
            term = skolemize(
                -negated.term,
                univ_scope | {negated.variable},
                used_variables | {negated.variable},
            )
            return term.replace(
                negated.variable,
                VariableExpression(unique_variable(ignore=used_variables)),
            )
        elif isinstance(negated, ApplicationExpression):
            # Negated atom: keep the outer negation intact.
            return expression
        else:
            raise Exception("'%s' cannot be skolemized" % expression)
    elif isinstance(expression, ExistsExpression):
        # EXISTS x.P : Skolemize x away (function of the universals, if any).
        term = skolemize(
            expression.term, univ_scope, used_variables | {expression.variable}
        )
        if univ_scope:
            return term.replace(expression.variable, skolem_function(univ_scope))
        else:
            skolem_constant = VariableExpression(unique_variable(ignore=used_variables))
            return term.replace(expression.variable, skolem_constant)
    elif isinstance(expression, ApplicationExpression):
        # Atomic predicate application: leave unchanged.
        return expression
    else:
        raise Exception("'%s' cannot be skolemized" % expression)


def to_cnf(first, second):
    """
    Convert this split disjunction to conjunctive normal form (CNF).

    Distributes ``first | second`` over any top-level conjunction in either
    operand, so the result is a conjunction of disjunctions.

    :param first: left disjunct, already skolemized
    :param second: right disjunct, already skolemized
    :return: the disjunction of *first* and *second*, in CNF
    """
    if isinstance(first, AndExpression):
        # (A & B) | C  ==  (A | C) & (B | C)
        r_first = to_cnf(first.first, second)
        r_second = to_cnf(first.second, second)
        return r_first & r_second
    elif isinstance(second, AndExpression):
        # A | (B & C)  ==  (A | B) & (A | C)
        r_first = to_cnf(first, second.first)
        r_second = to_cnf(first, second.second)
        return r_first & r_second
    else:
        return first | second
diff --git a/lib/python3.10/site-packages/nltk/sem/util.py b/lib/python3.10/site-packages/nltk/sem/util.py new file mode 100644 index 0000000000000000000000000000000000000000..8d119db424331b9b9873733a0acc6e9b3754a5cb --- /dev/null +++ b/lib/python3.10/site-packages/nltk/sem/util.py @@ -0,0 +1,309 @@
# Natural Language Toolkit: Semantic Interpretation
#
# Author: Ewan Klein
#
# Copyright (C) 2001-2023 NLTK Project
# URL:
# For license information, see LICENSE.TXT

"""
Utility functions for batch-processing sentences: parsing and
extraction of the semantic representation of the root node of the
syntax tree, followed by evaluation of the semantic representation in
a first-order model.
+""" + +import codecs + +from nltk.sem import evaluate + +############################################################## +## Utility functions for connecting parse output to semantics +############################################################## + + +def parse_sents(inputs, grammar, trace=0): + """ + Convert input sentences into syntactic trees. + + :param inputs: sentences to be parsed + :type inputs: list(str) + :param grammar: ``FeatureGrammar`` or name of feature-based grammar + :type grammar: nltk.grammar.FeatureGrammar + :rtype: list(nltk.tree.Tree) or dict(list(str)): list(Tree) + :return: a mapping from input sentences to a list of ``Tree`` instances. + """ + # put imports here to avoid circult dependencies + from nltk.grammar import FeatureGrammar + from nltk.parse import FeatureChartParser, load_parser + + if isinstance(grammar, FeatureGrammar): + cp = FeatureChartParser(grammar) + else: + cp = load_parser(grammar, trace=trace) + parses = [] + for sent in inputs: + tokens = sent.split() # use a tokenizer? + syntrees = list(cp.parse(tokens)) + parses.append(syntrees) + return parses + + +def root_semrep(syntree, semkey="SEM"): + """ + Find the semantic representation at the root of a tree. + + :param syntree: a parse ``Tree`` + :param semkey: the feature label to use for the root semantics in the tree + :return: the semantic representation at the root of a ``Tree`` + :rtype: sem.Expression + """ + from nltk.grammar import FeatStructNonterminal + + node = syntree.label() + assert isinstance(node, FeatStructNonterminal) + try: + return node[semkey] + except KeyError: + print(node, end=" ") + print("has no specification for the feature %s" % semkey) + raise + + +def interpret_sents(inputs, grammar, semkey="SEM", trace=0): + """ + Add the semantic representation to each syntactic parse tree + of each input sentence. 
+ + :param inputs: a list of sentences + :type inputs: list(str) + :param grammar: ``FeatureGrammar`` or name of feature-based grammar + :type grammar: nltk.grammar.FeatureGrammar + :return: a mapping from sentences to lists of pairs (parse-tree, semantic-representations) + :rtype: list(list(tuple(nltk.tree.Tree, nltk.sem.logic.ConstantExpression))) + """ + return [ + [(syn, root_semrep(syn, semkey)) for syn in syntrees] + for syntrees in parse_sents(inputs, grammar, trace=trace) + ] + + +def evaluate_sents(inputs, grammar, model, assignment, trace=0): + """ + Add the truth-in-a-model value to each semantic representation + for each syntactic parse of each input sentences. + + :param inputs: a list of sentences + :type inputs: list(str) + :param grammar: ``FeatureGrammar`` or name of feature-based grammar + :type grammar: nltk.grammar.FeatureGrammar + :return: a mapping from sentences to lists of triples (parse-tree, semantic-representations, evaluation-in-model) + :rtype: list(list(tuple(nltk.tree.Tree, nltk.sem.logic.ConstantExpression, bool or dict(str): bool))) + """ + return [ + [ + (syn, sem, model.evaluate("%s" % sem, assignment, trace=trace)) + for (syn, sem) in interpretations + ] + for interpretations in interpret_sents(inputs, grammar) + ] + + +def demo_model0(): + global m0, g0 + # Initialize a valuation of non-logical constants.""" + v = [ + ("john", "b1"), + ("mary", "g1"), + ("suzie", "g2"), + ("fido", "d1"), + ("tess", "d2"), + ("noosa", "n"), + ("girl", {"g1", "g2"}), + ("boy", {"b1", "b2"}), + ("dog", {"d1", "d2"}), + ("bark", {"d1", "d2"}), + ("walk", {"b1", "g2", "d1"}), + ("chase", {("b1", "g1"), ("b2", "g1"), ("g1", "d1"), ("g2", "d2")}), + ( + "see", + {("b1", "g1"), ("b2", "d2"), ("g1", "b1"), ("d2", "b1"), ("g2", "n")}, + ), + ("in", {("b1", "n"), ("b2", "n"), ("d2", "n")}), + ("with", {("b1", "g1"), ("g1", "b1"), ("d1", "b1"), ("b1", "d1")}), + ] + # Read in the data from ``v`` + val = evaluate.Valuation(v) + # Bind ``dom`` to the 
``domain`` property of ``val`` + dom = val.domain + # Initialize a model with parameters ``dom`` and ``val``. + m0 = evaluate.Model(dom, val) + # Initialize a variable assignment with parameter ``dom`` + g0 = evaluate.Assignment(dom) + + +def read_sents(filename, encoding="utf8"): + with codecs.open(filename, "r", encoding) as fp: + sents = [l.rstrip() for l in fp] + + # get rid of blank lines + sents = [l for l in sents if len(l) > 0] + sents = [l for l in sents if not l[0] == "#"] + return sents + + +def demo_legacy_grammar(): + """ + Check that interpret_sents() is compatible with legacy grammars that use + a lowercase 'sem' feature. + + Define 'test.fcfg' to be the following + + """ + from nltk.grammar import FeatureGrammar + + g = FeatureGrammar.fromstring( + """ + % start S + S[sem=] -> 'hello' + """ + ) + print("Reading grammar: %s" % g) + print("*" * 20) + for reading in interpret_sents(["hello"], g, semkey="sem"): + syn, sem = reading[0] + print() + print("output: ", sem) + + +def demo(): + import sys + from optparse import OptionParser + + description = """ + Parse and evaluate some sentences. 
+ """ + + opts = OptionParser(description=description) + + opts.set_defaults( + evaluate=True, + beta=True, + syntrace=0, + semtrace=0, + demo="default", + grammar="", + sentences="", + ) + + opts.add_option( + "-d", + "--demo", + dest="demo", + help="choose demo D; omit this for the default demo, or specify 'chat80'", + metavar="D", + ) + opts.add_option( + "-g", "--gram", dest="grammar", help="read in grammar G", metavar="G" + ) + opts.add_option( + "-m", + "--model", + dest="model", + help="import model M (omit '.py' suffix)", + metavar="M", + ) + opts.add_option( + "-s", + "--sentences", + dest="sentences", + help="read in a file of test sentences S", + metavar="S", + ) + opts.add_option( + "-e", + "--no-eval", + action="store_false", + dest="evaluate", + help="just do a syntactic analysis", + ) + opts.add_option( + "-b", + "--no-beta-reduction", + action="store_false", + dest="beta", + help="don't carry out beta-reduction", + ) + opts.add_option( + "-t", + "--syntrace", + action="count", + dest="syntrace", + help="set syntactic tracing on; requires '-e' option", + ) + opts.add_option( + "-T", + "--semtrace", + action="count", + dest="semtrace", + help="set semantic tracing on", + ) + + (options, args) = opts.parse_args() + + SPACER = "-" * 30 + + demo_model0() + + sents = [ + "Fido sees a boy with Mary", + "John sees Mary", + "every girl chases a dog", + "every boy chases a girl", + "John walks with a girl in Noosa", + "who walks", + ] + + gramfile = "grammars/sample_grammars/sem2.fcfg" + + if options.sentences: + sentsfile = options.sentences + if options.grammar: + gramfile = options.grammar + if options.model: + exec("import %s as model" % options.model) + + if sents is None: + sents = read_sents(sentsfile) + + # Set model and assignment + model = m0 + g = g0 + + if options.evaluate: + evaluations = evaluate_sents(sents, gramfile, model, g, trace=options.semtrace) + else: + semreps = interpret_sents(sents, gramfile, trace=options.syntrace) + + for i, sent 
in enumerate(sents): + n = 1 + print("\nSentence: %s" % sent) + print(SPACER) + if options.evaluate: + + for (syntree, semrep, value) in evaluations[i]: + if isinstance(value, dict): + value = set(value.keys()) + print("%d: %s" % (n, semrep)) + print(value) + n += 1 + else: + + for (syntree, semrep) in semreps[i]: + print("%d: %s" % (n, semrep)) + n += 1 + + +if __name__ == "__main__": + demo() + demo_legacy_grammar() diff --git a/lib/python3.10/site-packages/nltk/stem/__init__.py b/lib/python3.10/site-packages/nltk/stem/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5f46ec0c26303eea6837bc070d8e77b56b48e29f --- /dev/null +++ b/lib/python3.10/site-packages/nltk/stem/__init__.py @@ -0,0 +1,34 @@ +# Natural Language Toolkit: Stemmers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Trevor Cohn +# Edward Loper +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +NLTK Stemmers + +Interfaces used to remove morphological affixes from words, leaving +only the word stem. Stemming algorithms aim to remove those affixes +required for eg. grammatical role, tense, derivational morphology +leaving only the stem of the word. This is a difficult problem due to +irregular words (eg. common verbs in English), complicated +morphological rules, and part-of-speech and sense ambiguities +(eg. ``ceil-`` is not the stem of ``ceiling``). + +StemmerI defines a standard interface for stemmers. 
+""" + +from nltk.stem.api import StemmerI +from nltk.stem.arlstem import ARLSTem +from nltk.stem.arlstem2 import ARLSTem2 +from nltk.stem.cistem import Cistem +from nltk.stem.isri import ISRIStemmer +from nltk.stem.lancaster import LancasterStemmer +from nltk.stem.porter import PorterStemmer +from nltk.stem.regexp import RegexpStemmer +from nltk.stem.rslp import RSLPStemmer +from nltk.stem.snowball import SnowballStemmer +from nltk.stem.wordnet import WordNetLemmatizer diff --git a/lib/python3.10/site-packages/nltk/stem/api.py b/lib/python3.10/site-packages/nltk/stem/api.py new file mode 100644 index 0000000000000000000000000000000000000000..7a58c059a10ca2649faeb695d042a0c6cbb9ec69 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/stem/api.py @@ -0,0 +1,27 @@ +# Natural Language Toolkit: Stemmer Interface +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Trevor Cohn +# Edward Loper +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +from abc import ABCMeta, abstractmethod + + +class StemmerI(metaclass=ABCMeta): + """ + A processing interface for removing morphological affixes from + words. This process is known as stemming. + + """ + + @abstractmethod + def stem(self, token): + """ + Strip affixes from the token and return the stem. + + :param token: The token that should be stemmed. 
+ :type token: str + """ diff --git a/lib/python3.10/site-packages/nltk/stem/cistem.py b/lib/python3.10/site-packages/nltk/stem/cistem.py new file mode 100644 index 0000000000000000000000000000000000000000..69c07a42a373cec1eca9d75e9d474c4c1063e70b --- /dev/null +++ b/lib/python3.10/site-packages/nltk/stem/cistem.py @@ -0,0 +1,209 @@ +# Natural Language Toolkit: CISTEM Stemmer for German +# Copyright (C) 2001-2023 NLTK Project +# Author: Leonie Weissweiler +# Tom Aarsen <> (modifications) +# Algorithm: Leonie Weissweiler +# Alexander Fraser +# URL: +# For license information, see LICENSE.TXT + +import re +from typing import Tuple + +from nltk.stem.api import StemmerI + + +class Cistem(StemmerI): + """ + CISTEM Stemmer for German + + This is the official Python implementation of the CISTEM stemmer. + It is based on the paper + Leonie Weissweiler, Alexander Fraser (2017). Developing a Stemmer for German + Based on a Comparative Analysis of Publicly Available Stemmers. + In Proceedings of the German Society for Computational Linguistics and Language + Technology (GSCL) + which can be read here: + https://www.cis.lmu.de/~weissweiler/cistem/ + + In the paper, we conducted an analysis of publicly available stemmers, + developed two gold standards for German stemming and evaluated the stemmers + based on the two gold standards. We then proposed the stemmer implemented here + and show that it achieves slightly better f-measure than the other stemmers and + is thrice as fast as the Snowball stemmer for German while being about as fast + as most other stemmers. + + case_insensitive is a a boolean specifying if case-insensitive stemming + should be used. Case insensitivity improves performance only if words in the + text may be incorrectly upper case. For all-lowercase and correctly cased + text, best performance is achieved by setting case_insensitive for false. + + :param case_insensitive: if True, the stemming is case insensitive. False by default. 
    :type case_insensitive: bool
    """

    # Pre-compiled patterns applied by stem()/segment().
    strip_ge = re.compile(r"^ge(.{4,})")   # strip "ge-" prefix from long words
    repl_xx = re.compile(r"(.)\1")         # collapse doubled letters to "x*"
    strip_emr = re.compile(r"e[mr]$")      # strip "-em"/"-er" suffix
    strip_nd = re.compile(r"nd$")          # strip "-nd" suffix
    strip_t = re.compile(r"t$")            # strip "-t" suffix
    strip_esn = re.compile(r"[esn]$")      # strip "-e"/"-s"/"-n" suffix
    repl_xx_back = re.compile(r"(.)\*")    # undo the "x*" doubling marker

    def __init__(self, case_insensitive: bool = False):
        # When True, the "-t" suffix is stripped even from capitalized words.
        self._case_insensitive = case_insensitive

    @staticmethod
    def replace_to(word: str) -> str:
        # Encode multi-letter units as single placeholder characters so the
        # suffix-stripping regexes treat them as one letter.
        word = word.replace("sch", "$")
        word = word.replace("ei", "%")
        word = word.replace("ie", "&")
        word = Cistem.repl_xx.sub(r"\1*", word)

        return word

    @staticmethod
    def replace_back(word: str) -> str:
        # Inverse of replace_to: restore the original letter sequences.
        word = Cistem.repl_xx_back.sub(r"\1\1", word)
        word = word.replace("%", "ei")
        word = word.replace("&", "ie")
        word = word.replace("$", "sch")

        return word

    def stem(self, word: str) -> str:
        """Stems the input word.

        :param word: The word that is to be stemmed.
        :type word: str
        :return: The stemmed word.
        :rtype: str

        >>> from nltk.stem.cistem import Cistem
        >>> stemmer = Cistem()
        >>> s1 = "Speicherbehältern"
        >>> stemmer.stem(s1)
        'speicherbehalt'
        >>> s2 = "Grenzpostens"
        >>> stemmer.stem(s2)
        'grenzpost'
        >>> s3 = "Ausgefeiltere"
        >>> stemmer.stem(s3)
        'ausgefeilt'
        >>> stemmer = Cistem(True)
        >>> stemmer.stem(s1)
        'speicherbehal'
        >>> stemmer.stem(s2)
        'grenzpo'
        >>> stemmer.stem(s3)
        'ausgefeil'
        """
        if len(word) == 0:
            return word

        # Remember original capitalization before lowercasing.
        upper = word[0].isupper()
        word = word.lower()

        # Normalize umlauts and sharp s (stem() only; segment() skips this
        # so stem+rest can be re-concatenated into the original word).
        word = word.replace("ü", "u")
        word = word.replace("ö", "o")
        word = word.replace("ä", "a")
        word = word.replace("ß", "ss")

        word = Cistem.strip_ge.sub(r"\1", word)

        return self._segment_inner(word, upper)[0]

    def segment(self, word: str) -> Tuple[str, str]:
        """
        This method works very similarly to stem (:func:'cistem.stem'). The difference is that in
        addition to returning the stem, it also returns the rest that was removed at
        the end. To be able to return the stem unchanged so the stem and the rest
        can be concatenated to form the original word, all substitutions that altered
        the stem in any other way than by removing letters at the end were left out.

        :param word: The word that is to be stemmed.
        :type word: str
        :return: A tuple of the stemmed word and the removed suffix.
        :rtype: Tuple[str, str]

        >>> from nltk.stem.cistem import Cistem
        >>> stemmer = Cistem()
        >>> s1 = "Speicherbehältern"
        >>> stemmer.segment(s1)
        ('speicherbehält', 'ern')
        >>> s2 = "Grenzpostens"
        >>> stemmer.segment(s2)
        ('grenzpost', 'ens')
        >>> s3 = "Ausgefeiltere"
        >>> stemmer.segment(s3)
        ('ausgefeilt', 'ere')
        >>> stemmer = Cistem(True)
        >>> stemmer.segment(s1)
        ('speicherbehäl', 'tern')
        >>> stemmer.segment(s2)
        ('grenzpo', 'stens')
        >>> stemmer.segment(s3)
        ('ausgefeil', 'tere')
        """
        if len(word) == 0:
            return ("", "")

        upper = word[0].isupper()
        word = word.lower()

        return self._segment_inner(word, upper)

    def _segment_inner(self, word: str, upper: bool):
        """Inner method for iteratively applying the code stemming regexes.
        This method receives a pre-processed variant of the word to be stemmed,
        or the word to be segmented, and returns a tuple of the word and the
        removed suffix.

        :param word: A pre-processed variant of the word that is to be stemmed.
        :type word: str
        :param upper: Whether the original word started with a capital letter.
        :type upper: bool
        :return: A tuple of the stemmed word and the removed suffix.
        :rtype: Tuple[str, str]
        """

        # Number of characters stripped so far; the suffix is recovered from
        # the untouched copy, since `word` itself is marker-encoded.
        rest_length = 0
        word_copy = word[:]

        # Pre-processing before applying the substitution patterns
        word = Cistem.replace_to(word)
        rest = ""

        # Apply the substitution patterns
        while len(word) > 3:
            if len(word) > 5:
                word, n = Cistem.strip_emr.subn("", word)
                if n != 0:
                    rest_length += 2
                    continue

                word, n = Cistem.strip_nd.subn("", word)
                if n != 0:
                    rest_length += 2
                    continue

            # "-t" is only stripped from lowercase words (nouns are
            # capitalized in German) unless case-insensitive mode is on.
            if not upper or self._case_insensitive:
                word, n = Cistem.strip_t.subn("", word)
                if n != 0:
                    rest_length += 1
                    continue

            word, n = Cistem.strip_esn.subn("", word)
            if n != 0:
                rest_length += 1
                continue
            else:
                break

        # Post-processing after applying the substitution patterns
        word = Cistem.replace_back(word)

        if rest_length:
            rest = word_copy[-rest_length:]

        return (word, rest)
diff --git a/lib/python3.10/site-packages/nltk/stem/isri.py b/lib/python3.10/site-packages/nltk/stem/isri.py new file mode 100644 index 0000000000000000000000000000000000000000..4ae91f1fafaf713330ce78696873e258487d2d0a --- /dev/null +++ b/lib/python3.10/site-packages/nltk/stem/isri.py @@ -0,0 +1,395 @@
#
# Natural Language Toolkit: The ISRI Arabic Stemmer
#
# Copyright (C) 2001-2023 NLTK Project
# Algorithm: Kazem Taghva, Rania Elkhoury, and Jeffrey Coombs (2005)
# Author: Hosam Algasaier
# URL:
# For license information, see LICENSE.TXT

"""
ISRI Arabic Stemmer

The algorithm for this stemmer is described in:

Taghva, K., Elkoury, R., and Coombs, J. 2005. Arabic Stemming without a root dictionary.
Information Science Research Institute. University of Nevada, Las Vegas, USA.

The Information Science Research Institute’s (ISRI) Arabic stemmer shares many features
with the Khoja stemmer. However, the main difference is that ISRI stemmer does not use root
dictionary. Also, if a root is not found, ISRI stemmer returned normalized form, rather than
returning the original unmodified word.
+ +Additional adjustments were made to improve the algorithm: + +1- Adding 60 stop words. +2- Adding the pattern (تفاعيل) to ISRI pattern set. +3- The step 2 in the original algorithm was normalizing all hamza. This step is discarded because it +increases the word ambiguities and changes the original root. + +""" +import re + +from nltk.stem.api import StemmerI + + +class ISRIStemmer(StemmerI): + """ + ISRI Arabic stemmer based on algorithm: Arabic Stemming without a root dictionary. + Information Science Research Institute. University of Nevada, Las Vegas, USA. + + A few minor modifications have been made to ISRI basic algorithm. + See the source code of this module for more information. + + isri.stem(token) returns Arabic root for the given token. + + The ISRI Stemmer requires that all tokens have Unicode string types. + If you use Python IDLE on Arabic Windows you have to decode text first + using Arabic '1256' coding. + """ + + def __init__(self): + # length three prefixes + self.p3 = [ + "\u0643\u0627\u0644", + "\u0628\u0627\u0644", + "\u0648\u0644\u0644", + "\u0648\u0627\u0644", + ] + + # length two prefixes + self.p2 = ["\u0627\u0644", "\u0644\u0644"] + + # length one prefixes + self.p1 = [ + "\u0644", + "\u0628", + "\u0641", + "\u0633", + "\u0648", + "\u064a", + "\u062a", + "\u0646", + "\u0627", + ] + + # length three suffixes + self.s3 = [ + "\u062a\u0645\u0644", + "\u0647\u0645\u0644", + "\u062a\u0627\u0646", + "\u062a\u064a\u0646", + "\u0643\u0645\u0644", + ] + + # length two suffixes + self.s2 = [ + "\u0648\u0646", + "\u0627\u062a", + "\u0627\u0646", + "\u064a\u0646", + "\u062a\u0646", + "\u0643\u0645", + "\u0647\u0646", + "\u0646\u0627", + "\u064a\u0627", + "\u0647\u0627", + "\u062a\u0645", + "\u0643\u0646", + "\u0646\u064a", + "\u0648\u0627", + "\u0645\u0627", + "\u0647\u0645", + ] + + # length one suffixes + self.s1 = ["\u0629", "\u0647", "\u064a", "\u0643", "\u062a", "\u0627", "\u0646"] + + # groups of length four patterns + self.pr4 = { + 0: 
["\u0645"], + 1: ["\u0627"], + 2: ["\u0627", "\u0648", "\u064A"], + 3: ["\u0629"], + } + + # Groups of length five patterns and length three roots + self.pr53 = { + 0: ["\u0627", "\u062a"], + 1: ["\u0627", "\u064a", "\u0648"], + 2: ["\u0627", "\u062a", "\u0645"], + 3: ["\u0645", "\u064a", "\u062a"], + 4: ["\u0645", "\u062a"], + 5: ["\u0627", "\u0648"], + 6: ["\u0627", "\u0645"], + } + + self.re_short_vowels = re.compile(r"[\u064B-\u0652]") + self.re_hamza = re.compile(r"[\u0621\u0624\u0626]") + self.re_initial_hamza = re.compile(r"^[\u0622\u0623\u0625]") + + self.stop_words = [ + "\u064a\u0643\u0648\u0646", + "\u0648\u0644\u064a\u0633", + "\u0648\u0643\u0627\u0646", + "\u0643\u0630\u0644\u0643", + "\u0627\u0644\u062a\u064a", + "\u0648\u0628\u064a\u0646", + "\u0639\u0644\u064a\u0647\u0627", + "\u0645\u0633\u0627\u0621", + "\u0627\u0644\u0630\u064a", + "\u0648\u0643\u0627\u0646\u062a", + "\u0648\u0644\u0643\u0646", + "\u0648\u0627\u0644\u062a\u064a", + "\u062a\u0643\u0648\u0646", + "\u0627\u0644\u064a\u0648\u0645", + "\u0627\u0644\u0644\u0630\u064a\u0646", + "\u0639\u0644\u064a\u0647", + "\u0643\u0627\u0646\u062a", + "\u0644\u0630\u0644\u0643", + "\u0623\u0645\u0627\u0645", + "\u0647\u0646\u0627\u0643", + "\u0645\u0646\u0647\u0627", + "\u0645\u0627\u0632\u0627\u0644", + "\u0644\u0627\u0632\u0627\u0644", + "\u0644\u0627\u064a\u0632\u0627\u0644", + "\u0645\u0627\u064a\u0632\u0627\u0644", + "\u0627\u0635\u0628\u062d", + "\u0623\u0635\u0628\u062d", + "\u0623\u0645\u0633\u0649", + "\u0627\u0645\u0633\u0649", + "\u0623\u0636\u062d\u0649", + "\u0627\u0636\u062d\u0649", + "\u0645\u0627\u0628\u0631\u062d", + "\u0645\u0627\u0641\u062a\u0626", + "\u0645\u0627\u0627\u0646\u0641\u0643", + "\u0644\u0627\u0633\u064a\u0645\u0627", + "\u0648\u0644\u0627\u064a\u0632\u0627\u0644", + "\u0627\u0644\u062d\u0627\u0644\u064a", + "\u0627\u0644\u064a\u0647\u0627", + "\u0627\u0644\u0630\u064a\u0646", + "\u0641\u0627\u0646\u0647", + "\u0648\u0627\u0644\u0630\u064a", + 
"\u0648\u0647\u0630\u0627", + "\u0644\u0647\u0630\u0627", + "\u0641\u0643\u0627\u0646", + "\u0633\u062a\u0643\u0648\u0646", + "\u0627\u0644\u064a\u0647", + "\u064a\u0645\u0643\u0646", + "\u0628\u0647\u0630\u0627", + "\u0627\u0644\u0630\u0649", + ] + + def stem(self, token): + """ + Stemming a word token using the ISRI stemmer. + """ + token = self.norm( + token, 1 + ) # remove diacritics which representing Arabic short vowels + if token in self.stop_words: + return token # exclude stop words from being processed + token = self.pre32( + token + ) # remove length three and length two prefixes in this order + token = self.suf32( + token + ) # remove length three and length two suffixes in this order + token = self.waw( + token + ) # remove connective ‘و’ if it precedes a word beginning with ‘و’ + token = self.norm(token, 2) # normalize initial hamza to bare alif + # if 4 <= word length <= 7, then stem; otherwise, no stemming + if len(token) == 4: # length 4 word + token = self.pro_w4(token) + elif len(token) == 5: # length 5 word + token = self.pro_w53(token) + token = self.end_w5(token) + elif len(token) == 6: # length 6 word + token = self.pro_w6(token) + token = self.end_w6(token) + elif len(token) == 7: # length 7 word + token = self.suf1(token) + if len(token) == 7: + token = self.pre1(token) + if len(token) == 6: + token = self.pro_w6(token) + token = self.end_w6(token) + return token + + def norm(self, word, num=3): + """ + normalization: + num=1 normalize diacritics + num=2 normalize initial hamza + num=3 both 1&2 + """ + if num == 1: + word = self.re_short_vowels.sub("", word) + elif num == 2: + word = self.re_initial_hamza.sub("\u0627", word) + elif num == 3: + word = self.re_short_vowels.sub("", word) + word = self.re_initial_hamza.sub("\u0627", word) + return word + + def pre32(self, word): + """remove length three and length two prefixes in this order""" + if len(word) >= 6: + for pre3 in self.p3: + if word.startswith(pre3): + return word[3:] + if 
def pre32(self, word):
    """Strip a length-three prefix, then a length-two prefix, in that order.

    NOTE(review): the opening lines of this method fall outside this chunk;
    the head is reconstructed here to mirror suf32 — confirm against the
    full file.
    """
    if len(word) >= 6:
        for prefix3 in self.p3:
            if word.startswith(prefix3):
                return word[3:]
    if len(word) >= 5:
        for prefix2 in self.p2:
            if word.startswith(prefix2):
                return word[2:]
    return word


def suf32(self, word):
    """Strip a length-three suffix, then a length-two suffix, in that order."""
    if len(word) >= 6:
        for suffix3 in self.s3:
            if word.endswith(suffix3):
                return word[:-3]
    if len(word) >= 5:
        for suffix2 in self.s2:
            if word.endswith(suffix2):
                return word[:-2]
    return word


def waw(self, word):
    """Drop one leading waw when the word begins with a doubled waw ('وو')."""
    if len(word) >= 4 and word[:2] == "\u0648\u0648":
        return word[1:]
    return word


def pro_w4(self, word):
    """Process length-four patterns and extract a length-three root."""
    if word[0] in self.pr4[0]:  # مفعل
        return word[1:]
    if word[1] in self.pr4[1]:  # فاعل
        return word[:1] + word[2:]
    if word[2] in self.pr4[2]:  # فعال - فعول - فعيل
        return word[:2] + word[3]
    if word[3] in self.pr4[3]:  # فعلة
        return word[:-1]
    # No pattern matched: strip a single-letter suffix, then (if still
    # length four) a single-letter prefix.
    word = self.suf1(word)
    if len(word) == 4:
        word = self.pre1(word)
    return word


def pro_w53(self, word):
    """Process length-five patterns and extract a length-three root."""
    if word[2] in self.pr53[0] and word[0] == "\u0627":  # افتعل - افاعل
        return word[1] + word[3:]
    if word[3] in self.pr53[1] and word[0] == "\u0645":  # مفعول - مفعال - مفعيل
        return word[1:3] + word[4]
    if word[0] in self.pr53[2] and word[4] == "\u0629":  # مفعلة - تفعلة - افعلة
        return word[1:4]
    if word[0] in self.pr53[3] and word[2] == "\u062a":  # مفتعل - يفتعل - تفتعل
        return word[1] + word[3:]
    if word[0] in self.pr53[4] and word[2] == "\u0627":  # مفاعل - تفاعل
        return word[1] + word[3:]
    if word[2] in self.pr53[5] and word[4] == "\u0629":  # فعولة - فعالة
        return word[:2] + word[3]
    if word[0] in self.pr53[6] and word[1] == "\u0646":  # انفعل - منفعل
        return word[2:]
    if word[3] == "\u0627" and word[0] == "\u0627":  # افعال
        return word[1:3] + word[4]
    if word[4] == "\u0646" and word[3] == "\u0627":  # فعلان
        return word[:3]
    if word[3] == "\u064a" and word[0] == "\u062a":  # تفعيل
        return word[1:3] + word[4]
    if word[3] == "\u0648" and word[1] == "\u0627":  # فاعول
        return word[0] + word[2] + word[4]
    if word[2] == "\u0627" and word[1] == "\u0648":  # فواعل
        return word[0] + word[3:]
    if word[3] == "\u0626" and word[2] == "\u0627":  # فعائل
        return word[:2] + word[4]
    if word[4] == "\u0629" and word[1] == "\u0627":  # فاعلة
        return word[0] + word[2:4]
    if word[4] == "\u064a" and word[2] == "\u0627":  # فعالي
        return word[:2] + word[3]
    # No pattern matched: fall back to single-letter affix stripping.
    word = self.suf1(word)
    if len(word) == 5:
        word = self.pre1(word)
    return word


def pro_w54(self, word):
    """Process length-five patterns and extract a length-four root."""
    if word[0] in self.pr53[2]:  # تفعلل - افعلل - مفعلل
        return word[1:]
    if word[4] == "\u0629":  # فعللة
        return word[:4]
    if word[2] == "\u0627":  # فعالل
        return word[:2] + word[3:]
    return word


def end_w5(self, word):
    """Ending step for a word that was originally length five."""
    if len(word) == 4:
        word = self.pro_w4(word)
    elif len(word) == 5:
        word = self.pro_w54(word)
    return word


def pro_w6(self, word):
    """Process length-six patterns and extract a length-three root."""
    if word.startswith("\u0627\u0633\u062a") or word.startswith(
        "\u0645\u0633\u062a"
    ):  # مستفعل - استفعل
        return word[3:]
    if word[0] == "\u0645" and word[3] == "\u0627" and word[5] == "\u0629":  # مفعالة
        return word[1:3] + word[4]
    if word[0] == "\u0627" and word[2] == "\u062a" and word[4] == "\u0627":  # افتعال
        return word[1] + word[3] + word[5]
    if word[0] == "\u0627" and word[3] == "\u0648" and word[2] == word[4]:  # افعوعل
        return word[1] + word[4:]
    if word[0] == "\u062a" and word[2] == "\u0627" and word[4] == "\u064a":  # تفاعيل
        return word[1] + word[3] + word[5]
    # No pattern matched: fall back to single-letter affix stripping.
    word = self.suf1(word)
    if len(word) == 6:
        word = self.pre1(word)
    return word


def pro_w64(self, word):
    """Process length-six patterns and extract a length-four root."""
    if word[0] == "\u0627" and word[4] == "\u0627":  # افعلال
        return word[1:4] + word[5]
    if word.startswith("\u0645\u062a"):  # متفعلل
        return word[2:]
    return word


def end_w6(self, word):
    """Ending step for a word that was originally length six."""
    if len(word) == 5:
        word = self.pro_w53(word)
        word = self.end_w5(word)
    elif len(word) == 6:
        word = self.pro_w64(word)
    return word


def suf1(self, word):
    """Strip one single-letter suffix, if present."""
    for suffix in self.s1:
        if word.endswith(suffix):
            return word[:-1]
    return word


def pre1(self, word):
    """Strip one single-letter prefix, if present."""
    for prefix in self.p1:
        if word.startswith(prefix):
            return word[1:]
    return word


# ---------------------------------------------------------------------------
# (chunk boundary: the flattened diff continues with the file header and the
# opening module docstring of lib/python3.10/site-packages/nltk/tag/__init__.py)
# ---------------------------------------------------------------------------
# (continuation of the nltk/tag/__init__.py module docstring opened at the
# previous chunk boundary; for the full tagging tutorial see ch. 5 of the
# NLTK Book.  The off-the-shelf English tagger uses the Penn Treebank
# tagset; lang="rus" selects the Russian National Corpus tagset.  Unseen
# words receive the tag None.)
#
# isort:skip_file

from nltk.tag.api import TaggerI
from nltk.tag.util import str2tuple, tuple2str, untag
from nltk.tag.sequential import (
    SequentialBackoffTagger,
    ContextTagger,
    DefaultTagger,
    NgramTagger,
    UnigramTagger,
    BigramTagger,
    TrigramTagger,
    AffixTagger,
    RegexpTagger,
    ClassifierBasedTagger,
    ClassifierBasedPOSTagger,
)
from nltk.tag.brill import BrillTagger
from nltk.tag.brill_trainer import BrillTaggerTrainer
from nltk.tag.tnt import TnT
from nltk.tag.hunpos import HunposTagger
from nltk.tag.stanford import StanfordTagger, StanfordPOSTagger, StanfordNERTagger
from nltk.tag.hmm import HiddenMarkovModelTagger, HiddenMarkovModelTrainer
from nltk.tag.senna import SennaTagger, SennaChunkTagger, SennaNERTagger
from nltk.tag.mapping import tagset_mapping, map_tag
from nltk.tag.crf import CRFTagger
from nltk.tag.perceptron import PerceptronTagger

from nltk.data import load, find

# Location (inside the NLTK data directory) of the pickled
# averaged-perceptron model for Russian.
RUS_PICKLE = (
    "taggers/averaged_perceptron_tagger_ru/averaged_perceptron_tagger_ru.pickle"
)


def _get_tagger(lang=None):
    """Return a PerceptronTagger; ``lang='rus'`` loads the Russian model."""
    if lang != "rus":
        return PerceptronTagger()
    # Instantiate without loading the default English model, then load
    # the Russian pickle explicitly.
    tagger = PerceptronTagger(False)
    tagger.load("file:" + str(find(RUS_PICKLE)))
    return tagger


def _pos_tag(tokens, tagset=None, tagger=None, lang=None):
    """Tag ``tokens`` with ``tagger``, optionally mapping to ``tagset``.

    :raises NotImplementedError: if ``lang`` is neither 'eng' nor 'rus'
    :raises TypeError: if ``tokens`` is a bare string instead of a list
    """
    # Currently only supports English and Russian.
    if lang not in ["eng", "rus"]:
        raise NotImplementedError(
            "Currently, NLTK pos_tag only supports English and Russian "
            "(i.e. lang='eng' or lang='rus')"
        )
    if isinstance(tokens, str):
        raise TypeError("tokens: expected a list of strings, got a string")

    tagged_tokens = tagger.tag(tokens)
    if tagset:  # Map the model's native tags onto the requested tagset.
        if lang == "eng":
            tagged_tokens = [
                (token, map_tag("en-ptb", tagset, tag))
                for (token, tag) in tagged_tokens
            ]
        elif lang == "rus":
            # The Russian model emits tags carrying '=' suffixes; strip
            # them before mapping, see
            # https://github.com/nltk/nltk/issues/2151#issuecomment-430709018
            tagged_tokens = [
                (token, map_tag("ru-rnc-new", tagset, tag.partition("=")[0]))
                for (token, tag) in tagged_tokens
            ]
    return tagged_tokens


def pos_tag(tokens, tagset=None, lang="eng"):
    """
    Use NLTK's currently recommended part of speech tagger to tag the
    given list of tokens.

    NB. Use ``pos_tag_sents()`` for efficient tagging of more than one
    sentence.

    :param tokens: Sequence of tokens to be tagged
    :type tokens: list(str)
    :param tagset: the tagset to be used, e.g. universal, wsj, brown
    :type tagset: str
    :param lang: the ISO 639 code of the language, e.g. 'eng' for English,
        'rus' for Russian
    :type lang: str
    :return: The tagged tokens
    :rtype: list(tuple(str, str))
    """
    return _pos_tag(tokens, tagset, _get_tagger(lang), lang)


def pos_tag_sents(sentences, tagset=None, lang="eng"):
    """
    Use NLTK's currently recommended part of speech tagger to tag the
    given list of sentences, each consisting of a list of tokens.
    The tagger is constructed once and reused for every sentence.

    :param sentences: List of sentences to be tagged
    :type sentences: list(list(str))
    :param tagset: the tagset to be used, e.g. universal, wsj, brown
    :type tagset: str
    :param lang: the ISO 639 code of the language
    :type lang: str
    :return: The list of tagged sentences
    :rtype: list(list(tuple(str, str)))
    """
    tagger = _get_tagger(lang)
    return [_pos_tag(sentence, tagset, tagger, lang) for sentence in sentences]


# --- lib/python3.10/site-packages/nltk/tag/api.py ---
# Natural Language Toolkit: Tagger Interface
# Copyright (C) 2001-2023 NLTK Project
"""
Interface for tagging each token in a sentence with supplementary
information, such as its part of speech.
"""
from abc import ABCMeta, abstractmethod
from functools import lru_cache
from itertools import chain
from typing import Dict

from nltk.internals import deprecated, overridden
from nltk.metrics import ConfusionMatrix, accuracy
from nltk.tag.util import untag


class TaggerI(metaclass=ABCMeta):
    """
    A processing interface for assigning a tag to each token in a list.
    Tags are case-sensitive strings identifying some property of each
    token, such as its part of speech or its sense.  Sub-interfaces may
    require specific token types (e.g. ``FeaturesetTagger`` requires
    featuresets).  Subclasses must define ``tag()`` or ``tag_sents()``
    (or both).
    """
+ + Subclasses must define: + - either ``tag()`` or ``tag_sents()`` (or both) + """ + + @abstractmethod + def tag(self, tokens): + """ + Determine the most appropriate tag sequence for the given + token sequence, and return a corresponding list of tagged + tokens. A tagged token is encoded as a tuple ``(token, tag)``. + + :rtype: list(tuple(str, str)) + """ + if overridden(self.tag_sents): + return self.tag_sents([tokens])[0] + + def tag_sents(self, sentences): + """ + Apply ``self.tag()`` to each element of *sentences*. I.e.:: + + return [self.tag(sent) for sent in sentences] + """ + return [self.tag(sent) for sent in sentences] + + @deprecated("Use accuracy(gold) instead.") + def evaluate(self, gold): + return self.accuracy(gold) + + def accuracy(self, gold): + """ + Score the accuracy of the tagger against the gold standard. + Strip the tags from the gold standard text, retag it using + the tagger, then compute the accuracy score. + + :param gold: The list of tagged sentences to score the tagger on. + :type gold: list(list(tuple(str, str))) + :rtype: float + """ + + tagged_sents = self.tag_sents(untag(sent) for sent in gold) + gold_tokens = list(chain.from_iterable(gold)) + test_tokens = list(chain.from_iterable(tagged_sents)) + return accuracy(gold_tokens, test_tokens) + + @lru_cache(maxsize=1) + def _confusion_cached(self, gold): + """ + Inner function used after ``gold`` is converted to a + ``tuple(tuple(tuple(str, str)))``. That way, we can use caching on + creating a ConfusionMatrix. + + :param gold: The list of tagged sentences to run the tagger with, + also used as the reference values in the generated confusion matrix. 
+ :type gold: tuple(tuple(tuple(str, str))) + :rtype: ConfusionMatrix + """ + + tagged_sents = self.tag_sents(untag(sent) for sent in gold) + gold_tokens = [token for _word, token in chain.from_iterable(gold)] + test_tokens = [token for _word, token in chain.from_iterable(tagged_sents)] + return ConfusionMatrix(gold_tokens, test_tokens) + + def confusion(self, gold): + """ + Return a ConfusionMatrix with the tags from ``gold`` as the reference + values, with the predictions from ``tag_sents`` as the predicted values. + + >>> from nltk.tag import PerceptronTagger + >>> from nltk.corpus import treebank + >>> tagger = PerceptronTagger() + >>> gold_data = treebank.tagged_sents()[:10] + >>> print(tagger.confusion(gold_data)) + | - | + | N | + | O P | + | N J J N N P P R R V V V V V W | + | ' E C C D E I J J J M N N N O R P R B R T V B B B B B D ` | + | ' , - . C D T X N J R S D N P S S P $ B R P O B D G N P Z T ` | + -------+----------------------------------------------------------------------------------------------+ + '' | <1> . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . | + , | .<15> . . . . . . . . . . . . . . . . . . . . . . . . . . . . . | + -NONE- | . . <.> . . 2 . . . 2 . . . 5 1 . . . . 2 . . . . . . . . . . . | + . | . . .<10> . . . . . . . . . . . . . . . . . . . . . . . . . . . | + CC | . . . . <1> . . . . . . . . . . . . . . . . . . . . . . . . . . | + CD | . . . . . <5> . . . . . . . . . . . . . . . . . . . . . . . . . | + DT | . . . . . .<20> . . . . . . . . . . . . . . . . . . . . . . . . | + EX | . . . . . . . <1> . . . . . . . . . . . . . . . . . . . . . . . | + IN | . . . . . . . .<22> . . . . . . . . . . 3 . . . . . . . . . . . | + JJ | . . . . . . . . .<16> . . . . 1 . . . . 1 . . . . . . . . . . . | + JJR | . . . . . . . . . . <.> . . . . . . . . . . . . . . . . . . . . | + JJS | . . . . . . . . . . . <1> . . . . . . . . . . . . . . . . . . . | + MD | . . . . . . . . . . . . <1> . . . . . . . . . . . . . . . . . . | + NN | . . . . . 
. . . . . . . .<28> 1 1 . . . . . . . . . . . . . . . | + NNP | . . . . . . . . . . . . . .<25> . . . . . . . . . . . . . . . . | + NNS | . . . . . . . . . . . . . . .<19> . . . . . . . . . . . . . . . | + POS | . . . . . . . . . . . . . . . . <1> . . . . . . . . . . . . . . | + PRP | . . . . . . . . . . . . . . . . . <4> . . . . . . . . . . . . . | + PRP$ | . . . . . . . . . . . . . . . . . . <2> . . . . . . . . . . . . | + RB | . . . . . . . . . . . . . . . . . . . <4> . . . . . . . . . . . | + RBR | . . . . . . . . . . 1 . . . . . . . . . <1> . . . . . . . . . . | + RP | . . . . . . . . . . . . . . . . . . . . . <1> . . . . . . . . . | + TO | . . . . . . . . . . . . . . . . . . . . . . <5> . . . . . . . . | + VB | . . . . . . . . . . . . . . . . . . . . . . . <3> . . . . . . . | + VBD | . . . . . . . . . . . . . 1 . . . . . . . . . . <6> . . . . . . | + VBG | . . . . . . . . . . . . . 1 . . . . . . . . . . . <4> . . . . . | + VBN | . . . . . . . . . . . . . . . . . . . . . . . . 1 . <4> . . . . | + VBP | . . . . . . . . . . . . . . . . . . . . . . . . . . . <3> . . . | + VBZ | . . . . . . . . . . . . . . . . . . . . . . . . . . . . <7> . . | + WDT | . . . . . . . . 2 . . . . . . . . . . . . . . . . . . . . <.> . | + `` | . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <1>| + -------+----------------------------------------------------------------------------------------------+ + (row = reference; col = test) + + + :param gold: The list of tagged sentences to run the tagger with, + also used as the reference values in the generated confusion matrix. + :type gold: list(list(tuple(str, str))) + :rtype: ConfusionMatrix + """ + + return self._confusion_cached(tuple(tuple(sent) for sent in gold)) + + def recall(self, gold) -> Dict[str, float]: + """ + Compute the recall for each tag from ``gold`` or from running ``tag`` + on the tokenized sentences from ``gold``. Then, return the dictionary + with mappings from tag to recall. 
The recall is defined as: + + - *r* = true positive / (true positive + false positive) + + :param gold: The list of tagged sentences to score the tagger on. + :type gold: list(list(tuple(str, str))) + :return: A mapping from tags to recall + :rtype: Dict[str, float] + """ + + cm = self.confusion(gold) + return {tag: cm.recall(tag) for tag in cm._values} + + def precision(self, gold): + """ + Compute the precision for each tag from ``gold`` or from running ``tag`` + on the tokenized sentences from ``gold``. Then, return the dictionary + with mappings from tag to precision. The precision is defined as: + + - *p* = true positive / (true positive + false negative) + + :param gold: The list of tagged sentences to score the tagger on. + :type gold: list(list(tuple(str, str))) + :return: A mapping from tags to precision + :rtype: Dict[str, float] + """ + + cm = self.confusion(gold) + return {tag: cm.precision(tag) for tag in cm._values} + + def f_measure(self, gold, alpha=0.5): + """ + Compute the f-measure for each tag from ``gold`` or from running ``tag`` + on the tokenized sentences from ``gold``. Then, return the dictionary + with mappings from tag to f-measure. The f-measure is the harmonic mean + of the ``precision`` and ``recall``, weighted by ``alpha``. + In particular, given the precision *p* and recall *r* defined by: + + - *p* = true positive / (true positive + false negative) + - *r* = true positive / (true positive + false positive) + + The f-measure is: + + - *1/(alpha/p + (1-alpha)/r)* + + With ``alpha = 0.5``, this reduces to: + + - *2pr / (p + r)* + + :param gold: The list of tagged sentences to score the tagger on. + :type gold: list(list(tuple(str, str))) + :param alpha: Ratio of the cost of false negative compared to false + positives. Defaults to 0.5, where the costs are equal. 
+ :type alpha: float + :return: A mapping from tags to precision + :rtype: Dict[str, float] + """ + cm = self.confusion(gold) + return {tag: cm.f_measure(tag, alpha) for tag in cm._values} + + def evaluate_per_tag(self, gold, alpha=0.5, truncate=None, sort_by_count=False): + """Tabulate the **recall**, **precision** and **f-measure** + for each tag from ``gold`` or from running ``tag`` on the tokenized + sentences from ``gold``. + + >>> from nltk.tag import PerceptronTagger + >>> from nltk.corpus import treebank + >>> tagger = PerceptronTagger() + >>> gold_data = treebank.tagged_sents()[:10] + >>> print(tagger.evaluate_per_tag(gold_data)) + Tag | Prec. | Recall | F-measure + -------+--------+--------+----------- + '' | 1.0000 | 1.0000 | 1.0000 + , | 1.0000 | 1.0000 | 1.0000 + -NONE- | 0.0000 | 0.0000 | 0.0000 + . | 1.0000 | 1.0000 | 1.0000 + CC | 1.0000 | 1.0000 | 1.0000 + CD | 0.7143 | 1.0000 | 0.8333 + DT | 1.0000 | 1.0000 | 1.0000 + EX | 1.0000 | 1.0000 | 1.0000 + IN | 0.9167 | 0.8800 | 0.8980 + JJ | 0.8889 | 0.8889 | 0.8889 + JJR | 0.0000 | 0.0000 | 0.0000 + JJS | 1.0000 | 1.0000 | 1.0000 + MD | 1.0000 | 1.0000 | 1.0000 + NN | 0.8000 | 0.9333 | 0.8615 + NNP | 0.8929 | 1.0000 | 0.9434 + NNS | 0.9500 | 1.0000 | 0.9744 + POS | 1.0000 | 1.0000 | 1.0000 + PRP | 1.0000 | 1.0000 | 1.0000 + PRP$ | 1.0000 | 1.0000 | 1.0000 + RB | 0.4000 | 1.0000 | 0.5714 + RBR | 1.0000 | 0.5000 | 0.6667 + RP | 1.0000 | 1.0000 | 1.0000 + TO | 1.0000 | 1.0000 | 1.0000 + VB | 1.0000 | 1.0000 | 1.0000 + VBD | 0.8571 | 0.8571 | 0.8571 + VBG | 1.0000 | 0.8000 | 0.8889 + VBN | 1.0000 | 0.8000 | 0.8889 + VBP | 1.0000 | 1.0000 | 1.0000 + VBZ | 1.0000 | 1.0000 | 1.0000 + WDT | 0.0000 | 0.0000 | 0.0000 + `` | 1.0000 | 1.0000 | 1.0000 + + + :param gold: The list of tagged sentences to score the tagger on. + :type gold: list(list(tuple(str, str))) + :param alpha: Ratio of the cost of false negative compared to false + positives, as used in the f-measure computation. 
Defaults to 0.5, + where the costs are equal. + :type alpha: float + :param truncate: If specified, then only show the specified + number of values. Any sorting (e.g., sort_by_count) + will be performed before truncation. Defaults to None + :type truncate: int, optional + :param sort_by_count: Whether to sort the outputs on number of + occurrences of that tag in the ``gold`` data, defaults to False + :type sort_by_count: bool, optional + :return: A tabulated recall, precision and f-measure string + :rtype: str + """ + cm = self.confusion(gold) + return cm.evaluate(alpha=alpha, truncate=truncate, sort_by_count=sort_by_count) + + def _check_params(self, train, model): + if (train and model) or (not train and not model): + raise ValueError("Must specify either training data or trained model.") + + +class FeaturesetTaggerI(TaggerI): + """ + A tagger that requires tokens to be ``featuresets``. A featureset + is a dictionary that maps from feature names to feature + values. See ``nltk.classify`` for more information about features + and featuresets. 
+ """ diff --git a/lib/python3.10/site-packages/nltk/tag/brill.py b/lib/python3.10/site-packages/nltk/tag/brill.py new file mode 100644 index 0000000000000000000000000000000000000000..d3bd1cd3b6cb10c4b62b7d23910e2a8ba9568cd2 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/tag/brill.py @@ -0,0 +1,449 @@ +# Natural Language Toolkit: Transformation-based learning +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Marcus Uneson +# based on previous (nltk2) version by +# Christopher Maloof, Edward Loper, Steven Bird +# URL: +# For license information, see LICENSE.TXT + +from collections import Counter, defaultdict + +from nltk import jsontags +from nltk.tag import TaggerI +from nltk.tbl import Feature, Template + +###################################################################### +# Brill Templates +###################################################################### + + +@jsontags.register_tag +class Word(Feature): + """ + Feature which examines the text (word) of nearby tokens. + """ + + json_tag = "nltk.tag.brill.Word" + + @staticmethod + def extract_property(tokens, index): + """@return: The given token's text.""" + return tokens[index][0] + + +@jsontags.register_tag +class Pos(Feature): + """ + Feature which examines the tags of nearby tokens. 
+ """ + + json_tag = "nltk.tag.brill.Pos" + + @staticmethod + def extract_property(tokens, index): + """@return: The given token's tag.""" + return tokens[index][1] + + +def nltkdemo18(): + """ + Return 18 templates, from the original nltk demo, in multi-feature syntax + """ + return [ + Template(Pos([-1])), + Template(Pos([1])), + Template(Pos([-2])), + Template(Pos([2])), + Template(Pos([-2, -1])), + Template(Pos([1, 2])), + Template(Pos([-3, -2, -1])), + Template(Pos([1, 2, 3])), + Template(Pos([-1]), Pos([1])), + Template(Word([-1])), + Template(Word([1])), + Template(Word([-2])), + Template(Word([2])), + Template(Word([-2, -1])), + Template(Word([1, 2])), + Template(Word([-3, -2, -1])), + Template(Word([1, 2, 3])), + Template(Word([-1]), Word([1])), + ] + + +def nltkdemo18plus(): + """ + Return 18 templates, from the original nltk demo, and additionally a few + multi-feature ones (the motivation is easy comparison with nltkdemo18) + """ + return nltkdemo18() + [ + Template(Word([-1]), Pos([1])), + Template(Pos([-1]), Word([1])), + Template(Word([-1]), Word([0]), Pos([1])), + Template(Pos([-1]), Word([0]), Word([1])), + Template(Pos([-1]), Word([0]), Pos([1])), + ] + + +def fntbl37(): + """ + Return 37 templates taken from the postagging task of the + fntbl distribution https://www.cs.jhu.edu/~rflorian/fntbl/ + (37 is after excluding a handful which do not condition on Pos[0]; + fntbl can do that but the current nltk implementation cannot.) 
+ """ + return [ + Template(Word([0]), Word([1]), Word([2])), + Template(Word([-1]), Word([0]), Word([1])), + Template(Word([0]), Word([-1])), + Template(Word([0]), Word([1])), + Template(Word([0]), Word([2])), + Template(Word([0]), Word([-2])), + Template(Word([1, 2])), + Template(Word([-2, -1])), + Template(Word([1, 2, 3])), + Template(Word([-3, -2, -1])), + Template(Word([0]), Pos([2])), + Template(Word([0]), Pos([-2])), + Template(Word([0]), Pos([1])), + Template(Word([0]), Pos([-1])), + Template(Word([0])), + Template(Word([-2])), + Template(Word([2])), + Template(Word([1])), + Template(Word([-1])), + Template(Pos([-1]), Pos([1])), + Template(Pos([1]), Pos([2])), + Template(Pos([-1]), Pos([-2])), + Template(Pos([1])), + Template(Pos([-1])), + Template(Pos([-2])), + Template(Pos([2])), + Template(Pos([1, 2, 3])), + Template(Pos([1, 2])), + Template(Pos([-3, -2, -1])), + Template(Pos([-2, -1])), + Template(Pos([1]), Word([0]), Word([1])), + Template(Pos([1]), Word([0]), Word([-1])), + Template(Pos([-1]), Word([-1]), Word([0])), + Template(Pos([-1]), Word([0]), Word([1])), + Template(Pos([-2]), Pos([-1])), + Template(Pos([1]), Pos([2])), + Template(Pos([1]), Pos([2]), Word([1])), + ] + + +def brill24(): + """ + Return 24 templates of the seminal TBL paper, Brill (1995) + """ + return [ + Template(Pos([-1])), + Template(Pos([1])), + Template(Pos([-2])), + Template(Pos([2])), + Template(Pos([-2, -1])), + Template(Pos([1, 2])), + Template(Pos([-3, -2, -1])), + Template(Pos([1, 2, 3])), + Template(Pos([-1]), Pos([1])), + Template(Pos([-2]), Pos([-1])), + Template(Pos([1]), Pos([2])), + Template(Word([-1])), + Template(Word([1])), + Template(Word([-2])), + Template(Word([2])), + Template(Word([-2, -1])), + Template(Word([1, 2])), + Template(Word([-1, 0])), + Template(Word([0, 1])), + Template(Word([0])), + Template(Word([-1]), Pos([-1])), + Template(Word([1]), Pos([1])), + Template(Word([0]), Word([-1]), Pos([-1])), + Template(Word([0]), Word([1]), Pos([1])), + ] + + 
def describe_template_sets():
    """
    Print the available template sets in this demo, with a short description.
    """
    import inspect
    import sys

    # A bit of magic: enumerate every function defined in this module.
    templatesets = inspect.getmembers(sys.modules[__name__], inspect.isfunction)
    for (name, obj) in templatesets:
        if name == "describe_template_sets":
            continue
        print(name, obj.__doc__, "\n")


######################################################################
# The Brill Tagger
######################################################################


@jsontags.register_tag
class BrillTagger(TaggerI):
    """
    Brill's transformational rule-based tagger.  An initial tagger (such
    as ``tag.DefaultTagger``) assigns an initial tag sequence to a text;
    an ordered list of transformational rules (``TagRule`` objects) then
    corrects the tags of individual tokens.  Brill taggers can be built
    directly from an initial tagger plus rules, but are more often
    produced by learning rules from a training corpus with one of the
    available TaggerTrainers.
    """

    json_tag = "nltk.tag.BrillTagger"

    def __init__(self, initial_tagger, rules, training_stats=None):
        """
        :param initial_tagger: The initial tagger
        :type initial_tagger: TaggerI

        :param rules: An ordered list of transformation rules that
            should be used to correct the initial tagging.
        :type rules: list(TagRule)

        :param training_stats: A dictionary of statistics collected
            during training, for possible later use
        :type training_stats: dict
        """
        self._initial_tagger = initial_tagger
        self._rules = tuple(rules)
        self._training_stats = training_stats

    def encode_json_obj(self):
        return self._initial_tagger, self._rules, self._training_stats

    @classmethod
    def decode_json_obj(cls, obj):
        initial_tagger, rules, training_stats = obj
        return cls(initial_tagger, rules, training_stats)

    def rules(self):
        """
        Return the ordered list of transformation rules that this tagger
        has learnt.

        :return: the ordered list of transformation rules that correct the
            initial tagging
        :rtype: list of Rules
        """
        return self._rules

    def train_stats(self, statistic=None):
        """
        Return a named statistic collected during training, or a dictionary
        of all available statistics if no name given.

        :param statistic: name of statistic
        :type statistic: str
        :return: some statistic collected during training of this tagger
        :rtype: any (but usually a number)
        """
        if statistic is None:
            return self._training_stats
        return self._training_stats.get(statistic)

    def tag(self, tokens):
        # Inherit documentation from TaggerI

        # Run the initial tagger, then correct its output rule by rule.
        tagged_tokens = self._initial_tagger.tag(tokens)

        # Index token positions by their current tag, so each rule is only
        # tried at positions carrying its original tag.
        tag_to_positions = defaultdict(set)
        for i, (token, tag) in enumerate(tagged_tokens):
            tag_to_positions[tag].add(i)

        # Apply each rule, in order.
        for rule in self._rules:
            positions = tag_to_positions.get(rule.original_tag, [])
            changed = rule.apply(tagged_tokens, positions)
            # Keep the index in sync with the tags the rule rewrote.
            for i in changed:
                tag_to_positions[rule.original_tag].remove(i)
                tag_to_positions[rule.replacement_tag].add(i)

        return tagged_tokens

    def print_template_statistics(self, test_stats=None, printunused=True):
        """
        Print a list of all templates, ranked according to efficiency.

        If test_stats is available, templates are ranked by their relative
        contribution (summed over all rules created from the template,
        weighted by score) to performance on the test set; otherwise the
        statistics collected during training are used.  An unweighted
        rule count is also shown (less informative: many low-score rules
        appear towards the end of training).

        :param test_stats: dictionary of statistics collected during testing
        :type test_stats: dict of str -> any (but usually numbers)
        :param printunused: if True, print a list of all unused templates
        :type printunused: bool
        :return: None
        :rtype: None
        """
        tids = [r.templateid for r in self._rules]
        train_stats = self.train_stats()

        trainscores = train_stats["rulescores"]
        assert len(trainscores) == len(
            tids
        ), "corrupt statistics: {} train scores for {} rules".format(trainscores, tids)
        template_counts = Counter(tids)
        weighted_traincounts = Counter()
        for (tid, score) in zip(tids, trainscores):
            weighted_traincounts[tid] += score
        tottrainscores = sum(trainscores)

        def det_tplsort(tpl_value):
            # Deterministic tie-breaking; Counter.most_common() breaks ties
            # differently across python versions and would break
            # cross-version tests.
            return (tpl_value[1], repr(tpl_value[0]))

        def print_train_stats():
            print(
                "TEMPLATE STATISTICS (TRAIN) {} templates, {} rules)".format(
                    len(template_counts), len(tids)
                )
            )
            print(
                "TRAIN ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} "
                "final: {finalerrors:5d} {finalacc:.4f}".format(**train_stats)
            )
            head = "#ID | Score (train) | #Rules | Template"
            print(head, "\n", "-" * len(head), sep="")
            train_tplscores = sorted(
                weighted_traincounts.items(), key=det_tplsort, reverse=True
            )
            for (tid, trainscore) in train_tplscores:
                s = "{} | {:5d} {:5.3f} |{:4d} {:.3f} | {}".format(
                    tid,
                    trainscore,
                    trainscore / tottrainscores,
                    template_counts[tid],
                    template_counts[tid] / len(tids),
                    Template.ALLTEMPLATES[int(tid)],
                )
                print(s)

        def print_testtrain_stats():
            testscores = test_stats["rulescores"]
            print(
                "TEMPLATE STATISTICS (TEST AND TRAIN) ({} templates, {} rules)".format(
                    len(template_counts), len(tids)
                )
            )
            print(
                "TEST ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} "
                "final: {finalerrors:5d} {finalacc:.4f} ".format(**test_stats)
            )
            print(
                "TRAIN ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} "
                "final: {finalerrors:5d} {finalacc:.4f} ".format(**train_stats)
            )
            weighted_testcounts = Counter()
            for (tid, score) in zip(tids, testscores):
                weighted_testcounts[tid] += score
            tottestscores = sum(testscores)
            head = "#ID | Score (test) | Score (train) | #Rules | Template"
            print(head, "\n", "-" * len(head), sep="")
            test_tplscores = sorted(
                weighted_testcounts.items(), key=det_tplsort, reverse=True
            )
            for (tid, testscore) in test_tplscores:
                s = "{:s} |{:5d} {:6.3f} | {:4d} {:.3f} |{:4d} {:.3f} | {:s}".format(
                    tid,
                    testscore,
                    testscore / tottestscores,
                    weighted_traincounts[tid],
                    weighted_traincounts[tid] / tottrainscores,
                    template_counts[tid],
                    template_counts[tid] / len(tids),
                    Template.ALLTEMPLATES[int(tid)],
                )
                print(s)

        def print_unused_templates():
            usedtpls = {int(tid) for tid in tids}
            unused = [
                (tid, tpl)
                for (tid, tpl) in enumerate(Template.ALLTEMPLATES)
                if tid not in usedtpls
            ]
            print(f"UNUSED TEMPLATES ({len(unused)})")

            for (tid, tpl) in unused:
                print(f"{tid:03d} {str(tpl):s}")

        if test_stats is None:
            print_train_stats()
        else:
            print_testtrain_stats()
        print()
        if printunused:
            print_unused_templates()
        print()

    def batch_tag_incremental(self, sequences, gold):
        """
        Tags by applying each rule to the entire corpus (rather than all rules
        to a single sequence), so that per-rule statistics can be collected on
        the test set.

        NOTE: This is inefficient (builds no index, so it traverses the entire
        corpus N times for N rules) -- usually you would not care about
        statistics for individual rules and would use batch_tag() instead.

        :param sequences: lists of token sequences (sentences, in some
            applications) to be tagged
        :type sequences: list of list of strings
        :param gold: the gold standard
        :type gold: list of list of strings
        :returns: tuple of (tagged_sequences, ordered list of rule scores
            (one for each rule))
        """

        def counterrors(xs):
            # Count tokens whose predicted tag differs from the gold tag.
            return sum(t[1] != g[1] for pair in zip(xs, gold) for (t, g) in zip(*pair))

        testing_stats = {}
        testing_stats["tokencount"] = sum(len(t) for t in sequences)
        testing_stats["sequencecount"] = len(sequences)
        tagged_tokenses = [self._initial_tagger.tag(tokens) for tokens in sequences]
        testing_stats["initialerrors"] = counterrors(tagged_tokenses)
        testing_stats["initialacc"] = (
            1 - testing_stats["initialerrors"] / testing_stats["tokencount"]
        )
        # Apply each rule to the entire corpus, in order.
        errors = [testing_stats["initialerrors"]]
        for rule in self._rules:
            for tagged_tokens in tagged_tokenses:
                rule.apply(tagged_tokens)
            errors.append(counterrors(tagged_tokenses))
        testing_stats["rulescores"] = [
            err0 - err1 for (err0, err1) in zip(errors, errors[1:])
        ]
        testing_stats["finalerrors"] = errors[-1]
        testing_stats["finalacc"] = (
            1 - testing_stats["finalerrors"] / testing_stats["tokencount"]
        )
        return (tagged_tokenses, testing_stats)


# (chunk boundary: the flattened diff continues with the header of
# lib/python3.10/site-packages/nltk/tag/brill_trainer.py)
b/lib/python3.10/site-packages/nltk/tag/brill_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..236fd9858e755b501f3a8f384b68a383b6902f99 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/tag/brill_trainer.py @@ -0,0 +1,629 @@ +# Natural Language Toolkit: Transformation-based learning +# +# Copyright (C) 2001-2013 NLTK Project +# Author: Marcus Uneson +# based on previous (nltk2) version by +# Christopher Maloof, Edward Loper, Steven Bird +# URL: +# For license information, see LICENSE.TXT + +import bisect +import textwrap +from collections import defaultdict + +from nltk.tag import BrillTagger, untag + +###################################################################### +# Brill Tagger Trainer +###################################################################### + + +class BrillTaggerTrainer: + """ + A trainer for tbl taggers. + """ + + def __init__( + self, initial_tagger, templates, trace=0, deterministic=None, ruleformat="str" + ): + """ + Construct a Brill tagger from a baseline tagger and a + set of templates + + :param initial_tagger: the baseline tagger + :type initial_tagger: Tagger + :param templates: templates to be used in training + :type templates: list of Templates + :param trace: verbosity level + :type trace: int + :param deterministic: if True, adjudicate ties deterministically + :type deterministic: bool + :param ruleformat: format of reported Rules + :type ruleformat: str + :return: An untrained BrillTagger + :rtype: BrillTagger + """ + + if deterministic is None: + deterministic = trace > 0 + self._initial_tagger = initial_tagger + self._templates = templates + self._trace = trace + self._deterministic = deterministic + self._ruleformat = ruleformat + + self._tag_positions = None + """Mapping from tags to lists of positions that use that tag.""" + + self._rules_by_position = None + """Mapping from positions to the set of rules that are known + to occur at that position. Position is (sentnum, wordnum). 
+ Initially, this will only contain positions where each rule + applies in a helpful way; but when we examine a rule, we'll + extend this list to also include positions where each rule + applies in a harmful or neutral way.""" + + self._positions_by_rule = None + """Mapping from rule to position to effect, specifying the + effect that each rule has on the overall score, at each + position. Position is (sentnum, wordnum); and effect is + -1, 0, or 1. As with _rules_by_position, this mapping starts + out only containing rules with positive effects; but when + we examine a rule, we'll extend this mapping to include + the positions where the rule is harmful or neutral.""" + + self._rules_by_score = None + """Mapping from scores to the set of rules whose effect on the + overall score is upper bounded by that score. Invariant: + rulesByScore[s] will contain r iff the sum of + _positions_by_rule[r] is s.""" + + self._rule_scores = None + """Mapping from rules to upper bounds on their effects on the + overall score. This is the inverse mapping to _rules_by_score. + Invariant: ruleScores[r] = sum(_positions_by_rule[r])""" + + self._first_unknown_position = None + """Mapping from rules to the first position where we're unsure + if the rule applies. This records the next position we + need to check to see if the rule messed anything up.""" + + # Training + + def train(self, train_sents, max_rules=200, min_score=2, min_acc=None): + r""" + Trains the Brill tagger on the corpus *train_sents*, + producing at most *max_rules* transformations, each of which + reduces the net number of errors in the corpus by at least + *min_score*, and each of which has accuracy not lower than + *min_acc*. 
+ + >>> # Relevant imports + >>> from nltk.tbl.template import Template + >>> from nltk.tag.brill import Pos, Word + >>> from nltk.tag import untag, RegexpTagger, BrillTaggerTrainer + + >>> # Load some data + >>> from nltk.corpus import treebank + >>> training_data = treebank.tagged_sents()[:100] + >>> baseline_data = treebank.tagged_sents()[100:200] + >>> gold_data = treebank.tagged_sents()[200:300] + >>> testing_data = [untag(s) for s in gold_data] + + >>> backoff = RegexpTagger([ + ... (r'^-?[0-9]+(\.[0-9]+)?$', 'CD'), # cardinal numbers + ... (r'(The|the|A|a|An|an)$', 'AT'), # articles + ... (r'.*able$', 'JJ'), # adjectives + ... (r'.*ness$', 'NN'), # nouns formed from adjectives + ... (r'.*ly$', 'RB'), # adverbs + ... (r'.*s$', 'NNS'), # plural nouns + ... (r'.*ing$', 'VBG'), # gerunds + ... (r'.*ed$', 'VBD'), # past tense verbs + ... (r'.*', 'NN') # nouns (default) + ... ]) + + >>> baseline = backoff #see NOTE1 + >>> baseline.accuracy(gold_data) #doctest: +ELLIPSIS + 0.243... + + >>> # Set up templates + >>> Template._cleartemplates() #clear any templates created in earlier tests + >>> templates = [Template(Pos([-1])), Template(Pos([-1]), Word([0]))] + + >>> # Construct a BrillTaggerTrainer + >>> tt = BrillTaggerTrainer(baseline, templates, trace=3) + + >>> tagger1 = tt.train(training_data, max_rules=10) + TBL train (fast) (seqs: 100; tokens: 2417; tpls: 2; min score: 2; min acc: None) + Finding initial useful rules... + Found 847 useful rules. + + B | + S F r O | Score = Fixed - Broken + c i o t | R Fixed = num tags changed incorrect -> correct + o x k h | u Broken = num tags changed correct -> incorrect + r e e e | l Other = num tags changed incorrect -> incorrect + e d n r | e + ------------------+------------------------------------------------------- + 132 132 0 0 | AT->DT if Pos:NN@[-1] + 85 85 0 0 | NN->, if Pos:NN@[-1] & Word:,@[0] + 69 69 0 0 | NN->. 
if Pos:NN@[-1] & Word:.@[0] + 51 51 0 0 | NN->IN if Pos:NN@[-1] & Word:of@[0] + 47 63 16 162 | NN->IN if Pos:NNS@[-1] + 33 33 0 0 | NN->TO if Pos:NN@[-1] & Word:to@[0] + 26 26 0 0 | IN->. if Pos:NNS@[-1] & Word:.@[0] + 24 24 0 0 | IN->, if Pos:NNS@[-1] & Word:,@[0] + 22 27 5 24 | NN->-NONE- if Pos:VBD@[-1] + 17 17 0 0 | NN->CC if Pos:NN@[-1] & Word:and@[0] + + >>> tagger1.rules()[1:3] + (Rule('001', 'NN', ',', [(Pos([-1]),'NN'), (Word([0]),',')]), Rule('001', 'NN', '.', [(Pos([-1]),'NN'), (Word([0]),'.')])) + + >>> train_stats = tagger1.train_stats() + >>> [train_stats[stat] for stat in ['initialerrors', 'finalerrors', 'rulescores']] + [1776, 1270, [132, 85, 69, 51, 47, 33, 26, 24, 22, 17]] + + >>> tagger1.print_template_statistics(printunused=False) + TEMPLATE STATISTICS (TRAIN) 2 templates, 10 rules) + TRAIN ( 2417 tokens) initial 1776 0.2652 final: 1270 0.4746 + #ID | Score (train) | #Rules | Template + -------------------------------------------- + 001 | 305 0.603 | 7 0.700 | Template(Pos([-1]),Word([0])) + 000 | 201 0.397 | 3 0.300 | Template(Pos([-1])) + + + + >>> round(tagger1.accuracy(gold_data),5) + 0.43834 + + >>> tagged, test_stats = tagger1.batch_tag_incremental(testing_data, gold_data) + + >>> tagged[33][12:] == [('foreign', 'IN'), ('debt', 'NN'), ('of', 'IN'), ('$', 'NN'), ('64', 'CD'), + ... ('billion', 'NN'), ('*U*', 'NN'), ('--', 'NN'), ('the', 'DT'), ('third-highest', 'NN'), ('in', 'NN'), + ... ('the', 'DT'), ('developing', 'VBG'), ('world', 'NN'), ('.', '.')] + True + + >>> [test_stats[stat] for stat in ['initialerrors', 'finalerrors', 'rulescores']] + [1859, 1380, [100, 85, 67, 58, 27, 36, 27, 16, 31, 32]] + + >>> # A high-accuracy tagger + >>> tagger2 = tt.train(training_data, max_rules=10, min_acc=0.99) + TBL train (fast) (seqs: 100; tokens: 2417; tpls: 2; min score: 2; min acc: 0.99) + Finding initial useful rules... + Found 847 useful rules. 
+ + B | + S F r O | Score = Fixed - Broken + c i o t | R Fixed = num tags changed incorrect -> correct + o x k h | u Broken = num tags changed correct -> incorrect + r e e e | l Other = num tags changed incorrect -> incorrect + e d n r | e + ------------------+------------------------------------------------------- + 132 132 0 0 | AT->DT if Pos:NN@[-1] + 85 85 0 0 | NN->, if Pos:NN@[-1] & Word:,@[0] + 69 69 0 0 | NN->. if Pos:NN@[-1] & Word:.@[0] + 51 51 0 0 | NN->IN if Pos:NN@[-1] & Word:of@[0] + 36 36 0 0 | NN->TO if Pos:NN@[-1] & Word:to@[0] + 26 26 0 0 | NN->. if Pos:NNS@[-1] & Word:.@[0] + 24 24 0 0 | NN->, if Pos:NNS@[-1] & Word:,@[0] + 19 19 0 6 | NN->VB if Pos:TO@[-1] + 18 18 0 0 | CD->-NONE- if Pos:NN@[-1] & Word:0@[0] + 18 18 0 0 | NN->CC if Pos:NN@[-1] & Word:and@[0] + + >>> round(tagger2.accuracy(gold_data), 8) + 0.43996744 + + >>> tagger2.rules()[2:4] + (Rule('001', 'NN', '.', [(Pos([-1]),'NN'), (Word([0]),'.')]), Rule('001', 'NN', 'IN', [(Pos([-1]),'NN'), (Word([0]),'of')])) + + # NOTE1: (!!FIXME) A far better baseline uses nltk.tag.UnigramTagger, + # with a RegexpTagger only as backoff. For instance, + # >>> baseline = UnigramTagger(baseline_data, backoff=backoff) + # However, as of Nov 2013, nltk.tag.UnigramTagger does not yield consistent results + # between python versions. The simplistic backoff above is a workaround to make doctests + # get consistent input. 
+ + :param train_sents: training data + :type train_sents: list(list(tuple)) + :param max_rules: output at most max_rules rules + :type max_rules: int + :param min_score: stop training when no rules better than min_score can be found + :type min_score: int + :param min_acc: discard any rule with lower accuracy than min_acc + :type min_acc: float or None + :return: the learned tagger + :rtype: BrillTagger + """ + # FIXME: several tests are a bit too dependent on tracing format + # FIXME: tests in trainer.fast and trainer.brillorig are exact duplicates + + # Basic idea: Keep track of the rules that apply at each position. + # And keep track of the positions to which each rule applies. + + # Create a new copy of the training corpus, and run the + # initial tagger on it. We will progressively update this + # test corpus to look more like the training corpus. + test_sents = [ + list(self._initial_tagger.tag(untag(sent))) for sent in train_sents + ] + + # Collect some statistics on the training process + trainstats = {} + trainstats["min_acc"] = min_acc + trainstats["min_score"] = min_score + trainstats["tokencount"] = sum(len(t) for t in test_sents) + trainstats["sequencecount"] = len(test_sents) + trainstats["templatecount"] = len(self._templates) + trainstats["rulescores"] = [] + trainstats["initialerrors"] = sum( + tag[1] != truth[1] + for paired in zip(test_sents, train_sents) + for (tag, truth) in zip(*paired) + ) + trainstats["initialacc"] = ( + 1 - trainstats["initialerrors"] / trainstats["tokencount"] + ) + if self._trace > 0: + print( + "TBL train (fast) (seqs: {sequencecount}; tokens: {tokencount}; " + "tpls: {templatecount}; min score: {min_score}; min acc: {min_acc})".format( + **trainstats + ) + ) + + # Initialize our mappings. This will find any errors made + # by the initial tagger, and use those to generate repair + # rules, which are added to the rule mappings. 
+ if self._trace: + print("Finding initial useful rules...") + self._init_mappings(test_sents, train_sents) + if self._trace: + print(f" Found {len(self._rule_scores)} useful rules.") + + # Let the user know what we're up to. + if self._trace > 2: + self._trace_header() + elif self._trace == 1: + print("Selecting rules...") + + # Repeatedly select the best rule, and add it to `rules`. + rules = [] + try: + while len(rules) < max_rules: + # Find the best rule, and add it to our rule list. + rule = self._best_rule(train_sents, test_sents, min_score, min_acc) + if rule: + rules.append(rule) + score = self._rule_scores[rule] + trainstats["rulescores"].append(score) + else: + break # No more good rules left! + + # Report the rule that we found. + if self._trace > 1: + self._trace_rule(rule) + + # Apply the new rule at the relevant sites + self._apply_rule(rule, test_sents) + + # Update _tag_positions[rule.original_tag] and + # _tag_positions[rule.replacement_tag] for the affected + # positions (i.e., self._positions_by_rule[rule]). + self._update_tag_positions(rule) + + # Update rules that were affected by the change. + self._update_rules(rule, train_sents, test_sents) + + # The user can cancel training manually: + except KeyboardInterrupt: + print(f"Training stopped manually -- {len(rules)} rules found") + + # Discard our tag position mapping & rule mappings. + self._clean() + trainstats["finalerrors"] = trainstats["initialerrors"] - sum( + trainstats["rulescores"] + ) + trainstats["finalacc"] = ( + 1 - trainstats["finalerrors"] / trainstats["tokencount"] + ) + # Create and return a tagger from the rules we found. + return BrillTagger(self._initial_tagger, rules, trainstats) + + def _init_mappings(self, test_sents, train_sents): + """ + Initialize the tag position mapping & the rule related + mappings. For each error in test_sents, find new rules that + would correct them, and add them to the rule mappings. 
+ """ + self._tag_positions = defaultdict(list) + self._rules_by_position = defaultdict(set) + self._positions_by_rule = defaultdict(dict) + self._rules_by_score = defaultdict(set) + self._rule_scores = defaultdict(int) + self._first_unknown_position = defaultdict(int) + # Scan through the corpus, initializing the tag_positions + # mapping and all the rule-related mappings. + for sentnum, sent in enumerate(test_sents): + for wordnum, (word, tag) in enumerate(sent): + + # Initialize tag_positions + self._tag_positions[tag].append((sentnum, wordnum)) + + # If it's an error token, update the rule-related mappings. + correct_tag = train_sents[sentnum][wordnum][1] + if tag != correct_tag: + for rule in self._find_rules(sent, wordnum, correct_tag): + self._update_rule_applies(rule, sentnum, wordnum, train_sents) + + def _clean(self): + self._tag_positions = None + self._rules_by_position = None + self._positions_by_rule = None + self._rules_by_score = None + self._rule_scores = None + self._first_unknown_position = None + + def _find_rules(self, sent, wordnum, new_tag): + """ + Use the templates to find rules that apply at index *wordnum* + in the sentence *sent* and generate the tag *new_tag*. + """ + for template in self._templates: + yield from template.applicable_rules(sent, wordnum, new_tag) + + def _update_rule_applies(self, rule, sentnum, wordnum, train_sents): + """ + Update the rule data tables to reflect the fact that + *rule* applies at the position *(sentnum, wordnum)*. + """ + pos = sentnum, wordnum + + # If the rule is already known to apply here, ignore. + # (This only happens if the position's tag hasn't changed.) + if pos in self._positions_by_rule[rule]: + return + + # Update self._positions_by_rule. 
+ correct_tag = train_sents[sentnum][wordnum][1] + if rule.replacement_tag == correct_tag: + self._positions_by_rule[rule][pos] = 1 + elif rule.original_tag == correct_tag: + self._positions_by_rule[rule][pos] = -1 + else: # was wrong, remains wrong + self._positions_by_rule[rule][pos] = 0 + + # Update _rules_by_position + self._rules_by_position[pos].add(rule) + + # Update _rule_scores. + old_score = self._rule_scores[rule] + self._rule_scores[rule] += self._positions_by_rule[rule][pos] + + # Update _rules_by_score. + self._rules_by_score[old_score].discard(rule) + self._rules_by_score[self._rule_scores[rule]].add(rule) + + def _update_rule_not_applies(self, rule, sentnum, wordnum): + """ + Update the rule data tables to reflect the fact that *rule* + does not apply at the position *(sentnum, wordnum)*. + """ + pos = sentnum, wordnum + + # Update _rule_scores. + old_score = self._rule_scores[rule] + self._rule_scores[rule] -= self._positions_by_rule[rule][pos] + + # Update _rules_by_score. + self._rules_by_score[old_score].discard(rule) + self._rules_by_score[self._rule_scores[rule]].add(rule) + + # Update _positions_by_rule + del self._positions_by_rule[rule][pos] + self._rules_by_position[pos].remove(rule) + + # Optional addition: if the rule now applies nowhere, delete + # all its dictionary entries. + + def _best_rule(self, train_sents, test_sents, min_score, min_acc): + """ + Find the next best rule. This is done by repeatedly taking a + rule with the highest score and stepping through the corpus to + see where it applies. When it makes an error (decreasing its + score) it's bumped down, and we try a new rule with the + highest score. When we find a rule which has the highest + score *and* which has been tested against the entire corpus, we + can conclude that it's the next best rule. 
+ """ + for max_score in sorted(self._rules_by_score.keys(), reverse=True): + if len(self._rules_by_score) == 0: + return None + if max_score < min_score or max_score <= 0: + return None + best_rules = list(self._rules_by_score[max_score]) + if self._deterministic: + best_rules.sort(key=repr) + for rule in best_rules: + positions = self._tag_positions[rule.original_tag] + + unk = self._first_unknown_position.get(rule, (0, -1)) + start = bisect.bisect_left(positions, unk) + + for i in range(start, len(positions)): + sentnum, wordnum = positions[i] + if rule.applies(test_sents[sentnum], wordnum): + self._update_rule_applies(rule, sentnum, wordnum, train_sents) + if self._rule_scores[rule] < max_score: + self._first_unknown_position[rule] = (sentnum, wordnum + 1) + break # The update demoted the rule. + + if self._rule_scores[rule] == max_score: + self._first_unknown_position[rule] = (len(train_sents) + 1, 0) + # optimization: if no min_acc threshold given, don't bother computing accuracy + if min_acc is None: + return rule + else: + changes = self._positions_by_rule[rule].values() + num_fixed = len([c for c in changes if c == 1]) + num_broken = len([c for c in changes if c == -1]) + # acc here is fixed/(fixed+broken); could also be + # fixed/(fixed+broken+other) == num_fixed/len(changes) + acc = num_fixed / (num_fixed + num_broken) + if acc >= min_acc: + return rule + # else: rule too inaccurate, discard and try next + + # We demoted (or skipped due to < min_acc, if that was given) + # all the rules with score==max_score. + + assert min_acc is not None or not self._rules_by_score[max_score] + if not self._rules_by_score[max_score]: + del self._rules_by_score[max_score] + + def _apply_rule(self, rule, test_sents): + """ + Update *test_sents* by applying *rule* everywhere where its + conditions are met. 
+ """ + update_positions = set(self._positions_by_rule[rule]) + new_tag = rule.replacement_tag + + if self._trace > 3: + self._trace_apply(len(update_positions)) + + # Update test_sents. + for (sentnum, wordnum) in update_positions: + text = test_sents[sentnum][wordnum][0] + test_sents[sentnum][wordnum] = (text, new_tag) + + def _update_tag_positions(self, rule): + """ + Update _tag_positions to reflect the changes to tags that are + made by *rule*. + """ + # Update the tag index. + for pos in self._positions_by_rule[rule]: + # Delete the old tag. + old_tag_positions = self._tag_positions[rule.original_tag] + old_index = bisect.bisect_left(old_tag_positions, pos) + del old_tag_positions[old_index] + # Insert the new tag. + new_tag_positions = self._tag_positions[rule.replacement_tag] + bisect.insort_left(new_tag_positions, pos) + + def _update_rules(self, rule, train_sents, test_sents): + """ + Check if we should add or remove any rules from consideration, + given the changes made by *rule*. + """ + # Collect a list of all positions that might be affected. + neighbors = set() + for sentnum, wordnum in self._positions_by_rule[rule]: + for template in self._templates: + n = template.get_neighborhood(test_sents[sentnum], wordnum) + neighbors.update([(sentnum, i) for i in n]) + + # Update the rules at each position. + num_obsolete = num_new = num_unseen = 0 + for sentnum, wordnum in neighbors: + test_sent = test_sents[sentnum] + correct_tag = train_sents[sentnum][wordnum][1] + + # Check if the change causes any rule at this position to + # stop matching; if so, then update our rule mappings + # accordingly. + old_rules = set(self._rules_by_position[sentnum, wordnum]) + for old_rule in old_rules: + if not old_rule.applies(test_sent, wordnum): + num_obsolete += 1 + self._update_rule_not_applies(old_rule, sentnum, wordnum) + + # Check if the change causes our templates to propose any + # new rules for this position. 
+ for template in self._templates: + for new_rule in template.applicable_rules( + test_sent, wordnum, correct_tag + ): + if new_rule not in old_rules: + num_new += 1 + if new_rule not in self._rule_scores: + num_unseen += 1 + old_rules.add(new_rule) + self._update_rule_applies( + new_rule, sentnum, wordnum, train_sents + ) + + # We may have caused other rules to match here, that are + # not proposed by our templates -- in particular, rules + # that are harmful or neutral. We therefore need to + # update any rule whose first_unknown_position is past + # this rule. + for new_rule, pos in self._first_unknown_position.items(): + if pos > (sentnum, wordnum): + if new_rule not in old_rules: + num_new += 1 + if new_rule.applies(test_sent, wordnum): + self._update_rule_applies( + new_rule, sentnum, wordnum, train_sents + ) + + if self._trace > 3: + self._trace_update_rules(num_obsolete, num_new, num_unseen) + + # Tracing + + def _trace_header(self): + print( + """ + B | + S F r O | Score = Fixed - Broken + c i o t | R Fixed = num tags changed incorrect -> correct + o x k h | u Broken = num tags changed correct -> incorrect + r e e e | l Other = num tags changed incorrect -> incorrect + e d n r | e +------------------+------------------------------------------------------- + """.rstrip() + ) + + def _trace_rule(self, rule): + assert self._rule_scores[rule] == sum(self._positions_by_rule[rule].values()) + + changes = self._positions_by_rule[rule].values() + num_fixed = len([c for c in changes if c == 1]) + num_broken = len([c for c in changes if c == -1]) + num_other = len([c for c in changes if c == 0]) + score = self._rule_scores[rule] + + rulestr = rule.format(self._ruleformat) + if self._trace > 2: + print( + "{:4d}{:4d}{:4d}{:4d} |".format( + score, num_fixed, num_broken, num_other + ), + end=" ", + ) + print( + textwrap.fill( + rulestr, + initial_indent=" " * 20, + width=79, + subsequent_indent=" " * 18 + "| ", + ).strip() + ) + else: + print(rulestr) + + def 
_trace_apply(self, num_updates): + prefix = " " * 18 + "|" + print(prefix) + print(prefix, f"Applying rule to {num_updates} positions.") + + def _trace_update_rules(self, num_obsolete, num_new, num_unseen): + prefix = " " * 18 + "|" + print(prefix, "Updated rule tables:") + print(prefix, (f" - {num_obsolete} rule applications removed")) + print( + prefix, + (f" - {num_new} rule applications added ({num_unseen} novel)"), + ) + print(prefix) diff --git a/lib/python3.10/site-packages/nltk/tag/crf.py b/lib/python3.10/site-packages/nltk/tag/crf.py new file mode 100644 index 0000000000000000000000000000000000000000..dfc728c8d55c5eecadd7dc214f756f5224b7f017 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/tag/crf.py @@ -0,0 +1,207 @@ +# Natural Language Toolkit: Interface to the CRFSuite Tagger +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Long Duong +# URL: +# For license information, see LICENSE.TXT + +""" +A module for POS tagging using CRFSuite +""" + +import re +import unicodedata + +from nltk.tag.api import TaggerI + +try: + import pycrfsuite +except ImportError: + pass + + +class CRFTagger(TaggerI): + """ + A module for POS tagging using CRFSuite https://pypi.python.org/pypi/python-crfsuite + + >>> from nltk.tag import CRFTagger + >>> ct = CRFTagger() # doctest: +SKIP + + >>> train_data = [[('University','Noun'), ('is','Verb'), ('a','Det'), ('good','Adj'), ('place','Noun')], + ... 
[('dog','Noun'),('eat','Verb'),('meat','Noun')]] + + >>> ct.train(train_data,'model.crf.tagger') # doctest: +SKIP + >>> ct.tag_sents([['dog','is','good'], ['Cat','eat','meat']]) # doctest: +SKIP + [[('dog', 'Noun'), ('is', 'Verb'), ('good', 'Adj')], [('Cat', 'Noun'), ('eat', 'Verb'), ('meat', 'Noun')]] + + >>> gold_sentences = [[('dog','Noun'),('is','Verb'),('good','Adj')] , [('Cat','Noun'),('eat','Verb'), ('meat','Noun')]] + >>> ct.accuracy(gold_sentences) # doctest: +SKIP + 1.0 + + Setting learned model file + >>> ct = CRFTagger() # doctest: +SKIP + >>> ct.set_model_file('model.crf.tagger') # doctest: +SKIP + >>> ct.accuracy(gold_sentences) # doctest: +SKIP + 1.0 + """ + + def __init__(self, feature_func=None, verbose=False, training_opt={}): + """ + Initialize the CRFSuite tagger + + :param feature_func: The function that extracts features for each token of a sentence. This function should take + 2 parameters: tokens and index which extract features at index position from tokens list. See the build in + _get_features function for more detail. + :param verbose: output the debugging messages during training. + :type verbose: boolean + :param training_opt: python-crfsuite training options + :type training_opt: dictionary + + Set of possible training options (using LBFGS training algorithm). + :'feature.minfreq': The minimum frequency of features. + :'feature.possible_states': Force to generate possible state features. + :'feature.possible_transitions': Force to generate possible transition features. + :'c1': Coefficient for L1 regularization. + :'c2': Coefficient for L2 regularization. + :'max_iterations': The maximum number of iterations for L-BFGS optimization. + :'num_memories': The number of limited memories for approximating the inverse hessian matrix. + :'epsilon': Epsilon for testing the convergence of the objective. + :'period': The duration of iterations to test the stopping criterion. 
+ :'delta': The threshold for the stopping criterion; an L-BFGS iteration stops when the + improvement of the log likelihood over the last ${period} iterations is no greater than this threshold. + :'linesearch': The line search algorithm used in L-BFGS updates: + + - 'MoreThuente': More and Thuente's method, + - 'Backtracking': Backtracking method with regular Wolfe condition, + - 'StrongBacktracking': Backtracking method with strong Wolfe condition + :'max_linesearch': The maximum number of trials for the line search algorithm. + """ + + self._model_file = "" + self._tagger = pycrfsuite.Tagger() + + if feature_func is None: + self._feature_func = self._get_features + else: + self._feature_func = feature_func + + self._verbose = verbose + self._training_options = training_opt + self._pattern = re.compile(r"\d") + + def set_model_file(self, model_file): + self._model_file = model_file + self._tagger.open(self._model_file) + + def _get_features(self, tokens, idx): + """ + Extract basic features about this word including + - Current word + - is it capitalized? + - Does it have punctuation? + - Does it have a number? + - Suffixes up to length 3 + + Note that : we might include feature over previous word, next word etc. 
+ + :return: a list which contains the features + :rtype: list(str) + """ + token = tokens[idx] + + feature_list = [] + + if not token: + return feature_list + + # Capitalization + if token[0].isupper(): + feature_list.append("CAPITALIZATION") + + # Number + if re.search(self._pattern, token) is not None: + feature_list.append("HAS_NUM") + + # Punctuation + punc_cat = {"Pc", "Pd", "Ps", "Pe", "Pi", "Pf", "Po"} + if all(unicodedata.category(x) in punc_cat for x in token): + feature_list.append("PUNCTUATION") + + # Suffix up to length 3 + if len(token) > 1: + feature_list.append("SUF_" + token[-1:]) + if len(token) > 2: + feature_list.append("SUF_" + token[-2:]) + if len(token) > 3: + feature_list.append("SUF_" + token[-3:]) + + feature_list.append("WORD_" + token) + + return feature_list + + def tag_sents(self, sents): + """ + Tag a list of sentences. NB before using this function, user should specify the mode_file either by + + - Train a new model using ``train`` function + - Use the pre-trained model which is set via ``set_model_file`` function + + :params sentences: list of sentences needed to tag. + :type sentences: list(list(str)) + :return: list of tagged sentences. + :rtype: list(list(tuple(str,str))) + """ + if self._model_file == "": + raise Exception( + " No model file is found !! Please use train or set_model_file function" + ) + + # We need the list of sentences instead of the list generator for matching the input and output + result = [] + for tokens in sents: + features = [self._feature_func(tokens, i) for i in range(len(tokens))] + labels = self._tagger.tag(features) + + if len(labels) != len(tokens): + raise Exception(" Predicted Length Not Matched, Expect Errors !") + + tagged_sent = list(zip(tokens, labels)) + result.append(tagged_sent) + + return result + + def train(self, train_data, model_file): + """ + Train the CRF tagger using CRFSuite + :params train_data : is the list of annotated sentences. 
+ :type train_data : list (list(tuple(str,str))) + :params model_file : the model will be saved to this file. + + """ + trainer = pycrfsuite.Trainer(verbose=self._verbose) + trainer.set_params(self._training_options) + + for sent in train_data: + tokens, labels = zip(*sent) + features = [self._feature_func(tokens, i) for i in range(len(tokens))] + trainer.append(features, labels) + + # Now train the model, the output should be model_file + trainer.train(model_file) + # Save the model file + self.set_model_file(model_file) + + def tag(self, tokens): + """ + Tag a sentence using Python CRFSuite Tagger. NB before using this function, user should specify the mode_file either by + + - Train a new model using ``train`` function + - Use the pre-trained model which is set via ``set_model_file`` function + + :params tokens: list of tokens needed to tag. + :type tokens: list(str) + :return: list of tagged tokens. + :rtype: list(tuple(str,str)) + """ + + return self.tag_sents([tokens])[0] diff --git a/lib/python3.10/site-packages/nltk/tag/hmm.py b/lib/python3.10/site-packages/nltk/tag/hmm.py new file mode 100644 index 0000000000000000000000000000000000000000..6577789b883828ce01e84c0864de57eead81f12b --- /dev/null +++ b/lib/python3.10/site-packages/nltk/tag/hmm.py @@ -0,0 +1,1329 @@ +# Natural Language Toolkit: Hidden Markov Model +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Trevor Cohn +# Philip Blunsom +# Tiago Tresoldi (fixes) +# Steven Bird (fixes) +# Joseph Frazee (fixes) +# Steven Xu (fixes) +# URL: +# For license information, see LICENSE.TXT + +""" +Hidden Markov Models (HMMs) largely used to assign the correct label sequence +to sequential data or assess the probability of a given label and data +sequence. These models are finite state machines characterised by a number of +states, transitions between these states, and output symbols emitted while in +each state. 
The HMM is an extension to the Markov chain, where each state +corresponds deterministically to a given event. In the HMM the observation is +a probabilistic function of the state. HMMs share the Markov chain's +assumption, being that the probability of transition from one state to another +only depends on the current state - i.e. the series of states that led to the +current state are not used. They are also time invariant. + +The HMM is a directed graph, with probability weighted edges (representing the +probability of a transition between the source and sink states) where each +vertex emits an output symbol when entered. The symbol (or observation) is +non-deterministically generated. For this reason, knowing that a sequence of +output observations was generated by a given HMM does not mean that the +corresponding sequence of states (and what the current state is) is known. +This is the 'hidden' in the hidden markov model. + +Formally, a HMM can be characterised by: + +- the output observation alphabet. This is the set of symbols which may be + observed as output of the system. +- the set of states. +- the transition probabilities *a_{ij} = P(s_t = j | s_{t-1} = i)*. These + represent the probability of transition to each state from a given state. +- the output probability matrix *b_i(k) = P(X_t = o_k | s_t = i)*. These + represent the probability of observing each symbol in a given state. +- the initial state distribution. This gives the probability of starting + in each state. + +To ground this discussion, take a common NLP application, part-of-speech (POS) +tagging. An HMM is desirable for this task as the highest probability tag +sequence can be calculated for a given sequence of word forms. This differs +from other tagging techniques which often tag each word individually, seeking +to optimise each individual tagging greedily without regard to the optimal +combination of tags for a larger unit, such as a sentence. 
The HMM does this +with the Viterbi algorithm, which efficiently computes the optimal path +through the graph given the sequence of words forms. + +In POS tagging the states usually have a 1:1 correspondence with the tag +alphabet - i.e. each state represents a single tag. The output observation +alphabet is the set of word forms (the lexicon), and the remaining three +parameters are derived by a training regime. With this information the +probability of a given sentence can be easily derived, by simply summing the +probability of each distinct path through the model. Similarly, the highest +probability tagging sequence can be derived with the Viterbi algorithm, +yielding a state sequence which can be mapped into a tag sequence. + +This discussion assumes that the HMM has been trained. This is probably the +most difficult task with the model, and requires either MLE estimates of the +parameters or unsupervised learning using the Baum-Welch algorithm, a variant +of EM. + +For more information, please consult the source code for this module, +which includes extensive demonstration code. +""" + +import itertools +import re + +try: + import numpy as np +except ImportError: + pass + +from nltk.metrics import accuracy +from nltk.probability import ( + ConditionalFreqDist, + ConditionalProbDist, + DictionaryConditionalProbDist, + DictionaryProbDist, + FreqDist, + LidstoneProbDist, + MLEProbDist, + MutableProbDist, + RandomProbDist, +) +from nltk.tag.api import TaggerI +from nltk.util import LazyMap, unique_list + +_TEXT = 0 # index of text in a tuple +_TAG = 1 # index of tag in a tuple + + +def _identity(labeled_symbols): + return labeled_symbols + + +class HiddenMarkovModelTagger(TaggerI): + """ + Hidden Markov model class, a generative model for labelling sequence data. 
+ These models define the joint probability of a sequence of symbols and + their labels (state transitions) as the product of the starting state + probability, the probability of each state transition, and the probability + of each observation being generated from each state. This is described in + more detail in the module documentation. + + This implementation is based on the HMM description in Chapter 8, Huang, + Acero and Hon, Spoken Language Processing and includes an extension for + training shallow HMM parsers or specialized HMMs as in Molina et. + al, 2002. A specialized HMM modifies training data by applying a + specialization function to create a new training set that is more + appropriate for sequential tagging with an HMM. A typical use case is + chunking. + + :param symbols: the set of output symbols (alphabet) + :type symbols: seq of any + :param states: a set of states representing state space + :type states: seq of any + :param transitions: transition probabilities; Pr(s_i | s_j) is the + probability of transition from state i given the model is in + state_j + :type transitions: ConditionalProbDistI + :param outputs: output probabilities; Pr(o_k | s_i) is the probability + of emitting symbol k when entering state i + :type outputs: ConditionalProbDistI + :param priors: initial state distribution; Pr(s_i) is the probability + of starting in state i + :type priors: ProbDistI + :param transform: an optional function for transforming training + instances, defaults to the identity function. 
+ :type transform: callable + """ + + def __init__( + self, symbols, states, transitions, outputs, priors, transform=_identity + ): + self._symbols = unique_list(symbols) + self._states = unique_list(states) + self._transitions = transitions + self._outputs = outputs + self._priors = priors + self._cache = None + self._transform = transform + + @classmethod + def _train( + cls, + labeled_sequence, + test_sequence=None, + unlabeled_sequence=None, + transform=_identity, + estimator=None, + **kwargs, + ): + + if estimator is None: + + def estimator(fd, bins): + return LidstoneProbDist(fd, 0.1, bins) + + labeled_sequence = LazyMap(transform, labeled_sequence) + symbols = unique_list(word for sent in labeled_sequence for word, tag in sent) + tag_set = unique_list(tag for sent in labeled_sequence for word, tag in sent) + + trainer = HiddenMarkovModelTrainer(tag_set, symbols) + hmm = trainer.train_supervised(labeled_sequence, estimator=estimator) + hmm = cls( + hmm._symbols, + hmm._states, + hmm._transitions, + hmm._outputs, + hmm._priors, + transform=transform, + ) + + if test_sequence: + hmm.test(test_sequence, verbose=kwargs.get("verbose", False)) + + if unlabeled_sequence: + max_iterations = kwargs.get("max_iterations", 5) + hmm = trainer.train_unsupervised( + unlabeled_sequence, model=hmm, max_iterations=max_iterations + ) + if test_sequence: + hmm.test(test_sequence, verbose=kwargs.get("verbose", False)) + + return hmm + + @classmethod + def train( + cls, labeled_sequence, test_sequence=None, unlabeled_sequence=None, **kwargs + ): + """ + Train a new HiddenMarkovModelTagger using the given labeled and + unlabeled training instances. Testing will be performed if test + instances are provided. + + :return: a hidden markov model tagger + :rtype: HiddenMarkovModelTagger + :param labeled_sequence: a sequence of labeled training instances, + i.e. 
a list of sentences represented as tuples + :type labeled_sequence: list(list) + :param test_sequence: a sequence of labeled test instances + :type test_sequence: list(list) + :param unlabeled_sequence: a sequence of unlabeled training instances, + i.e. a list of sentences represented as words + :type unlabeled_sequence: list(list) + :param transform: an optional function for transforming training + instances, defaults to the identity function, see ``transform()`` + :type transform: function + :param estimator: an optional function or class that maps a + condition's frequency distribution to its probability + distribution, defaults to a Lidstone distribution with gamma = 0.1 + :type estimator: class or function + :param verbose: boolean flag indicating whether training should be + verbose or include printed output + :type verbose: bool + :param max_iterations: number of Baum-Welch iterations to perform + :type max_iterations: int + """ + return cls._train(labeled_sequence, test_sequence, unlabeled_sequence, **kwargs) + + def probability(self, sequence): + """ + Returns the probability of the given symbol sequence. If the sequence + is labelled, then returns the joint probability of the symbol, state + sequence. Otherwise, uses the forward algorithm to find the + probability over all label sequences. + + :return: the probability of the sequence + :rtype: float + :param sequence: the sequence of symbols which must contain the TEXT + property, and optionally the TAG property + :type sequence: Token + """ + return 2 ** (self.log_probability(self._transform(sequence))) + + def log_probability(self, sequence): + """ + Returns the log-probability of the given symbol sequence. If the + sequence is labelled, then returns the joint log-probability of the + symbol, state sequence. Otherwise, uses the forward algorithm to find + the log-probability over all label sequences. 
+ + :return: the log-probability of the sequence + :rtype: float + :param sequence: the sequence of symbols which must contain the TEXT + property, and optionally the TAG property + :type sequence: Token + """ + sequence = self._transform(sequence) + + T = len(sequence) + + if T > 0 and sequence[0][_TAG]: + last_state = sequence[0][_TAG] + p = self._priors.logprob(last_state) + self._output_logprob( + last_state, sequence[0][_TEXT] + ) + for t in range(1, T): + state = sequence[t][_TAG] + p += self._transitions[last_state].logprob( + state + ) + self._output_logprob(state, sequence[t][_TEXT]) + last_state = state + return p + else: + alpha = self._forward_probability(sequence) + p = logsumexp2(alpha[T - 1]) + return p + + def tag(self, unlabeled_sequence): + """ + Tags the sequence with the highest probability state sequence. This + uses the best_path method to find the Viterbi path. + + :return: a labelled sequence of symbols + :rtype: list + :param unlabeled_sequence: the sequence of unlabeled symbols + :type unlabeled_sequence: list + """ + unlabeled_sequence = self._transform(unlabeled_sequence) + return self._tag(unlabeled_sequence) + + def _tag(self, unlabeled_sequence): + path = self._best_path(unlabeled_sequence) + return list(zip(unlabeled_sequence, path)) + + def _output_logprob(self, state, symbol): + """ + :return: the log probability of the symbol being observed in the given + state + :rtype: float + """ + return self._outputs[state].logprob(symbol) + + def _create_cache(self): + """ + The cache is a tuple (P, O, X, S) where: + + - S maps symbols to integers. 
I.e., it is the inverse + mapping from self._symbols; for each symbol s in + self._symbols, the following is true:: + + self._symbols[S[s]] == s + + - O is the log output probabilities:: + + O[i,k] = log( P(token[t]=sym[k]|tag[t]=state[i]) ) + + - X is the log transition probabilities:: + + X[i,j] = log( P(tag[t]=state[j]|tag[t-1]=state[i]) ) + + - P is the log prior probabilities:: + + P[i] = log( P(tag[0]=state[i]) ) + """ + if not self._cache: + N = len(self._states) + M = len(self._symbols) + P = np.zeros(N, np.float32) + X = np.zeros((N, N), np.float32) + O = np.zeros((N, M), np.float32) + for i in range(N): + si = self._states[i] + P[i] = self._priors.logprob(si) + for j in range(N): + X[i, j] = self._transitions[si].logprob(self._states[j]) + for k in range(M): + O[i, k] = self._output_logprob(si, self._symbols[k]) + S = {} + for k in range(M): + S[self._symbols[k]] = k + self._cache = (P, O, X, S) + + def _update_cache(self, symbols): + # add new symbols to the symbol table and repopulate the output + # probabilities and symbol table mapping + if symbols: + self._create_cache() + P, O, X, S = self._cache + for symbol in symbols: + if symbol not in self._symbols: + self._cache = None + self._symbols.append(symbol) + # don't bother with the work if there aren't any new symbols + if not self._cache: + N = len(self._states) + M = len(self._symbols) + Q = O.shape[1] + # add new columns to the output probability table without + # destroying the old probabilities + O = np.hstack([O, np.zeros((N, M - Q), np.float32)]) + for i in range(N): + si = self._states[i] + # only calculate probabilities for new symbols + for k in range(Q, M): + O[i, k] = self._output_logprob(si, self._symbols[k]) + # only create symbol mappings for new symbols + for k in range(Q, M): + S[self._symbols[k]] = k + self._cache = (P, O, X, S) + + def reset_cache(self): + self._cache = None + + def best_path(self, unlabeled_sequence): + """ + Returns the state sequence of the optimal (most 
probable) path through + the HMM. Uses the Viterbi algorithm to calculate this part by dynamic + programming. + + :return: the state sequence + :rtype: sequence of any + :param unlabeled_sequence: the sequence of unlabeled symbols + :type unlabeled_sequence: list + """ + unlabeled_sequence = self._transform(unlabeled_sequence) + return self._best_path(unlabeled_sequence) + + def _best_path(self, unlabeled_sequence): + T = len(unlabeled_sequence) + N = len(self._states) + self._create_cache() + self._update_cache(unlabeled_sequence) + P, O, X, S = self._cache + + V = np.zeros((T, N), np.float32) + B = -np.ones((T, N), int) + + V[0] = P + O[:, S[unlabeled_sequence[0]]] + for t in range(1, T): + for j in range(N): + vs = V[t - 1, :] + X[:, j] + best = np.argmax(vs) + V[t, j] = vs[best] + O[j, S[unlabeled_sequence[t]]] + B[t, j] = best + + current = np.argmax(V[T - 1, :]) + sequence = [current] + for t in range(T - 1, 0, -1): + last = B[t, current] + sequence.append(last) + current = last + + sequence.reverse() + return list(map(self._states.__getitem__, sequence)) + + def best_path_simple(self, unlabeled_sequence): + """ + Returns the state sequence of the optimal (most probable) path through + the HMM. Uses the Viterbi algorithm to calculate this part by dynamic + programming. This uses a simple, direct method, and is included for + teaching purposes. 
+ + :return: the state sequence + :rtype: sequence of any + :param unlabeled_sequence: the sequence of unlabeled symbols + :type unlabeled_sequence: list + """ + unlabeled_sequence = self._transform(unlabeled_sequence) + return self._best_path_simple(unlabeled_sequence) + + def _best_path_simple(self, unlabeled_sequence): + T = len(unlabeled_sequence) + N = len(self._states) + V = np.zeros((T, N), np.float64) + B = {} + + # find the starting log probabilities for each state + symbol = unlabeled_sequence[0] + for i, state in enumerate(self._states): + V[0, i] = self._priors.logprob(state) + self._output_logprob(state, symbol) + B[0, state] = None + + # find the maximum log probabilities for reaching each state at time t + for t in range(1, T): + symbol = unlabeled_sequence[t] + for j in range(N): + sj = self._states[j] + best = None + for i in range(N): + si = self._states[i] + va = V[t - 1, i] + self._transitions[si].logprob(sj) + if not best or va > best[0]: + best = (va, si) + V[t, j] = best[0] + self._output_logprob(sj, symbol) + B[t, sj] = best[1] + + # find the highest probability final state + best = None + for i in range(N): + val = V[T - 1, i] + if not best or val > best[0]: + best = (val, self._states[i]) + + # traverse the back-pointers B to find the state sequence + current = best[1] + sequence = [current] + for t in range(T - 1, 0, -1): + last = B[t, current] + sequence.append(last) + current = last + + sequence.reverse() + return sequence + + def random_sample(self, rng, length): + """ + Randomly sample the HMM to generate a sentence of a given length. This + samples the prior distribution then the observation distribution and + transition distribution for each subsequent observation and state. + This will mostly generate unintelligible garbage, but can provide some + amusement. + + :return: the randomly created state/observation sequence, + generated according to the HMM's probability + distributions. 
The SUBTOKENS have TEXT and TAG + properties containing the observation and state + respectively. + :rtype: list + :param rng: random number generator + :type rng: Random (or any object with a random() method) + :param length: desired output length + :type length: int + """ + + # sample the starting state and symbol prob dists + tokens = [] + state = self._sample_probdist(self._priors, rng.random(), self._states) + symbol = self._sample_probdist( + self._outputs[state], rng.random(), self._symbols + ) + tokens.append((symbol, state)) + + for i in range(1, length): + # sample the state transition and symbol prob dists + state = self._sample_probdist( + self._transitions[state], rng.random(), self._states + ) + symbol = self._sample_probdist( + self._outputs[state], rng.random(), self._symbols + ) + tokens.append((symbol, state)) + + return tokens + + def _sample_probdist(self, probdist, p, samples): + cum_p = 0 + for sample in samples: + add_p = probdist.prob(sample) + if cum_p <= p <= cum_p + add_p: + return sample + cum_p += add_p + raise Exception("Invalid probability distribution - " "does not sum to one") + + def entropy(self, unlabeled_sequence): + """ + Returns the entropy over labellings of the given sequence. This is + given by:: + + H(O) = - sum_S Pr(S | O) log Pr(S | O) + + where the summation ranges over all state sequences, S. Let + *Z = Pr(O) = sum_S Pr(S, O)}* where the summation ranges over all state + sequences and O is the observation sequence. As such the entropy can + be re-expressed as:: + + H = - sum_S Pr(S | O) log [ Pr(S, O) / Z ] + = log Z - sum_S Pr(S | O) log Pr(S, 0) + = log Z - sum_S Pr(S | O) [ log Pr(S_0) + sum_t Pr(S_t | S_{t-1}) + sum_t Pr(O_t | S_t) ] + + The order of summation for the log terms can be flipped, allowing + dynamic programming to be used to calculate the entropy. 
Specifically, + we use the forward and backward probabilities (alpha, beta) giving:: + + H = log Z - sum_s0 alpha_0(s0) beta_0(s0) / Z * log Pr(s0) + + sum_t,si,sj alpha_t(si) Pr(sj | si) Pr(O_t+1 | sj) beta_t(sj) / Z * log Pr(sj | si) + + sum_t,st alpha_t(st) beta_t(st) / Z * log Pr(O_t | st) + + This simply uses alpha and beta to find the probabilities of partial + sequences, constrained to include the given state(s) at some point in + time. + """ + unlabeled_sequence = self._transform(unlabeled_sequence) + + T = len(unlabeled_sequence) + N = len(self._states) + + alpha = self._forward_probability(unlabeled_sequence) + beta = self._backward_probability(unlabeled_sequence) + normalisation = logsumexp2(alpha[T - 1]) + + entropy = normalisation + + # starting state, t = 0 + for i, state in enumerate(self._states): + p = 2 ** (alpha[0, i] + beta[0, i] - normalisation) + entropy -= p * self._priors.logprob(state) + # print('p(s_0 = %s) =' % state, p) + + # state transitions + for t0 in range(T - 1): + t1 = t0 + 1 + for i0, s0 in enumerate(self._states): + for i1, s1 in enumerate(self._states): + p = 2 ** ( + alpha[t0, i0] + + self._transitions[s0].logprob(s1) + + self._outputs[s1].logprob(unlabeled_sequence[t1][_TEXT]) + + beta[t1, i1] + - normalisation + ) + entropy -= p * self._transitions[s0].logprob(s1) + # print('p(s_%d = %s, s_%d = %s) =' % (t0, s0, t1, s1), p) + + # symbol emissions + for t in range(T): + for i, state in enumerate(self._states): + p = 2 ** (alpha[t, i] + beta[t, i] - normalisation) + entropy -= p * self._outputs[state].logprob( + unlabeled_sequence[t][_TEXT] + ) + # print('p(s_%d = %s) =' % (t, state), p) + + return entropy + + def point_entropy(self, unlabeled_sequence): + """ + Returns the pointwise entropy over the possible states at each + position in the chain, given the observation sequence. 
+ """ + unlabeled_sequence = self._transform(unlabeled_sequence) + + T = len(unlabeled_sequence) + N = len(self._states) + + alpha = self._forward_probability(unlabeled_sequence) + beta = self._backward_probability(unlabeled_sequence) + normalisation = logsumexp2(alpha[T - 1]) + + entropies = np.zeros(T, np.float64) + probs = np.zeros(N, np.float64) + for t in range(T): + for s in range(N): + probs[s] = alpha[t, s] + beta[t, s] - normalisation + + for s in range(N): + entropies[t] -= 2 ** (probs[s]) * probs[s] + + return entropies + + def _exhaustive_entropy(self, unlabeled_sequence): + unlabeled_sequence = self._transform(unlabeled_sequence) + + T = len(unlabeled_sequence) + N = len(self._states) + + labellings = [[state] for state in self._states] + for t in range(T - 1): + current = labellings + labellings = [] + for labelling in current: + for state in self._states: + labellings.append(labelling + [state]) + + log_probs = [] + for labelling in labellings: + labeled_sequence = unlabeled_sequence[:] + for t, label in enumerate(labelling): + labeled_sequence[t] = (labeled_sequence[t][_TEXT], label) + lp = self.log_probability(labeled_sequence) + log_probs.append(lp) + normalisation = _log_add(*log_probs) + + entropy = 0 + for lp in log_probs: + lp -= normalisation + entropy -= 2 ** (lp) * lp + + return entropy + + def _exhaustive_point_entropy(self, unlabeled_sequence): + unlabeled_sequence = self._transform(unlabeled_sequence) + + T = len(unlabeled_sequence) + N = len(self._states) + + labellings = [[state] for state in self._states] + for t in range(T - 1): + current = labellings + labellings = [] + for labelling in current: + for state in self._states: + labellings.append(labelling + [state]) + + log_probs = [] + for labelling in labellings: + labelled_sequence = unlabeled_sequence[:] + for t, label in enumerate(labelling): + labelled_sequence[t] = (labelled_sequence[t][_TEXT], label) + lp = self.log_probability(labelled_sequence) + log_probs.append(lp) + + 
normalisation = _log_add(*log_probs) + + probabilities = _ninf_array((T, N)) + + for labelling, lp in zip(labellings, log_probs): + lp -= normalisation + for t, label in enumerate(labelling): + index = self._states.index(label) + probabilities[t, index] = _log_add(probabilities[t, index], lp) + + entropies = np.zeros(T, np.float64) + for t in range(T): + for s in range(N): + entropies[t] -= 2 ** (probabilities[t, s]) * probabilities[t, s] + + return entropies + + def _transitions_matrix(self): + """Return a matrix of transition log probabilities.""" + trans_iter = ( + self._transitions[sj].logprob(si) + for sj in self._states + for si in self._states + ) + + transitions_logprob = np.fromiter(trans_iter, dtype=np.float64) + N = len(self._states) + return transitions_logprob.reshape((N, N)).T + + def _outputs_vector(self, symbol): + """ + Return a vector with log probabilities of emitting a symbol + when entering states. + """ + out_iter = (self._output_logprob(sj, symbol) for sj in self._states) + return np.fromiter(out_iter, dtype=np.float64) + + def _forward_probability(self, unlabeled_sequence): + """ + Return the forward probability matrix, a T by N array of + log-probabilities, where T is the length of the sequence and N is the + number of states. Each entry (t, s) gives the probability of being in + state s at time t after observing the partial symbol sequence up to + and including t. 
+ + :param unlabeled_sequence: the sequence of unlabeled symbols + :type unlabeled_sequence: list + :return: the forward log probability matrix + :rtype: array + """ + T = len(unlabeled_sequence) + N = len(self._states) + alpha = _ninf_array((T, N)) + + transitions_logprob = self._transitions_matrix() + + # Initialization + symbol = unlabeled_sequence[0][_TEXT] + for i, state in enumerate(self._states): + alpha[0, i] = self._priors.logprob(state) + self._output_logprob( + state, symbol + ) + + # Induction + for t in range(1, T): + symbol = unlabeled_sequence[t][_TEXT] + output_logprob = self._outputs_vector(symbol) + + for i in range(N): + summand = alpha[t - 1] + transitions_logprob[i] + alpha[t, i] = logsumexp2(summand) + output_logprob[i] + + return alpha + + def _backward_probability(self, unlabeled_sequence): + """ + Return the backward probability matrix, a T by N array of + log-probabilities, where T is the length of the sequence and N is the + number of states. Each entry (t, s) gives the probability of being in + state s at time t after observing the partial symbol sequence from t + .. T. + + :return: the backward log probability matrix + :rtype: array + :param unlabeled_sequence: the sequence of unlabeled symbols + :type unlabeled_sequence: list + """ + T = len(unlabeled_sequence) + N = len(self._states) + beta = _ninf_array((T, N)) + + transitions_logprob = self._transitions_matrix().T + + # initialise the backward values; + # "1" is an arbitrarily chosen value from Rabiner tutorial + beta[T - 1, :] = np.log2(1) + + # inductively calculate remaining backward values + for t in range(T - 2, -1, -1): + symbol = unlabeled_sequence[t + 1][_TEXT] + outputs = self._outputs_vector(symbol) + + for i in range(N): + summand = transitions_logprob[i] + beta[t + 1] + outputs + beta[t, i] = logsumexp2(summand) + + return beta + + def test(self, test_sequence, verbose=False, **kwargs): + """ + Tests the HiddenMarkovModelTagger instance. 
+ + :param test_sequence: a sequence of labeled test instances + :type test_sequence: list(list) + :param verbose: boolean flag indicating whether training should be + verbose or include printed output + :type verbose: bool + """ + + def words(sent): + return [word for (word, tag) in sent] + + def tags(sent): + return [tag for (word, tag) in sent] + + def flatten(seq): + return list(itertools.chain(*seq)) + + test_sequence = self._transform(test_sequence) + predicted_sequence = list(map(self._tag, map(words, test_sequence))) + + if verbose: + for test_sent, predicted_sent in zip(test_sequence, predicted_sequence): + print( + "Test:", + " ".join(f"{token}/{tag}" for (token, tag) in test_sent), + ) + print() + print("Untagged:", " ".join("%s" % token for (token, tag) in test_sent)) + print() + print( + "HMM-tagged:", + " ".join(f"{token}/{tag}" for (token, tag) in predicted_sent), + ) + print() + print( + "Entropy:", + self.entropy([(token, None) for (token, tag) in predicted_sent]), + ) + print() + print("-" * 60) + + test_tags = flatten(map(tags, test_sequence)) + predicted_tags = flatten(map(tags, predicted_sequence)) + + acc = accuracy(test_tags, predicted_tags) + count = sum(len(sent) for sent in test_sequence) + print("accuracy over %d tokens: %.2f" % (count, acc * 100)) + + def __repr__(self): + return "" % ( + len(self._states), + len(self._symbols), + ) + + +class HiddenMarkovModelTrainer: + """ + Algorithms for learning HMM parameters from training data. These include + both supervised learning (MLE) and unsupervised learning (Baum-Welch). + + Creates an HMM trainer to induce an HMM with the given states and + output symbol alphabet. A supervised and unsupervised training + method may be used. If either of the states or symbols are not given, + these may be derived from supervised training. 
+ + :param states: the set of state labels + :type states: sequence of any + :param symbols: the set of observation symbols + :type symbols: sequence of any + """ + + def __init__(self, states=None, symbols=None): + self._states = states if states else [] + self._symbols = symbols if symbols else [] + + def train(self, labeled_sequences=None, unlabeled_sequences=None, **kwargs): + """ + Trains the HMM using both (or either of) supervised and unsupervised + techniques. + + :return: the trained model + :rtype: HiddenMarkovModelTagger + :param labelled_sequences: the supervised training data, a set of + labelled sequences of observations + ex: [ (word_1, tag_1),...,(word_n,tag_n) ] + :type labelled_sequences: list + :param unlabeled_sequences: the unsupervised training data, a set of + sequences of observations + ex: [ word_1, ..., word_n ] + :type unlabeled_sequences: list + :param kwargs: additional arguments to pass to the training methods + """ + assert labeled_sequences or unlabeled_sequences + model = None + if labeled_sequences: + model = self.train_supervised(labeled_sequences, **kwargs) + if unlabeled_sequences: + if model: + kwargs["model"] = model + model = self.train_unsupervised(unlabeled_sequences, **kwargs) + return model + + def _baum_welch_step(self, sequence, model, symbol_to_number): + + N = len(model._states) + M = len(model._symbols) + T = len(sequence) + + # compute forward and backward probabilities + alpha = model._forward_probability(sequence) + beta = model._backward_probability(sequence) + + # find the log probability of the sequence + lpk = logsumexp2(alpha[T - 1]) + + A_numer = _ninf_array((N, N)) + B_numer = _ninf_array((N, M)) + A_denom = _ninf_array(N) + B_denom = _ninf_array(N) + + transitions_logprob = model._transitions_matrix().T + + for t in range(T): + symbol = sequence[t][_TEXT] # not found? FIXME + next_symbol = None + if t < T - 1: + next_symbol = sequence[t + 1][_TEXT] # not found? 
FIXME + xi = symbol_to_number[symbol] + + next_outputs_logprob = model._outputs_vector(next_symbol) + alpha_plus_beta = alpha[t] + beta[t] + + if t < T - 1: + numer_add = ( + transitions_logprob + + next_outputs_logprob + + beta[t + 1] + + alpha[t].reshape(N, 1) + ) + A_numer = np.logaddexp2(A_numer, numer_add) + A_denom = np.logaddexp2(A_denom, alpha_plus_beta) + else: + B_denom = np.logaddexp2(A_denom, alpha_plus_beta) + + B_numer[:, xi] = np.logaddexp2(B_numer[:, xi], alpha_plus_beta) + + return lpk, A_numer, A_denom, B_numer, B_denom + + def train_unsupervised(self, unlabeled_sequences, update_outputs=True, **kwargs): + """ + Trains the HMM using the Baum-Welch algorithm to maximise the + probability of the data sequence. This is a variant of the EM + algorithm, and is unsupervised in that it doesn't need the state + sequences for the symbols. The code is based on 'A Tutorial on Hidden + Markov Models and Selected Applications in Speech Recognition', + Lawrence Rabiner, IEEE, 1989. + + :return: the trained model + :rtype: HiddenMarkovModelTagger + :param unlabeled_sequences: the training data, a set of + sequences of observations + :type unlabeled_sequences: list + + kwargs may include following parameters: + + :param model: a HiddenMarkovModelTagger instance used to begin + the Baum-Welch algorithm + :param max_iterations: the maximum number of EM iterations + :param convergence_logprob: the maximum change in log probability to + allow convergence + """ + + # create a uniform HMM, which will be iteratively refined, unless + # given an existing model + model = kwargs.get("model") + if not model: + priors = RandomProbDist(self._states) + transitions = DictionaryConditionalProbDist( + {state: RandomProbDist(self._states) for state in self._states} + ) + outputs = DictionaryConditionalProbDist( + {state: RandomProbDist(self._symbols) for state in self._states} + ) + model = HiddenMarkovModelTagger( + self._symbols, self._states, transitions, outputs, priors + ) + 
        self._states = model._states
        self._symbols = model._symbols

        N = len(self._states)
        M = len(self._symbols)
        # Map each output symbol to a column index for the B (emission) matrix.
        symbol_numbers = {sym: i for i, sym in enumerate(self._symbols)}

        # update model prob dists so that they can be modified
        # model._priors = MutableProbDist(model._priors, self._states)

        model._transitions = DictionaryConditionalProbDist(
            {
                s: MutableProbDist(model._transitions[s], self._states)
                for s in self._states
            }
        )

        if update_outputs:
            model._outputs = DictionaryConditionalProbDist(
                {
                    s: MutableProbDist(model._outputs[s], self._symbols)
                    for s in self._states
                }
            )

        model.reset_cache()

        # iterate until convergence
        converged = False
        last_logprob = None
        iteration = 0
        max_iterations = kwargs.get("max_iterations", 1000)
        epsilon = kwargs.get("convergence_logprob", 1e-6)

        while not converged and iteration < max_iterations:
            # Accumulators for the Baum-Welch re-estimation sums, kept in
            # log2 space and initialised to log(0) == -inf.
            A_numer = _ninf_array((N, N))
            B_numer = _ninf_array((N, M))
            A_denom = _ninf_array(N)
            B_denom = _ninf_array(N)

            logprob = 0
            for sequence in unlabeled_sequences:
                sequence = list(sequence)
                if not sequence:
                    continue

                (
                    lpk,
                    seq_A_numer,
                    seq_A_denom,
                    seq_B_numer,
                    seq_B_denom,
                ) = self._baum_welch_step(sequence, model, symbol_numbers)

                # add these sums to the global A and B values
                # (subtracting lpk divides by P(O|model) in log space,
                # weighting each sequence's contribution as in Rabiner 1989)
                for i in range(N):
                    A_numer[i] = np.logaddexp2(A_numer[i], seq_A_numer[i] - lpk)
                    B_numer[i] = np.logaddexp2(B_numer[i], seq_B_numer[i] - lpk)

                A_denom = np.logaddexp2(A_denom, seq_A_denom - lpk)
                B_denom = np.logaddexp2(B_denom, seq_B_denom - lpk)

                logprob += lpk

            # use the calculated values to update the transition and output
            # probability values
            for i in range(N):
                logprob_Ai = A_numer[i] - A_denom[i]
                logprob_Bi = B_numer[i] - B_denom[i]

                # We should normalize all probabilities (see p.391 Huang et al)
                # Let sum(P) be K.
                # We can divide each Pi by K to make sum(P) == 1.
                # Pi' = Pi/K
                # log2(Pi') = log2(Pi) - log2(K)
                logprob_Ai -= logsumexp2(logprob_Ai)
                logprob_Bi -= logsumexp2(logprob_Bi)

                # update output and transition probabilities
                si = self._states[i]

                for j in range(N):
                    sj = self._states[j]
                    model._transitions[si].update(sj, logprob_Ai[j])

                if update_outputs:
                    for k in range(M):
                        ok = self._symbols[k]
                        model._outputs[si].update(ok, logprob_Bi[k])

            # Rabiner says the priors don't need to be updated. I don't
            # believe him. FIXME

            # test for convergence: stop once the total log probability of
            # the corpus improves by less than `epsilon` between iterations
            if iteration > 0 and abs(logprob - last_logprob) < epsilon:
                converged = True

            print("iteration", iteration, "logprob", logprob)
            iteration += 1
            last_logprob = logprob

        return model

    def train_supervised(self, labelled_sequences, estimator=None):
        """
        Supervised training maximising the joint probability of the symbol and
        state sequences. This is done via collecting frequencies of
        transitions between states, symbol observations while within each
        state and which states start a sentence. These frequency distributions
        are then normalised into probability estimates, which can be
        smoothed if desired.
        :return: the trained model
        :rtype: HiddenMarkovModelTagger
        :param labelled_sequences: the training data, a set of
            labelled sequences of observations
        :type labelled_sequences: list
        :param estimator: a function taking
            a FreqDist and a number of bins and returning a CProbDistI;
            otherwise a MLE estimate is used
        """

        # default to the MLE estimate
        if estimator is None:
            estimator = lambda fdist, bins: MLEProbDist(fdist)

        # count occurrences of starting states, transitions out of each state
        # and output symbols observed in each state
        known_symbols = set(self._symbols)
        known_states = set(self._states)

        starting = FreqDist()
        transitions = ConditionalFreqDist()
        outputs = ConditionalFreqDist()
        for sequence in labelled_sequences:
            lasts = None
            for token in sequence:
                state = token[_TAG]
                symbol = token[_TEXT]
                if lasts is None:
                    # first token of the sequence: count as a starting state
                    starting[state] += 1
                else:
                    transitions[lasts][state] += 1
                outputs[state][symbol] += 1
                lasts = state

                # update the state and symbol lists
                if state not in known_states:
                    self._states.append(state)
                    known_states.add(state)

                if symbol not in known_symbols:
                    self._symbols.append(symbol)
                    known_symbols.add(symbol)

        # create probability distributions (with smoothing)
        N = len(self._states)
        pi = estimator(starting, N)
        A = ConditionalProbDist(transitions, estimator, N)
        B = ConditionalProbDist(outputs, estimator, len(self._symbols))

        return HiddenMarkovModelTagger(self._symbols, self._states, A, B, pi)


def _ninf_array(shape):
    # Return a float64 array of the given shape filled with -inf,
    # i.e. log(0) — the additive identity for logaddexp accumulation.
    res = np.empty(shape, np.float64)
    res.fill(-np.inf)
    return res


def logsumexp2(arr):
    # Numerically stable log2(sum(2**arr)): shift by the max before
    # exponentiating to avoid overflow.
    max_ = arr.max()
    return np.log2(np.sum(2 ** (arr - max_))) + max_


def _log_add(*values):
    """
    Adds the logged values, returning the logarithm of the addition.
    """
    # Same max-shift trick as logsumexp2, over a variadic argument list.
    x = max(values)
    if x > -np.inf:
        sum_diffs = 0
        for value in values:
            sum_diffs += 2 ** (value - x)
        return x + np.log2(sum_diffs)
    else:
        # All inputs are log(0); the sum is still log(0).
        return x


def _create_hmm_tagger(states, symbols, A, B, pi):
    # Build a HiddenMarkovModelTagger from plain numeric parameter arrays
    # by wrapping them in dictionary-backed probability distributions.
    def pd(values, samples):
        d = dict(zip(samples, values))
        return DictionaryProbDist(d)

    def cpd(array, conditions, samples):
        d = {}
        for values, condition in zip(array, conditions):
            d[condition] = pd(values, samples)
        return DictionaryConditionalProbDist(d)

    A = cpd(A, states, states)
    B = cpd(B, states, symbols)
    pi = pd(pi, states)
    return HiddenMarkovModelTagger(
        symbols=symbols, states=states, transitions=A, outputs=B, priors=pi
    )


def _market_hmm_example():
    """
    Return an example HMM (described at page 381, Huang et al)
    """
    states = ["bull", "bear", "static"]
    symbols = ["up", "down", "unchanged"]
    # A: state-transition matrix, B: emission matrix, pi: initial distribution.
    A = np.array([[0.6, 0.2, 0.2], [0.5, 0.3, 0.2], [0.4, 0.1, 0.5]], np.float64)
    B = np.array([[0.7, 0.1, 0.2], [0.1, 0.6, 0.3], [0.3, 0.3, 0.4]], np.float64)
    pi = np.array([0.5, 0.2, 0.3], np.float64)

    model = _create_hmm_tagger(states, symbols, A, B, pi)
    return model, states, symbols


def demo():
    # demonstrates HMM probability calculation

    print()
    print("HMM probability calculation demo")
    print()

    model, states, symbols = _market_hmm_example()

    print("Testing", model)

    for test in [
        ["up", "up"],
        ["up", "down", "up"],
        ["down"] * 5,
        ["unchanged"] * 5 + ["up"],
    ]:

        sequence = [(t, None) for t in test]

        print("Testing with state sequence", test)
        print("probability =", model.probability(sequence))
        print("tagging = ", model.tag([word for (word, tag) in sequence]))
        print("p(tagged) = ", model.probability(sequence))
        print("H = ", model.entropy(sequence))
        print("H_exh = ", model._exhaustive_entropy(sequence))
        print("H(point) = ", model.point_entropy(sequence))
        print("H_exh(point)=", model._exhaustive_point_entropy(sequence))
        print()


def load_pos(num_sents):
    # Load `num_sents` tagged Brown-corpus sentences, lowercasing words and
    # truncating compound tags, and return (sentences, tags, symbols).
    from nltk.corpus import brown

    sentences = brown.tagged_sents(categories="news")[:num_sents]

    # Keep only the leading simple tag: '*', '--', or the run of characters
    # before any '+', '*' or '-' (strips Brown compound-tag suffixes).
    tag_re = re.compile(r"[*]|--|[^+*-]+")
    tag_set = set()
    symbols = set()

    cleaned_sentences = []
    for sentence in sentences:
        for i in range(len(sentence)):
            word, tag = sentence[i]
            word = word.lower()  # normalize
            symbols.add(word)  # log this word
            # Clean up the tag.
            tag = tag_re.match(tag).group()
            tag_set.add(tag)
            sentence[i] = (word, tag)  # store cleaned-up tagged token
        cleaned_sentences += [sentence]

    return cleaned_sentences, list(tag_set), list(symbols)


def demo_pos():
    # demonstrates POS tagging using supervised training

    print()
    print("HMM POS tagging demo")
    print()

    print("Training HMM...")
    labelled_sequences, tag_set, symbols = load_pos(20000)
    trainer = HiddenMarkovModelTrainer(tag_set, symbols)
    # Hold out the first 10 sentences for testing; train on the rest.
    hmm = trainer.train_supervised(
        labelled_sequences[10:],
        estimator=lambda fd, bins: LidstoneProbDist(fd, 0.1, bins),
    )

    print("Testing...")
    hmm.test(labelled_sequences[:10], verbose=True)


def _untag(sentences):
    # Strip the tags from tagged sentences, keeping (word, None) pairs
    # as expected by the unsupervised trainer.
    unlabeled = []
    for sentence in sentences:
        unlabeled.append([(token[_TEXT], None) for token in sentence])
    return unlabeled


def demo_pos_bw(
    test=10, supervised=20, unsupervised=10, verbose=True, max_iterations=5
):
    # demonstrates the Baum-Welch algorithm in POS tagging

    print()
    print("Baum-Welch demo for POS tagging")
    print()

    print("Training HMM (supervised, %d sentences)..." % supervised)

    sentences, tag_set, symbols = load_pos(test + supervised + unsupervised)

    # Rebuild the symbol set from every split so the unsupervised portion's
    # vocabulary is known to the trainer as well.
    symbols = set()
    for sentence in sentences:
        for token in sentence:
            symbols.add(token[_TEXT])

    trainer = HiddenMarkovModelTrainer(tag_set, list(symbols))
    hmm = trainer.train_supervised(
        sentences[test : test + supervised],
        estimator=lambda fd, bins: LidstoneProbDist(fd, 0.1, bins),
    )

    hmm.test(sentences[:test], verbose=verbose)

    print("Training (unsupervised, %d sentences)..."
% unsupervised) + # it's rather slow - so only use 10 samples by default + unlabeled = _untag(sentences[test + supervised :]) + hmm = trainer.train_unsupervised( + unlabeled, model=hmm, max_iterations=max_iterations + ) + hmm.test(sentences[:test], verbose=verbose) + + +def demo_bw(): + # demo Baum Welch by generating some sequences and then performing + # unsupervised training on them + + print() + print("Baum-Welch demo for market example") + print() + + model, states, symbols = _market_hmm_example() + + # generate some random sequences + training = [] + import random + + rng = random.Random() + rng.seed(0) + for i in range(10): + item = model.random_sample(rng, 5) + training.append([(i[0], None) for i in item]) + + # train on those examples, starting with the model that generated them + trainer = HiddenMarkovModelTrainer(states, symbols) + hmm = trainer.train_unsupervised(training, model=model, max_iterations=1000) diff --git a/lib/python3.10/site-packages/nltk/tag/hunpos.py b/lib/python3.10/site-packages/nltk/tag/hunpos.py new file mode 100644 index 0000000000000000000000000000000000000000..e001c6d6dbc1257515ed1149abe6bab06f1c7337 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/tag/hunpos.py @@ -0,0 +1,142 @@ +# Natural Language Toolkit: Interface to the HunPos POS-tagger +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Peter Ljunglöf +# Dávid Márk Nemeskey (modifications) +# Attila Zséder (modifications) +# URL: +# For license information, see LICENSE.TXT + +""" +A module for interfacing with the HunPos open-source POS-tagger. +""" + +import os +from subprocess import PIPE, Popen + +from nltk.internals import find_binary, find_file +from nltk.tag.api import TaggerI + +_hunpos_url = "https://code.google.com/p/hunpos/" + +_hunpos_charset = "ISO-8859-1" +"""The default encoding used by hunpos: ISO-8859-1.""" + + +class HunposTagger(TaggerI): + """ + A class for pos tagging with HunPos. 
The input is the paths to: + - a model trained on training data + - (optionally) the path to the hunpos-tag binary + - (optionally) the encoding of the training data (default: ISO-8859-1) + + Check whether the required "hunpos-tag" binary is available: + + >>> from nltk.test.setup_fixt import check_binary + >>> check_binary('hunpos-tag') + + Example: + >>> from nltk.tag import HunposTagger + >>> ht = HunposTagger('en_wsj.model') + >>> ht.tag('What is the airspeed of an unladen swallow ?'.split()) + [('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'), ('airspeed', 'NN'), ('of', 'IN'), ('an', 'DT'), ('unladen', 'NN'), ('swallow', 'VB'), ('?', '.')] + >>> ht.close() + + This class communicates with the hunpos-tag binary via pipes. When the + tagger object is no longer needed, the close() method should be called to + free system resources. The class supports the context manager interface; if + used in a with statement, the close() method is invoked automatically: + + >>> with HunposTagger('en_wsj.model') as ht: + ... ht.tag('What is the airspeed of an unladen swallow ?'.split()) + ... + [('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'), ('airspeed', 'NN'), ('of', 'IN'), ('an', 'DT'), ('unladen', 'NN'), ('swallow', 'VB'), ('?', '.')] + """ + + def __init__( + self, path_to_model, path_to_bin=None, encoding=_hunpos_charset, verbose=False + ): + """ + Starts the hunpos-tag executable and establishes a connection with it. + + :param path_to_model: The model file. + :param path_to_bin: The hunpos-tag binary. + :param encoding: The encoding used by the model. Unicode tokens + passed to the tag() and tag_sents() methods are converted to + this charset when they are sent to hunpos-tag. + The default is ISO-8859-1 (Latin-1). + + This parameter is ignored for str tokens, which are sent as-is. + The caller must ensure that tokens are encoded in the right charset. 
+ """ + self._closed = True + hunpos_paths = [ + ".", + "/usr/bin", + "/usr/local/bin", + "/opt/local/bin", + "/Applications/bin", + "~/bin", + "~/Applications/bin", + ] + hunpos_paths = list(map(os.path.expanduser, hunpos_paths)) + + self._hunpos_bin = find_binary( + "hunpos-tag", + path_to_bin, + env_vars=("HUNPOS_TAGGER",), + searchpath=hunpos_paths, + url=_hunpos_url, + verbose=verbose, + ) + + self._hunpos_model = find_file( + path_to_model, env_vars=("HUNPOS_TAGGER",), verbose=verbose + ) + self._encoding = encoding + self._hunpos = Popen( + [self._hunpos_bin, self._hunpos_model], + shell=False, + stdin=PIPE, + stdout=PIPE, + stderr=PIPE, + ) + self._closed = False + + def __del__(self): + self.close() + + def close(self): + """Closes the pipe to the hunpos executable.""" + if not self._closed: + self._hunpos.communicate() + self._closed = True + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + def tag(self, tokens): + """Tags a single sentence: a list of words. + The tokens should not contain any newline characters. 
+ """ + for token in tokens: + assert "\n" not in token, "Tokens should not contain newlines" + if isinstance(token, str): + token = token.encode(self._encoding) + self._hunpos.stdin.write(token + b"\n") + # We write a final empty line to tell hunpos that the sentence is finished: + self._hunpos.stdin.write(b"\n") + self._hunpos.stdin.flush() + + tagged_tokens = [] + for token in tokens: + tagged = self._hunpos.stdout.readline().strip().split(b"\t") + tag = tagged[1] if len(tagged) > 1 else None + tagged_tokens.append((token, tag)) + # We have to read (and dismiss) the final empty line: + self._hunpos.stdout.readline() + + return tagged_tokens diff --git a/lib/python3.10/site-packages/nltk/tag/mapping.py b/lib/python3.10/site-packages/nltk/tag/mapping.py new file mode 100644 index 0000000000000000000000000000000000000000..0af1a0eef945b3cfb2bb3a5860b223a42dbaeae7 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/tag/mapping.py @@ -0,0 +1,136 @@ +# Natural Language Toolkit: Tagset Mapping +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Nathan Schneider +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +Interface for converting POS tags from various treebanks +to the universal tagset of Petrov, Das, & McDonald. + +The tagset consists of the following 12 coarse tags: + +VERB - verbs (all tenses and modes) +NOUN - nouns (common and proper) +PRON - pronouns +ADJ - adjectives +ADV - adverbs +ADP - adpositions (prepositions and postpositions) +CONJ - conjunctions +DET - determiners +NUM - cardinal numbers +PRT - particles or other function words +X - other: foreign words, typos, abbreviations +. 
- punctuation + +@see: https://arxiv.org/abs/1104.2086 and https://code.google.com/p/universal-pos-tags/ + +""" + +from collections import defaultdict +from os.path import join + +from nltk.data import load + +_UNIVERSAL_DATA = "taggers/universal_tagset" +_UNIVERSAL_TAGS = ( + "VERB", + "NOUN", + "PRON", + "ADJ", + "ADV", + "ADP", + "CONJ", + "DET", + "NUM", + "PRT", + "X", + ".", +) + +# _MAPPINGS = defaultdict(lambda: defaultdict(dict)) +# the mapping between tagset T1 and T2 returns UNK if applied to an unrecognized tag +_MAPPINGS = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: "UNK"))) + + +def _load_universal_map(fileid): + contents = load(join(_UNIVERSAL_DATA, fileid + ".map"), format="text") + + # When mapping to the Universal Tagset, + # map unknown inputs to 'X' not 'UNK' + _MAPPINGS[fileid]["universal"].default_factory = lambda: "X" + + for line in contents.splitlines(): + line = line.strip() + if line == "": + continue + fine, coarse = line.split("\t") + + assert coarse in _UNIVERSAL_TAGS, f"Unexpected coarse tag: {coarse}" + assert ( + fine not in _MAPPINGS[fileid]["universal"] + ), f"Multiple entries for original tag: {fine}" + + _MAPPINGS[fileid]["universal"][fine] = coarse + + +def tagset_mapping(source, target): + """ + Retrieve the mapping dictionary between tagsets. + + >>> tagset_mapping('ru-rnc', 'universal') == {'!': '.', 'A': 'ADJ', 'C': 'CONJ', 'AD': 'ADV',\ + 'NN': 'NOUN', 'VG': 'VERB', 'COMP': 'CONJ', 'NC': 'NUM', 'VP': 'VERB', 'P': 'ADP',\ + 'IJ': 'X', 'V': 'VERB', 'Z': 'X', 'VI': 'VERB', 'YES_NO_SENT': 'X', 'PTCL': 'PRT'} + True + """ + + if source not in _MAPPINGS or target not in _MAPPINGS[source]: + if target == "universal": + _load_universal_map(source) + # Added the new Russian National Corpus mappings because the + # Russian model for nltk.pos_tag() uses it. 
+ _MAPPINGS["ru-rnc-new"]["universal"] = { + "A": "ADJ", + "A-PRO": "PRON", + "ADV": "ADV", + "ADV-PRO": "PRON", + "ANUM": "ADJ", + "CONJ": "CONJ", + "INTJ": "X", + "NONLEX": ".", + "NUM": "NUM", + "PARENTH": "PRT", + "PART": "PRT", + "PR": "ADP", + "PRAEDIC": "PRT", + "PRAEDIC-PRO": "PRON", + "S": "NOUN", + "S-PRO": "PRON", + "V": "VERB", + } + + return _MAPPINGS[source][target] + + +def map_tag(source, target, source_tag): + """ + Maps the tag from the source tagset to the target tagset. + + >>> map_tag('en-ptb', 'universal', 'VBZ') + 'VERB' + >>> map_tag('en-ptb', 'universal', 'VBP') + 'VERB' + >>> map_tag('en-ptb', 'universal', '``') + '.' + """ + + # we need a systematic approach to naming + if target == "universal": + if source == "wsj": + source = "en-ptb" + if source == "brown": + source = "en-brown" + + return tagset_mapping(source, target)[source_tag] diff --git a/lib/python3.10/site-packages/nltk/tag/perceptron.py b/lib/python3.10/site-packages/nltk/tag/perceptron.py new file mode 100644 index 0000000000000000000000000000000000000000..9afe08f0c8d6a9d5852a225e6c9569a291fb1e3d --- /dev/null +++ b/lib/python3.10/site-packages/nltk/tag/perceptron.py @@ -0,0 +1,371 @@ +# This module is a port of the Textblob Averaged Perceptron Tagger +# Author: Matthew Honnibal , +# Long Duong (NLTK port) +# URL: +# +# Copyright 2013 Matthew Honnibal +# NLTK modifications Copyright 2015 The NLTK Project +# +# This module is provided under the terms of the MIT License. + +import logging +import pickle +import random +from collections import defaultdict + +from nltk import jsontags +from nltk.data import find, load +from nltk.tag.api import TaggerI + +try: + import numpy as np +except ImportError: + pass + +PICKLE = "averaged_perceptron_tagger.pickle" + + +@jsontags.register_tag +class AveragedPerceptron: + + """An averaged perceptron, as implemented by Matthew Honnibal. 

    See more implementation details here:
        https://explosion.ai/blog/part-of-speech-pos-tagger-in-python
    """

    json_tag = "nltk.tag.perceptron.AveragedPerceptron"

    def __init__(self, weights=None):
        # Each feature gets its own weight vector, so weights is a dict-of-dicts
        self.weights = weights if weights else {}
        self.classes = set()
        # The accumulated values, for the averaging. These will be keyed by
        # feature/class tuples
        self._totals = defaultdict(int)
        # The last time the feature was changed, for the averaging. Also
        # keyed by feature/class tuples
        # (tstamps is short for timestamps)
        self._tstamps = defaultdict(int)
        # Number of instances seen
        self.i = 0

    def _softmax(self, scores):
        # Convert raw label scores to a probability-like vector.
        # NOTE(review): assumes `scores` is non-empty; np.exp can overflow
        # for large scores since there is no max-shift here.
        s = np.fromiter(scores.values(), dtype=float)
        exps = np.exp(s)
        return exps / np.sum(exps)

    def predict(self, features, return_conf=False):
        """Dot-product the features and current weights and return the best label."""
        scores = defaultdict(float)
        for feat, value in features.items():
            # Skip unknown features and zero-valued ones (no contribution).
            if feat not in self.weights or value == 0:
                continue
            weights = self.weights[feat]
            for label, weight in weights.items():
                scores[label] += value * weight

        # Do a secondary alphabetic sort, for stability
        best_label = max(self.classes, key=lambda label: (scores[label], label))
        # compute the confidence
        conf = max(self._softmax(scores)) if return_conf == True else None

        return best_label, conf

    def update(self, truth, guess, features):
        """Update the feature weights."""

        def upd_feat(c, f, w, v):
            # Accumulate the old weight over the interval it was in effect,
            # then apply the perceptron update v (+1 for truth, -1 for guess).
            param = (f, c)
            self._totals[param] += (self.i - self._tstamps[param]) * w
            self._tstamps[param] = self.i
            self.weights[f][c] = w + v

        self.i += 1
        if truth == guess:
            # Correct prediction: only the instance counter advances.
            return None
        for f in features:
            weights = self.weights.setdefault(f, {})
            upd_feat(truth, f, weights.get(truth, 0.0), 1.0)
            upd_feat(guess, f, weights.get(guess, 0.0), -1.0)

    def average_weights(self):
        """Average weights from all iterations."""
        for feat, weights in
self.weights.items(): + new_feat_weights = {} + for clas, weight in weights.items(): + param = (feat, clas) + total = self._totals[param] + total += (self.i - self._tstamps[param]) * weight + averaged = round(total / self.i, 3) + if averaged: + new_feat_weights[clas] = averaged + self.weights[feat] = new_feat_weights + + def save(self, path): + """Save the pickled model weights.""" + with open(path, "wb") as fout: + return pickle.dump(dict(self.weights), fout) + + def load(self, path): + """Load the pickled model weights.""" + self.weights = load(path) + + def encode_json_obj(self): + return self.weights + + @classmethod + def decode_json_obj(cls, obj): + return cls(obj) + + +@jsontags.register_tag +class PerceptronTagger(TaggerI): + + """ + Greedy Averaged Perceptron tagger, as implemented by Matthew Honnibal. + See more implementation details here: + https://explosion.ai/blog/part-of-speech-pos-tagger-in-python + + >>> from nltk.tag.perceptron import PerceptronTagger + + Train the model + + >>> tagger = PerceptronTagger(load=False) + + >>> tagger.train([[('today','NN'),('is','VBZ'),('good','JJ'),('day','NN')], + ... [('yes','NNS'),('it','PRP'),('beautiful','JJ')]]) + + >>> tagger.tag(['today','is','a','beautiful','day']) + [('today', 'NN'), ('is', 'PRP'), ('a', 'PRP'), ('beautiful', 'JJ'), ('day', 'NN')] + + Use the pretrain model (the default constructor) + + >>> pretrain = PerceptronTagger() + + >>> pretrain.tag('The quick brown fox jumps over the lazy dog'.split()) + [('The', 'DT'), ('quick', 'JJ'), ('brown', 'NN'), ('fox', 'NN'), ('jumps', 'VBZ'), ('over', 'IN'), ('the', 'DT'), ('lazy', 'JJ'), ('dog', 'NN')] + + >>> pretrain.tag("The red cat".split()) + [('The', 'DT'), ('red', 'JJ'), ('cat', 'NN')] + """ + + json_tag = "nltk.tag.sequential.PerceptronTagger" + + START = ["-START-", "-START2-"] + END = ["-END-", "-END2-"] + + def __init__(self, load=True): + """ + :param load: Load the pickled model upon instantiation. 
+ """ + self.model = AveragedPerceptron() + self.tagdict = {} + self.classes = set() + if load: + AP_MODEL_LOC = "file:" + str( + find("taggers/averaged_perceptron_tagger/" + PICKLE) + ) + self.load(AP_MODEL_LOC) + + def tag(self, tokens, return_conf=False, use_tagdict=True): + """ + Tag tokenized sentences. + :params tokens: list of word + :type tokens: list(str) + """ + prev, prev2 = self.START + output = [] + + context = self.START + [self.normalize(w) for w in tokens] + self.END + for i, word in enumerate(tokens): + tag, conf = ( + (self.tagdict.get(word), 1.0) if use_tagdict == True else (None, None) + ) + if not tag: + features = self._get_features(i, word, context, prev, prev2) + tag, conf = self.model.predict(features, return_conf) + output.append((word, tag, conf) if return_conf == True else (word, tag)) + + prev2 = prev + prev = tag + + return output + + def train(self, sentences, save_loc=None, nr_iter=5): + """Train a model from sentences, and save it at ``save_loc``. ``nr_iter`` + controls the number of Perceptron training iterations. + + :param sentences: A list or iterator of sentences, where each sentence + is a list of (words, tags) tuples. + :param save_loc: If not ``None``, saves a pickled model in this location. + :param nr_iter: Number of training iterations. + """ + # We'd like to allow ``sentences`` to be either a list or an iterator, + # the latter being especially important for a large training dataset. + # Because ``self._make_tagdict(sentences)`` runs regardless, we make + # it populate ``self._sentences`` (a list) with all the sentences. + # This saves the overheard of just iterating through ``sentences`` to + # get the list by ``sentences = list(sentences)``. + + self._sentences = list() # to be populated by self._make_tagdict... 
+ self._make_tagdict(sentences) + self.model.classes = self.classes + for iter_ in range(nr_iter): + c = 0 + n = 0 + for sentence in self._sentences: + words, tags = zip(*sentence) + + prev, prev2 = self.START + context = self.START + [self.normalize(w) for w in words] + self.END + for i, word in enumerate(words): + guess = self.tagdict.get(word) + if not guess: + feats = self._get_features(i, word, context, prev, prev2) + guess, _ = self.model.predict(feats) + self.model.update(tags[i], guess, feats) + prev2 = prev + prev = guess + c += guess == tags[i] + n += 1 + random.shuffle(self._sentences) + logging.info(f"Iter {iter_}: {c}/{n}={_pc(c, n)}") + + # We don't need the training sentences anymore, and we don't want to + # waste space on them when we pickle the trained tagger. + self._sentences = None + + self.model.average_weights() + # Pickle as a binary file + if save_loc is not None: + with open(save_loc, "wb") as fout: + # changed protocol from -1 to 2 to make pickling Python 2 compatible + pickle.dump((self.model.weights, self.tagdict, self.classes), fout, 2) + + def load(self, loc): + """ + :param loc: Load a pickled model at location. + :type loc: str + """ + + self.model.weights, self.tagdict, self.classes = load(loc) + self.model.classes = self.classes + + def encode_json_obj(self): + return self.model.weights, self.tagdict, list(self.classes) + + @classmethod + def decode_json_obj(cls, obj): + tagger = cls(load=False) + tagger.model.weights, tagger.tagdict, tagger.classes = obj + tagger.classes = set(tagger.classes) + tagger.model.classes = tagger.classes + return tagger + + def normalize(self, word): + """ + Normalization used in pre-processing. 
        - All words are lower cased
        - Groups of digits of length 4 are represented as !YEAR;
        - Other digits are represented as !DIGITS

        :rtype: str
        """
        # Internal hyphen (not a leading one) marks a compound token.
        if "-" in word and word[0] != "-":
            return "!HYPHEN"
        if word.isdigit() and len(word) == 4:
            return "!YEAR"
        if word and word[0].isdigit():
            return "!DIGITS"
        return word.lower()

    def _get_features(self, i, word, context, prev, prev2):
        """Map tokens into a feature representation, implemented as a
        {hashable: int} dict. If the features change, a new model must be
        trained.
        """

        def add(name, *args):
            # Feature keys are space-joined name+args strings; values count
            # occurrences (most fire once per token).
            features[" ".join((name,) + tuple(args))] += 1

        # `context` is padded with START sentinels, so shift i accordingly.
        i += len(self.START)
        features = defaultdict(int)
        # It's useful to have a constant feature, which acts sort of like a prior
        add("bias")
        add("i suffix", word[-3:])
        add("i pref1", word[0] if word else "")
        add("i-1 tag", prev)
        add("i-2 tag", prev2)
        add("i tag+i-2 tag", prev, prev2)
        add("i word", context[i])
        add("i-1 tag+i word", prev, context[i])
        add("i-1 word", context[i - 1])
        add("i-1 suffix", context[i - 1][-3:])
        add("i-2 word", context[i - 2])
        add("i+1 word", context[i + 1])
        add("i+1 suffix", context[i + 1][-3:])
        add("i+2 word", context[i + 2])
        return features

    def _make_tagdict(self, sentences):
        """
        Make a tag dictionary for single-tag words.
        :param sentences: A list of list of (word, tag) tuples.
+ """ + counts = defaultdict(lambda: defaultdict(int)) + for sentence in sentences: + self._sentences.append(sentence) + for word, tag in sentence: + counts[word][tag] += 1 + self.classes.add(tag) + freq_thresh = 20 + ambiguity_thresh = 0.97 + for word, tag_freqs in counts.items(): + tag, mode = max(tag_freqs.items(), key=lambda item: item[1]) + n = sum(tag_freqs.values()) + # Don't add rare words to the tag dictionary + # Only add quite unambiguous words + if n >= freq_thresh and (mode / n) >= ambiguity_thresh: + self.tagdict[word] = tag + + +def _pc(n, d): + return (n / d) * 100 + + +def _load_data_conll_format(filename): + print("Read from file: ", filename) + with open(filename, "rb") as fin: + sentences = [] + sentence = [] + for line in fin.readlines(): + line = line.strip() + # print line + if len(line) == 0: + sentences.append(sentence) + sentence = [] + continue + tokens = line.split("\t") + word = tokens[1] + tag = tokens[4] + sentence.append((word, tag)) + return sentences + + +def _get_pretrain_model(): + # Train and test on English part of ConLL data (WSJ part of Penn Treebank) + # Train: section 2-11 + # Test : section 23 + tagger = PerceptronTagger() + training = _load_data_conll_format("english_ptb_train.conll") + testing = _load_data_conll_format("english_ptb_test.conll") + print("Size of training and testing (sentence)", len(training), len(testing)) + # Train and save the model + tagger.train(training, PICKLE) + print("Accuracy : ", tagger.accuracy(testing)) + + +if __name__ == "__main__": + # _get_pretrain_model() + pass diff --git a/lib/python3.10/site-packages/nltk/tag/senna.py b/lib/python3.10/site-packages/nltk/tag/senna.py new file mode 100644 index 0000000000000000000000000000000000000000..7b52b7ee0a7bc01614c3a2a397a6ffce47835999 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/tag/senna.py @@ -0,0 +1,134 @@ +# Natural Language Toolkit: Senna POS Tagger +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Rami Al-Rfou' +# URL: +# For 
license information, see LICENSE.TXT + +""" +Senna POS tagger, NER Tagger, Chunk Tagger + +The input is: + +- path to the directory that contains SENNA executables. If the path is incorrect, + SennaTagger will automatically search for executable file specified in SENNA environment variable +- (optionally) the encoding of the input data (default:utf-8) + +Note: Unit tests for this module can be found in test/unit/test_senna.py + +>>> from nltk.tag import SennaTagger +>>> tagger = SennaTagger('/usr/share/senna-v3.0') # doctest: +SKIP +>>> tagger.tag('What is the airspeed of an unladen swallow ?'.split()) # doctest: +SKIP +[('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'), ('airspeed', 'NN'), +('of', 'IN'), ('an', 'DT'), ('unladen', 'NN'), ('swallow', 'NN'), ('?', '.')] + +>>> from nltk.tag import SennaChunkTagger +>>> chktagger = SennaChunkTagger('/usr/share/senna-v3.0') # doctest: +SKIP +>>> chktagger.tag('What is the airspeed of an unladen swallow ?'.split()) # doctest: +SKIP +[('What', 'B-NP'), ('is', 'B-VP'), ('the', 'B-NP'), ('airspeed', 'I-NP'), +('of', 'B-PP'), ('an', 'B-NP'), ('unladen', 'I-NP'), ('swallow', 'I-NP'), +('?', 'O')] + +>>> from nltk.tag import SennaNERTagger +>>> nertagger = SennaNERTagger('/usr/share/senna-v3.0') # doctest: +SKIP +>>> nertagger.tag('Shakespeare theatre was in London .'.split()) # doctest: +SKIP +[('Shakespeare', 'B-PER'), ('theatre', 'O'), ('was', 'O'), ('in', 'O'), +('London', 'B-LOC'), ('.', 'O')] +>>> nertagger.tag('UN headquarters are in NY , USA .'.split()) # doctest: +SKIP +[('UN', 'B-ORG'), ('headquarters', 'O'), ('are', 'O'), ('in', 'O'), +('NY', 'B-LOC'), (',', 'O'), ('USA', 'B-LOC'), ('.', 'O')] +""" + +from nltk.classify import Senna + + +class SennaTagger(Senna): + def __init__(self, path, encoding="utf-8"): + super().__init__(path, ["pos"], encoding) + + def tag_sents(self, sentences): + """ + Applies the tag method over a list of sentences. This method will return + for each sentence a list of tuples of (word, tag). 
+ """ + tagged_sents = super().tag_sents(sentences) + for i in range(len(tagged_sents)): + for j in range(len(tagged_sents[i])): + annotations = tagged_sents[i][j] + tagged_sents[i][j] = (annotations["word"], annotations["pos"]) + return tagged_sents + + +class SennaChunkTagger(Senna): + def __init__(self, path, encoding="utf-8"): + super().__init__(path, ["chk"], encoding) + + def tag_sents(self, sentences): + """ + Applies the tag method over a list of sentences. This method will return + for each sentence a list of tuples of (word, tag). + """ + tagged_sents = super().tag_sents(sentences) + for i in range(len(tagged_sents)): + for j in range(len(tagged_sents[i])): + annotations = tagged_sents[i][j] + tagged_sents[i][j] = (annotations["word"], annotations["chk"]) + return tagged_sents + + def bio_to_chunks(self, tagged_sent, chunk_type): + """ + Extracts the chunks in a BIO chunk-tagged sentence. + + >>> from nltk.tag import SennaChunkTagger + >>> chktagger = SennaChunkTagger('/usr/share/senna-v3.0') # doctest: +SKIP + >>> sent = 'What is the airspeed of an unladen swallow ?'.split() + >>> tagged_sent = chktagger.tag(sent) # doctest: +SKIP + >>> tagged_sent # doctest: +SKIP + [('What', 'B-NP'), ('is', 'B-VP'), ('the', 'B-NP'), ('airspeed', 'I-NP'), + ('of', 'B-PP'), ('an', 'B-NP'), ('unladen', 'I-NP'), ('swallow', 'I-NP'), + ('?', 'O')] + >>> list(chktagger.bio_to_chunks(tagged_sent, chunk_type='NP')) # doctest: +SKIP + [('What', '0'), ('the airspeed', '2-3'), ('an unladen swallow', '5-6-7')] + + :param tagged_sent: A list of tuples of word and BIO chunk tag. + :type tagged_sent: list(tuple) + :param tagged_sent: The chunk tag that users want to extract, e.g. 'NP' or 'VP' + :type tagged_sent: str + + :return: An iterable of tuples of chunks that users want to extract + and their corresponding indices. 
+ :rtype: iter(tuple(str)) + """ + current_chunk = [] + current_chunk_position = [] + for idx, word_pos in enumerate(tagged_sent): + word, pos = word_pos + if "-" + chunk_type in pos: # Append the word to the current_chunk. + current_chunk.append(word) + current_chunk_position.append(idx) + else: + if current_chunk: # Flush the full chunk when out of an NP. + _chunk_str = " ".join(current_chunk) + _chunk_pos_str = "-".join(map(str, current_chunk_position)) + yield _chunk_str, _chunk_pos_str + current_chunk = [] + current_chunk_position = [] + if current_chunk: # Flush the last chunk. + yield " ".join(current_chunk), "-".join(map(str, current_chunk_position)) + + +class SennaNERTagger(Senna): + def __init__(self, path, encoding="utf-8"): + super().__init__(path, ["ner"], encoding) + + def tag_sents(self, sentences): + """ + Applies the tag method over a list of sentences. This method will return + for each sentence a list of tuples of (word, tag). + """ + tagged_sents = super().tag_sents(sentences) + for i in range(len(tagged_sents)): + for j in range(len(tagged_sents[i])): + annotations = tagged_sents[i][j] + tagged_sents[i][j] = (annotations["word"], annotations["ner"]) + return tagged_sents diff --git a/lib/python3.10/site-packages/nltk/tag/sequential.py b/lib/python3.10/site-packages/nltk/tag/sequential.py new file mode 100644 index 0000000000000000000000000000000000000000..3fb85c9fade8079ad5fd4ba7a517939741cb2440 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/tag/sequential.py @@ -0,0 +1,755 @@ +# Natural Language Toolkit: Sequential Backoff Taggers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird (minor additions) +# Tiago Tresoldi (original affix tagger) +# URL: +# For license information, see LICENSE.TXT + +""" +Classes for tagging sentences sequentially, left to right. The +abstract base class SequentialBackoffTagger serves as the base +class for all the taggers in this module. 
class SequentialBackoffTagger(TaggerI):
    """
    An abstract base class for taggers that tag words sequentially,
    left to right.  Each word's tag is chosen by ``choose_tag()``,
    which subclasses must define.  When a tagger cannot decide on a
    tag, the decision is delegated to its backoff tagger.

    :ivar _taggers: The chain of taggers consulted for each token:
        this tagger first, followed by its backoff taggers in order.
    """

    def __init__(self, backoff=None):
        # Flatten the backoff chain once, so tag_one() is a simple scan.
        self._taggers = [self] if backoff is None else [self] + backoff._taggers

    @property
    def backoff(self):
        """The backoff tagger for this tagger, or None if there is none."""
        if len(self._taggers) > 1:
            return self._taggers[1]
        return None

    def tag(self, tokens):
        # docs inherited from TaggerI
        history = []
        for index in range(len(tokens)):
            history.append(self.tag_one(tokens, index, history))
        return list(zip(tokens, history))

    def tag_one(self, tokens, index, history):
        """
        Determine an appropriate tag for the specified token, and
        return that tag.  If this tagger cannot decide, each backoff
        tagger is consulted in turn; None is returned when none of
        them can.

        :rtype: str
        :type tokens: list
        :param tokens: The list of words that are being tagged.
        :type index: int
        :param index: The index of the word whose tag should be
            returned.
        :type history: list(str)
        :param history: A list of the tags for all words before *index*.
        """
        for tagger in self._taggers:
            tag = tagger.choose_tag(tokens, index, history)
            if tag is not None:
                return tag
        return None

    @abstractmethod
    def choose_tag(self, tokens, index, history):
        """
        Decide which tag should be used for the specified token, and
        return that tag.  Return None (rather than consulting the
        backoff tagger) when this tagger cannot decide; the caller
        (``tag_one``) handles the backoff chain.

        :rtype: str
        :type tokens: list
        :param tokens: The list of words that are being tagged.
        :type index: int
        :param index: The index of the word whose tag should be
            returned.
        :type history: list(str)
        :param history: A list of the tags for all words before *index*.
        """
class ContextTagger(SequentialBackoffTagger):
    """
    An abstract base class for sequential backoff taggers that choose
    a tag for a token based on the value of its "context".  Different
    subclasses are used to define different contexts.

    A ContextTagger chooses the tag for a token by calculating the
    token's context, and looking up the corresponding tag in a table.
    This table can be constructed manually; or it can be automatically
    constructed based on a training corpus, using the ``_train()``
    factory method.

    :ivar _context_to_tag: Dictionary mapping contexts to tags.
    """

    def __init__(self, context_to_tag, backoff=None):
        """
        :param context_to_tag: A dictionary mapping contexts to tags.
        :param backoff: The backoff tagger that should be used for this tagger.
        """
        super().__init__(backoff)
        # Falsy values (None, {}) both normalize to a fresh empty table.
        self._context_to_tag = context_to_tag if context_to_tag else {}

    @abstractmethod
    def context(self, tokens, index, history):
        """
        :return: the context that should be used to look up the tag
            for the specified token; or None if the specified token
            should not be handled by this tagger.
        :rtype: (hashable)
        """

    def choose_tag(self, tokens, index, history):
        # Unknown contexts (including a None context) map to None, which
        # makes SequentialBackoffTagger fall through to the backoff chain.
        context = self.context(tokens, index, history)
        return self._context_to_tag.get(context)

    def size(self):
        """
        :return: The number of entries in the table used by this
            tagger to map from contexts to tags.
        """
        return len(self._context_to_tag)

    def __repr__(self):
        return f"<{self.__class__.__name__}: size={self.size()}>"

    def _train(self, tagged_corpus, cutoff=0, verbose=False):
        """
        Initialize this ContextTagger's ``_context_to_tag`` table
        based on the given training data.  In particular, for each
        context ``c`` in the training data, set
        ``_context_to_tag[c]`` to the most frequent tag for that
        context.  However, exclude any contexts that are already
        tagged perfectly by the backoff tagger(s).

        The old value of ``self._context_to_tag`` (if any) is discarded.

        :param tagged_corpus: A tagged corpus.  Each item should be
            a list of (word, tag) tuples.
        :param cutoff: If the most likely tag for a context occurs
            fewer than cutoff times, then exclude it from the
            context-to-tag table for the new tagger.
        """

        token_count = hit_count = 0

        # A context is considered 'useful' if it's not already tagged
        # perfectly by the backoff tagger.
        useful_contexts = set()

        # Count how many times each tag occurs in each context.
        fd = ConditionalFreqDist()
        for sentence in tagged_corpus:
            tokens, tags = zip(*sentence)
            for index, (token, tag) in enumerate(sentence):
                # Record the event.
                token_count += 1
                # tags[:index] plays the role of the "history" argument:
                # gold tags for everything before the current token.
                context = self.context(tokens, index, tags[:index])
                if context is None:
                    continue
                fd[context][tag] += 1
                # If the backoff got it wrong, this context is useful:
                if self.backoff is None or tag != self.backoff.tag_one(
                    tokens, index, tags[:index]
                ):
                    useful_contexts.add(context)

        # Build the context_to_tag table -- for each context, figure
        # out what the most likely tag is.  Only include contexts that
        # we've seen at least `cutoff` times.
        for context in useful_contexts:
            best_tag = fd[context].max()
            hits = fd[context][best_tag]
            if hits > cutoff:
                self._context_to_tag[context] = best_tag
                hit_count += hits

        # Display some stats, if requested.
        # NOTE(review): the message says "Unigram" even though this code is
        # shared by every ContextTagger subclass -- a long-standing quirk.
        if verbose:
            size = len(self._context_to_tag)
            backoff = 100 - (hit_count * 100.0) / token_count
            pruning = 100 - (size * 100.0) / len(fd.conditions())
            print("[Trained Unigram tagger:", end=" ")
            print(
                "size={}, backoff={:.2f}%, pruning={:.2f}%]".format(
                    size, backoff, pruning
                )
            )
@jsontags.register_tag
class DefaultTagger(SequentialBackoffTagger):
    """
    A tagger that assigns the same tag to every token.

    >>> from nltk.tag import DefaultTagger
    >>> default_tagger = DefaultTagger('NN')
    >>> list(default_tagger.tag('This is a test'.split()))
    [('This', 'NN'), ('is', 'NN'), ('a', 'NN'), ('test', 'NN')]

    This tagger is recommended as a backoff tagger, in cases where
    a more powerful tagger is unable to assign a tag to the word
    (e.g. because the word was not seen during training).

    :param tag: The tag to assign to each token
    :type tag: str
    """

    json_tag = "nltk.tag.sequential.DefaultTagger"

    def __init__(self, tag):
        self._tag = tag
        super().__init__(None)

    def encode_json_obj(self):
        # The single tag string is the whole serializable state.
        return self._tag

    @classmethod
    def decode_json_obj(cls, obj):
        tag = obj
        return cls(tag)

    def choose_tag(self, tokens, index, history):
        return self._tag  # ignore token and history

    def __repr__(self):
        # BUG FIX: the repr f-string was empty (f""); restore an
        # informative representation including the assigned tag.
        return f"<DefaultTagger: tag={self._tag}>"
@jsontags.register_tag
class NgramTagger(ContextTagger):
    """
    A tagger that chooses a token's tag based on its word string and
    on the preceding n word's tags.  In particular, a tuple
    (tags[i-n:i-1], words[i]) is looked up in a table, and the
    corresponding tag is returned.  N-gram taggers are typically
    trained on a tagged corpus.

    Train a new NgramTagger using the given training data or
    the supplied model.  In particular, construct a new tagger
    whose table maps from each context (tag[i-n:i-1], word[i])
    to the most frequent tag for that context.  But exclude any
    contexts that are already tagged perfectly by the backoff
    tagger.

    :param train: A tagged corpus consisting of a list of tagged
        sentences, where each sentence is a list of (word, tag) tuples.
    :param backoff: A backoff tagger, to be used by the new
        tagger if it encounters an unknown context.
    :param cutoff: If the most likely tag for a context occurs
        fewer than *cutoff* times, then exclude it from the
        context-to-tag table for the new tagger.
    """

    json_tag = "nltk.tag.sequential.NgramTagger"

    def __init__(
        self, n, train=None, model=None, backoff=None, cutoff=0, verbose=False
    ):
        # n: size of the context window (n-1 previous tags + current word).
        self._n = n
        self._check_params(train, model)

        super().__init__(model, backoff)

        if train:
            self._train(train, cutoff, verbose)

    def encode_json_obj(self):
        # Context keys are tuples, which JSON cannot represent directly;
        # serialize them via repr() and recover with ast.literal_eval below.
        _context_to_tag = {repr(k): v for k, v in self._context_to_tag.items()}
        # Only the generic NgramTagger persists n explicitly; subclasses
        # (Unigram/Bigram/Trigram) hard-code their n in __init__, so they
        # write the two-element form.  Note this substring test matches
        # only "NgramTagger" itself, not e.g. "UnigramTagger".
        if "NgramTagger" in self.__class__.__name__:
            return self._n, _context_to_tag, self.backoff
        else:
            return _context_to_tag, self.backoff

    @classmethod
    def decode_json_obj(cls, obj):
        try:
            _n, _context_to_tag, backoff = obj
        except ValueError:
            # Two-element form written by subclasses (no explicit n).
            _context_to_tag, backoff = obj

        if not _context_to_tag:
            # An empty model contributes nothing; collapse to the backoff.
            return backoff

        _context_to_tag = {ast.literal_eval(k): v for k, v in _context_to_tag.items()}

        if "NgramTagger" in cls.__name__:
            return cls(_n, model=_context_to_tag, backoff=backoff)
        else:
            return cls(model=_context_to_tag, backoff=backoff)

    def context(self, tokens, index, history):
        # The context is the previous n-1 tags plus the current word.
        tag_context = tuple(history[max(0, index - self._n + 1) : index])
        return tag_context, tokens[index]
@jsontags.register_tag
class UnigramTagger(NgramTagger):
    """
    Unigram Tagger

    The UnigramTagger finds the most likely tag for each word in a training
    corpus, and then uses that information to assign tags to new tokens.

    >>> from nltk.corpus import brown
    >>> from nltk.tag import UnigramTagger
    >>> test_sent = brown.sents(categories='news')[0]
    >>> unigram_tagger = UnigramTagger(brown.tagged_sents(categories='news')[:500])
    >>> for tok, tag in unigram_tagger.tag(test_sent):
    ...     print("({}, {}), ".format(tok, tag)) # doctest: +NORMALIZE_WHITESPACE
    (The, AT), (Fulton, NP-TL), (County, NN-TL), (Grand, JJ-TL),
    (Jury, NN-TL), (said, VBD), (Friday, NR), (an, AT),
    (investigation, NN), (of, IN), (Atlanta's, NP$), (recent, JJ),
    (primary, NN), (election, NN), (produced, VBD), (``, ``),
    (no, AT), (evidence, NN), ('', ''), (that, CS), (any, DTI),
    (irregularities, NNS), (took, VBD), (place, NN), (., .),

    :param train: The corpus of training data, a list of tagged sentences
    :type train: list(list(tuple(str, str)))
    :param model: The tagger model
    :type model: dict
    :param backoff: Another tagger which this tagger will consult when it is
        unable to tag a word
    :type backoff: TaggerI
    :param cutoff: The number of instances of training data the tagger must see
        in order not to use the backoff tagger
    :type cutoff: int
    """

    json_tag = "nltk.tag.sequential.UnigramTagger"

    def __init__(self, train=None, model=None, backoff=None, cutoff=0, verbose=False):
        # A unigram tagger is simply the n=1 special case of NgramTagger.
        super().__init__(1, train, model, backoff, cutoff, verbose)

    def context(self, tokens, index, history):
        # With n=1 the context collapses to the word itself; the tag
        # history is irrelevant.
        word = tokens[index]
        return word
@jsontags.register_tag
class TrigramTagger(NgramTagger):
    """
    A tagger that picks a token's tag from the token's word string
    together with the tags of the two preceding words: the triple
    (prev-prev tag, prev tag, word) is looked up in a table built from
    a training corpus, and the associated tag is returned.

    :param train: The corpus of training data, a list of tagged sentences
    :type train: list(list(tuple(str, str)))
    :param model: The tagger model
    :type model: dict
    :param backoff: Another tagger which this tagger will consult when it is
        unable to tag a word
    :type backoff: TaggerI
    :param cutoff: The number of instances of training data the tagger must see
        in order not to use the backoff tagger
    :type cutoff: int
    """

    json_tag = "nltk.tag.sequential.TrigramTagger"

    def __init__(self, train=None, model=None, backoff=None, cutoff=0, verbose=False):
        # Delegate everything to NgramTagger with a fixed window of n=3.
        super().__init__(
            3,
            train=train,
            model=model,
            backoff=backoff,
            cutoff=cutoff,
            verbose=verbose,
        )
@jsontags.register_tag
class AffixTagger(ContextTagger):
    """
    A tagger that selects a token's tag from a fixed-length leading or
    trailing substring of the token's word string.  (These substrings
    are not necessarily true morphological affixes.)  Affix taggers are
    usually trained on a tagged corpus.

    Construct a new affix tagger.

    :param affix_length: Length of the affix considered during training
        and tagging; negative values select suffixes, positive values
        select prefixes.
    :param min_stem_length: Words shorter than
        min_stem_length + abs(affix_length) are assigned a tag of None
        by this tagger.
    """

    json_tag = "nltk.tag.sequential.AffixTagger"

    def __init__(
        self,
        train=None,
        model=None,
        affix_length=-3,
        min_stem_length=2,
        backoff=None,
        cutoff=0,
        verbose=False,
    ):
        self._check_params(train, model)
        super().__init__(model, backoff)
        self._affix_length = affix_length
        # Shortest word for which an affix will be extracted at all.
        self._min_word_length = min_stem_length + abs(affix_length)
        if train:
            self._train(train, cutoff, verbose)

    def encode_json_obj(self):
        return (
            self._affix_length,
            self._min_word_length,
            self._context_to_tag,
            self.backoff,
        )

    @classmethod
    def decode_json_obj(cls, obj):
        affix_length, min_word_length, context_to_tag, backoff = obj
        # __init__ stores min_stem_length + abs(affix_length), so invert
        # that sum here to round-trip the serialized state.
        return cls(
            affix_length=affix_length,
            min_stem_length=min_word_length - abs(affix_length),
            model=context_to_tag,
            backoff=backoff,
        )

    def context(self, tokens, index, history):
        word = tokens[index]
        if len(word) < self._min_word_length:
            # Too short to contain both a stem and the affix.
            return None
        if self._affix_length > 0:
            return word[: self._affix_length]
        return word[self._affix_length :]
@jsontags.register_tag
class RegexpTagger(SequentialBackoffTagger):
    r"""
    Regular Expression Tagger

    The RegexpTagger assigns tags to tokens by comparing their
    word strings to a series of regular expressions. The following tagger
    uses word suffixes to make guesses about the correct Brown Corpus part
    of speech tag:

    >>> from nltk.corpus import brown
    >>> from nltk.tag import RegexpTagger
    >>> test_sent = brown.sents(categories='news')[0]
    >>> regexp_tagger = RegexpTagger(
    ...     [(r'^-?[0-9]+(\.[0-9]+)?$', 'CD'),   # cardinal numbers
    ...      (r'(The|the|A|a|An|an)$', 'AT'),   # articles
    ...      (r'.*able$', 'JJ'),                # adjectives
    ...      (r'.*ness$', 'NN'),                # nouns formed from adjectives
    ...      (r'.*ly$', 'RB'),                  # adverbs
    ...      (r'.*s$', 'NNS'),                  # plural nouns
    ...      (r'.*ing$', 'VBG'),                # gerunds
    ...      (r'.*ed$', 'VBD'),                 # past tense verbs
    ...      (r'.*', 'NN')                      # nouns (default)
    ... ])
    >>> regexp_tagger
    <Regexp Tagger: size=9>
    >>> regexp_tagger.tag(test_sent) # doctest: +NORMALIZE_WHITESPACE
    [('The', 'AT'), ('Fulton', 'NN'), ('County', 'NN'), ('Grand', 'NN'), ('Jury', 'NN'),
    ('said', 'NN'), ('Friday', 'NN'), ('an', 'AT'), ('investigation', 'NN'), ('of', 'NN'),
    ("Atlanta's", 'NNS'), ('recent', 'NN'), ('primary', 'NN'), ('election', 'NN'),
    ('produced', 'VBD'), ('``', 'NN'), ('no', 'NN'), ('evidence', 'NN'), ("''", 'NN'),
    ('that', 'NN'), ('any', 'NN'), ('irregularities', 'NNS'), ('took', 'NN'),
    ('place', 'NN'), ('.', 'NN')]

    :type regexps: list(tuple(str, str))
    :param regexps: A list of ``(regexp, tag)`` pairs, each of
        which indicates that a word matching ``regexp`` should
        be tagged with ``tag``.  The pairs will be evaluated in
        order.  If none of the regexps match a word, then the
        optional backoff tagger is invoked, else it is
        assigned the tag None.
    """

    json_tag = "nltk.tag.sequential.RegexpTagger"

    def __init__(
        self, regexps: List[Tuple[str, str]], backoff: Optional[TaggerI] = None
    ):
        super().__init__(backoff)
        self._regexps = []
        for regexp, tag in regexps:
            try:
                self._regexps.append((re.compile(regexp), tag))
            except Exception as e:
                raise Exception(
                    f"Invalid RegexpTagger regexp: {e}\n- regexp: {regexp!r}\n- tag: {tag!r}"
                ) from e

    def encode_json_obj(self):
        # Compiled patterns are not JSON-serializable; store the raw source.
        return [(regexp.pattern, tag) for regexp, tag in self._regexps], self.backoff

    @classmethod
    def decode_json_obj(cls, obj):
        regexps, backoff = obj
        return cls(regexps, backoff)

    def choose_tag(self, tokens, index, history):
        # Patterns were pre-compiled in __init__, so call .match on them
        # directly rather than routing back through re.match (which would
        # redo a pattern-cache lookup per token).
        for regexp, tag in self._regexps:
            if regexp.match(tokens[index]):
                return tag
        return None

    def __repr__(self):
        # BUG FIX: the repr f-string was empty (f""); restore an
        # informative representation with the number of patterns.
        return f"<Regexp Tagger: size={len(self._regexps)}>"
class ClassifierBasedTagger(SequentialBackoffTagger, FeaturesetTaggerI):
    """
    A sequential tagger that uses a classifier to choose the tag for
    each token in a sentence.  The featureset input for the classifier
    is generated by a feature detector function::

        feature_detector(tokens, index, history) -> featureset

    Where tokens is the list of unlabeled tokens in the sentence;
    index is the index of the token for which feature detection
    should be performed; and history is list of the tags for all
    tokens before index.

    Construct a new classifier-based sequential tagger.

    :param feature_detector: A function used to generate the
        featureset input for the classifier::
        feature_detector(tokens, index, history) -> featureset

    :param train: A tagged corpus consisting of a list of tagged
        sentences, where each sentence is a list of (word, tag) tuples.

    :param backoff: A backoff tagger, to be used by the new tagger
        if it encounters an unknown context.

    :param classifier_builder: A function used to train a new
        classifier based on the data in *train*.  It should take
        one argument, a list of labeled featuresets (i.e.,
        (featureset, label) tuples).

    :param classifier: The classifier that should be used by the
        tagger.  This is only useful if you want to manually
        construct the classifier; normally, you would use *train*
        instead.

    :param cutoff_prob: If specified, then this tagger will fall
        back on its backoff tagger if the probability of the most
        likely tag is less than *cutoff_prob*.
    """

    def __init__(
        self,
        feature_detector=None,
        train=None,
        classifier_builder=NaiveBayesClassifier.train,
        classifier=None,
        backoff=None,
        cutoff_prob=None,
        verbose=False,
    ):
        self._check_params(train, classifier)

        super().__init__(backoff)

        # NOTE(review): this check overlaps with _check_params above;
        # kept because _check_params' exact contract is defined elsewhere.
        if (train and classifier) or (not train and not classifier):
            raise ValueError(
                "Must specify either training data or trained classifier."
            )

        if feature_detector is not None:
            self._feature_detector = feature_detector
            # The feature detector function, used to generate a featureset
            # for each token: feature_detector(tokens, index, history) -> featureset

        self._cutoff_prob = cutoff_prob
        """Cutoff probability for tagging -- if the probability of the
        most likely tag is less than this, then use backoff."""

        self._classifier = classifier
        """The classifier used to choose a tag for each token."""

        if train:
            self._train(train, classifier_builder, verbose)

    def choose_tag(self, tokens, index, history):
        # Use our feature detector to get the featureset.
        featureset = self.feature_detector(tokens, index, history)

        # Use the classifier to pick a tag.  If a cutoff probability
        # was specified, then check that the tag's probability is
        # higher than that cutoff first; otherwise, return None.
        if self._cutoff_prob is None:
            return self._classifier.classify(featureset)

        pdist = self._classifier.prob_classify(featureset)
        tag = pdist.max()
        return tag if pdist.prob(tag) >= self._cutoff_prob else None

    def _train(self, tagged_corpus, classifier_builder, verbose):
        """
        Build a new classifier, based on the given training data
        *tagged_corpus*.
        """

        classifier_corpus = []
        if verbose:
            print("Constructing training corpus for classifier.")

        for sentence in tagged_corpus:
            history = []
            untagged_sentence, tags = zip(*sentence)
            for index in range(len(sentence)):
                # Features are extracted against the gold tag history.
                featureset = self.feature_detector(untagged_sentence, index, history)
                classifier_corpus.append((featureset, tags[index]))
                history.append(tags[index])

        if verbose:
            print(f"Training classifier ({len(classifier_corpus)} instances)")
        self._classifier = classifier_builder(classifier_corpus)

    def __repr__(self):
        # BUG FIX: the repr f-string was empty (f""); restore an
        # informative representation showing the underlying classifier.
        return f"<ClassifierBasedTagger: {self._classifier}>"

    def feature_detector(self, tokens, index, history):
        """
        Return the feature detector that this tagger uses to generate
        featuresets for its classifier.  The feature detector is a
        function with the signature::

            feature_detector(tokens, index, history) -> featureset

        See ``classifier()``
        """
        return self._feature_detector(tokens, index, history)

    def classifier(self):
        """
        Return the classifier that this tagger uses to choose a tag
        for each word in a sentence.  The input for this classifier is
        generated using this tagger's feature detector.
        See ``feature_detector()``
        """
        return self._classifier
class ClassifierBasedPOSTagger(ClassifierBasedTagger):
    """
    A classifier based part of speech tagger.
    """

    def feature_detector(self, tokens, index, history):
        """
        Build the featureset for the token at *index*: the word itself,
        its lowercase form and 1-3 character suffixes, the two previous
        words and tags (None at the sentence start), combined
        word+previous-tag features, and a coarse orthographic "shape".
        """
        word = tokens[index]
        if index == 0:
            prevword = prevprevword = None
            prevtag = prevprevtag = None
        elif index == 1:
            prevword = tokens[index - 1].lower()
            prevprevword = None
            prevtag = history[index - 1]
            prevprevtag = None
        else:
            prevword = tokens[index - 1].lower()
            prevprevword = tokens[index - 2].lower()
            prevtag = history[index - 1]
            prevprevtag = history[index - 2]

        # BUG FIX: the original pattern r"[0-9]+(\.[0-9]*)?|[0-9]*\.[0-9]+$"
        # anchored only the second alternative, so a token like "123abc"
        # was mis-classified as a number.  Group the alternation so the
        # end anchor applies to both branches.
        if re.match(r"(?:[0-9]+(?:\.[0-9]*)?|[0-9]*\.[0-9]+)$", word):
            shape = "number"
        elif re.match(r"\W+$", word):
            shape = "punct"
        elif re.match("[A-Z][a-z]+$", word):
            shape = "upcase"
        elif re.match("[a-z]+$", word):
            shape = "downcase"
        elif re.match(r"\w+$", word):
            shape = "mixedcase"
        else:
            shape = "other"

        features = {
            "prevtag": prevtag,
            "prevprevtag": prevprevtag,
            "word": word,
            "word.lower": word.lower(),
            "suffix3": word.lower()[-3:],
            "suffix2": word.lower()[-2:],
            "suffix1": word.lower()[-1:],
            "prevprevword": prevprevword,
            "prevword": prevword,
            "prevtag+word": f"{prevtag}+{word.lower()}",
            "prevprevtag+word": f"{prevprevtag}+{word.lower()}",
            "prevword+word": f"{prevword}+{word.lower()}",
            "shape": shape,
        }
        return features
class StanfordTagger(TaggerI):
    """
    An interface to Stanford taggers. Subclasses must define:

    - ``_cmd`` property: A property that returns the command that will be
      executed.
    - ``_SEPARATOR``: Class constant that represents that character that
      is used to separate the tokens from their tags.
    - ``_JAR`` file: Class constant that represents the jar file name.
    """

    # Character separating a word from its tag in the jar's output
    # (e.g. "_" for the POS tagger, "/" for the NER tagger).
    _SEPARATOR = ""
    # Name of the Stanford jar to locate on disk or on the CLASSPATH.
    _JAR = ""

    def __init__(
        self,
        model_filename,
        path_to_jar=None,
        encoding="utf8",
        verbose=False,
        java_options="-mx1000m",
    ):
        # Raise deprecation warning.
        warnings.warn(
            str(
                "\nThe StanfordTokenizer will "
                "be deprecated in version 3.2.6.\n"
                "Please use \033[91mnltk.parse.corenlp.CoreNLPParser\033[0m instead."
            ),
            DeprecationWarning,
            stacklevel=2,
        )

        if not self._JAR:
            warnings.warn(
                "The StanfordTagger class is not meant to be "
                "instantiated directly. Did you mean "
                "StanfordPOSTagger or StanfordNERTagger?"
            )
        # Locate the jar and the trained model file; models are searched
        # via the STANFORD_MODELS environment variable.
        self._stanford_jar = find_jar(
            self._JAR, path_to_jar, searchpath=(), url=_stanford_url, verbose=verbose
        )

        self._stanford_model = find_file(
            model_filename, env_vars=("STANFORD_MODELS",), verbose=verbose
        )

        self._encoding = encoding
        self.java_options = java_options

    @property
    @abstractmethod
    def _cmd(self):
        """
        A property that returns the command that will be executed.
        """

    def tag(self, tokens):
        # This function should return list of tuple rather than list of list
        return sum(self.tag_sents([tokens]), [])

    def tag_sents(self, sentences):
        """
        Tag a list of tokenized sentences by writing them to a temporary
        file, running the Stanford jar over it in a java subprocess, and
        parsing the subprocess output.
        """
        encoding = self._encoding
        default_options = " ".join(_java_options)
        config_java(options=self.java_options, verbose=False)

        # Create a temporary input file
        _input_fh, self._input_file_path = tempfile.mkstemp(text=True)

        cmd = list(self._cmd)
        cmd.extend(["-encoding", encoding])

        # Write the actual sentences to the temporary input file
        _input_fh = os.fdopen(_input_fh, "wb")
        _input = "\n".join(" ".join(x) for x in sentences)
        if isinstance(_input, str) and encoding:
            _input = _input.encode(encoding)
        _input_fh.write(_input)
        _input_fh.close()

        # Run the tagger and get the output
        stanpos_output, _stderr = java(
            cmd, classpath=self._stanford_jar, stdout=PIPE, stderr=PIPE
        )
        stanpos_output = stanpos_output.decode(encoding)

        # Delete the temporary file
        os.unlink(self._input_file_path)

        # Return java configurations to their default values
        config_java(options=default_options, verbose=False)

        return self.parse_output(stanpos_output, sentences)

    def parse_output(self, text, sentences=None):
        """
        Convert the raw subprocess output into a list of sentences, each
        a list of (word, tag) tuples, splitting each token on _SEPARATOR.
        """
        # Output the tagged sentences
        tagged_sentences = []
        for tagged_sentence in text.strip().split("\n"):
            sentence = []
            for tagged_word in tagged_sentence.strip().split():
                word_tags = tagged_word.strip().split(self._SEPARATOR)
                # NOTE(review): .replace("0", "") strips every '0' from the
                # tag -- it would mangle any tag legitimately containing a
                # zero; confirm this normalization is intended.
                sentence.append(
                    ("".join(word_tags[:-1]), word_tags[-1].replace("0", "").upper())
                )
            tagged_sentences.append(sentence)
        return tagged_sentences
class StanfordPOSTagger(StanfordTagger):
    """
    A class for pos tagging with Stanford Tagger. The input is the paths to:
    - a model trained on training data
    - (optionally) the path to the stanford tagger jar file. If not specified here,
      then this jar file must be specified in the CLASSPATH environment variable.
    - (optionally) the encoding of the training data (default: UTF-8)

    Example:

    >>> from nltk.tag import StanfordPOSTagger
    >>> st = StanfordPOSTagger('english-bidirectional-distsim.tagger') # doctest: +SKIP
    >>> st.tag('What is the airspeed of an unladen swallow ?'.split()) # doctest: +SKIP
    [('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'), ('airspeed', 'NN'), ('of', 'IN'), ('an', 'DT'), ('unladen', 'JJ'), ('swallow', 'VB'), ('?', '.')]
    """

    _SEPARATOR = "_"
    _JAR = "stanford-postagger.jar"

    # NOTE: the redundant ``__init__(*args, **kwargs)`` that only delegated
    # to super().__init__ has been removed; Python resolves construction to
    # StanfordTagger.__init__ automatically, with identical behavior.

    @property
    def _cmd(self):
        """Command line invoking the MaxentTagger over the temp input file."""
        return [
            "edu.stanford.nlp.tagger.maxent.MaxentTagger",
            "-model",
            self._stanford_model,
            "-textFile",
            self._input_file_path,
            "-tokenize",
            "false",
            "-outputFormatOptions",
            "keepEmptySentences",
        ]
class StanfordNERTagger(StanfordTagger):
    """
    A class for Named-Entity Tagging with Stanford Tagger. The input is the paths to:

    - a model trained on training data
    - (optionally) the path to the stanford tagger jar file. If not specified here,
      then this jar file must be specified in the CLASSPATH environment variable.
    - (optionally) the encoding of the training data (default: UTF-8)

    Example:

    >>> from nltk.tag import StanfordNERTagger
    >>> st = StanfordNERTagger('english.all.3class.distsim.crf.ser.gz') # doctest: +SKIP
    >>> st.tag('Rami Eid is studying at Stony Brook University in NY'.split()) # doctest: +SKIP
    [('Rami', 'PERSON'), ('Eid', 'PERSON'), ('is', 'O'), ('studying', 'O'),
     ('at', 'O'), ('Stony', 'ORGANIZATION'), ('Brook', 'ORGANIZATION'),
     ('University', 'ORGANIZATION'), ('in', 'O'), ('NY', 'LOCATION')]
    """

    _SEPARATOR = "/"
    _JAR = "stanford-ner.jar"
    # Output format requested from the CRF classifier; parse_output only
    # implements this format.
    _FORMAT = "slashTags"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    @property
    def _cmd(self):
        # Adding -tokenizerFactory edu.stanford.nlp.process.WhitespaceTokenizer -tokenizerOptions tokenizeNLs=false for not using stanford Tokenizer
        return [
            "edu.stanford.nlp.ie.crf.CRFClassifier",
            "-loadClassifier",
            self._stanford_model,
            "-textFile",
            self._input_file_path,
            "-outputFormat",
            self._FORMAT,
            "-tokenizerFactory",
            "edu.stanford.nlp.process.WhitespaceTokenizer",
            "-tokenizerOptions",
            '"tokenizeNLs=false"',
        ]

    def parse_output(self, text, sentences):
        """
        Split slashTags output into per-sentence (word, tag) lists,
        using the input *sentences* lengths to re-segment the flat
        token stream the jar emits.
        """
        if self._FORMAT == "slashTags":
            # Joint together to a big list
            tagged_sentences = []
            for tagged_sentence in text.strip().split("\n"):
                for tagged_word in tagged_sentence.strip().split():
                    word_tags = tagged_word.strip().split(self._SEPARATOR)
                    tagged_sentences.append(("".join(word_tags[:-1]), word_tags[-1]))

            # Separate it according to the input
            result = []
            start = 0
            for sent in sentences:
                result.append(tagged_sentences[start : start + len(sent)])
                start += len(sent)
            return result

        # Any other output format is unsupported.
        raise NotImplementedError
b/lib/python3.10/site-packages/nltk/tag/tnt.py @@ -0,0 +1,579 @@ +# Natural Language Toolkit: TnT Tagger +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Sam Huston +# +# URL: +# For license information, see LICENSE.TXT + +""" +Implementation of 'TnT - A Statisical Part of Speech Tagger' +by Thorsten Brants + +https://aclanthology.org/A00-1031.pdf +""" + +from math import log +from operator import itemgetter + +from nltk.probability import ConditionalFreqDist, FreqDist +from nltk.tag.api import TaggerI + + +class TnT(TaggerI): + """ + TnT - Statistical POS tagger + + IMPORTANT NOTES: + + * DOES NOT AUTOMATICALLY DEAL WITH UNSEEN WORDS + + - It is possible to provide an untrained POS tagger to + create tags for unknown words, see __init__ function + + * SHOULD BE USED WITH SENTENCE-DELIMITED INPUT + + - Due to the nature of this tagger, it works best when + trained over sentence delimited input. + - However it still produces good results if the training + data and testing data are separated on all punctuation eg: [,.?!] + - Input for training is expected to be a list of sentences + where each sentence is a list of (word, tag) tuples + - Input for tag function is a single sentence + Input for tagdata function is a list of sentences + Output is of a similar form + + * Function provided to process text that is unsegmented + + - Please see basic_sent_chop() + + + TnT uses a second order Markov model to produce tags for + a sequence of input, specifically: + + argmax [Proj(P(t_i|t_i-1,t_i-2)P(w_i|t_i))] P(t_T+1 | t_T) + + IE: the maximum projection of a set of probabilities + + The set of possible tags for a given word is derived + from the training data. It is the set of all tags + that exact word has been assigned. 
+ + To speed up and get more precision, we can use log addition + to instead multiplication, specifically: + + argmax [Sigma(log(P(t_i|t_i-1,t_i-2))+log(P(w_i|t_i)))] + + log(P(t_T+1|t_T)) + + The probability of a tag for a given word is the linear + interpolation of 3 markov models; a zero-order, first-order, + and a second order model. + + P(t_i| t_i-1, t_i-2) = l1*P(t_i) + l2*P(t_i| t_i-1) + + l3*P(t_i| t_i-1, t_i-2) + + A beam search is used to limit the memory usage of the algorithm. + The degree of the beam can be changed using N in the initialization. + N represents the maximum number of possible solutions to maintain + while tagging. + + It is possible to differentiate the tags which are assigned to + capitalized words. However this does not result in a significant + gain in the accuracy of the results. + """ + + def __init__(self, unk=None, Trained=False, N=1000, C=False): + """ + Construct a TnT statistical tagger. Tagger must be trained + before being used to tag input. + + :param unk: instance of a POS tagger, conforms to TaggerI + :type unk: TaggerI + :param Trained: Indication that the POS tagger is trained or not + :type Trained: bool + :param N: Beam search degree (see above) + :type N: int + :param C: Capitalization flag + :type C: bool + + Initializer, creates frequency distributions to be used + for tagging + + _lx values represent the portion of the tri/bi/uni taggers + to be used to calculate the probability + + N value is the number of possible solutions to maintain + while tagging. A good value for this is 1000 + + C is a boolean value which specifies to use or + not use the Capitalization of the word as additional + information for tagging. 
+ NOTE: using capitalization may not increase the accuracy + of the tagger + """ + + self._uni = FreqDist() + self._bi = ConditionalFreqDist() + self._tri = ConditionalFreqDist() + self._wd = ConditionalFreqDist() + self._eos = ConditionalFreqDist() + self._l1 = 0.0 + self._l2 = 0.0 + self._l3 = 0.0 + self._N = N + self._C = C + self._T = Trained + + self._unk = unk + + # statistical tools (ignore or delete me) + self.unknown = 0 + self.known = 0 + + def train(self, data): + """ + Uses a set of tagged data to train the tagger. + If an unknown word tagger is specified, + it is trained on the same data. + + :param data: List of lists of (word, tag) tuples + :type data: tuple(str) + """ + + # Ensure that local C flag is initialized before use + C = False + + if self._unk is not None and self._T == False: + self._unk.train(data) + + for sent in data: + history = [("BOS", False), ("BOS", False)] + for w, t in sent: + + # if capitalization is requested, + # and the word begins with a capital + # set local flag C to True + if self._C and w[0].isupper(): + C = True + + self._wd[w][t] += 1 + self._uni[(t, C)] += 1 + self._bi[history[1]][(t, C)] += 1 + self._tri[tuple(history)][(t, C)] += 1 + + history.append((t, C)) + history.pop(0) + + # set local flag C to false for the next word + C = False + + self._eos[t]["EOS"] += 1 + + # compute lambda values from the trained frequency distributions + self._compute_lambda() + + def _compute_lambda(self): + """ + creates lambda values based upon training data + + NOTE: no need to explicitly reference C, + it is contained within the tag variable :: tag == (tag,C) + + for each tag trigram (t1, t2, t3) + depending on the maximum value of + - f(t1,t2,t3)-1 / f(t1,t2)-1 + - f(t2,t3)-1 / f(t2)-1 + - f(t3)-1 / N-1 + + increment l3,l2, or l1 by f(t1,t2,t3) + + ISSUES -- Resolutions: + if 2 values are equal, increment both lambda values + by (f(t1,t2,t3) / 2) + """ + + # temporary lambda variables + tl1 = 0.0 + tl2 = 0.0 + tl3 = 0.0 + + # for 
each t1,t2 in system + for history in self._tri.conditions(): + (h1, h2) = history + + # for each t3 given t1,t2 in system + # (NOTE: tag actually represents (tag,C)) + # However no effect within this function + for tag in self._tri[history].keys(): + + # if there has only been 1 occurrence of this tag in the data + # then ignore this trigram. + if self._uni[tag] == 1: + continue + + # safe_div provides a safe floating point division + # it returns -1 if the denominator is 0 + c3 = self._safe_div( + (self._tri[history][tag] - 1), (self._tri[history].N() - 1) + ) + c2 = self._safe_div((self._bi[h2][tag] - 1), (self._bi[h2].N() - 1)) + c1 = self._safe_div((self._uni[tag] - 1), (self._uni.N() - 1)) + + # if c1 is the maximum value: + if (c1 > c3) and (c1 > c2): + tl1 += self._tri[history][tag] + + # if c2 is the maximum value + elif (c2 > c3) and (c2 > c1): + tl2 += self._tri[history][tag] + + # if c3 is the maximum value + elif (c3 > c2) and (c3 > c1): + tl3 += self._tri[history][tag] + + # if c3, and c2 are equal and larger than c1 + elif (c3 == c2) and (c3 > c1): + tl2 += self._tri[history][tag] / 2.0 + tl3 += self._tri[history][tag] / 2.0 + + # if c1, and c2 are equal and larger than c3 + # this might be a dumb thing to do....(not sure yet) + elif (c2 == c1) and (c1 > c3): + tl1 += self._tri[history][tag] / 2.0 + tl2 += self._tri[history][tag] / 2.0 + + # otherwise there might be a problem + # eg: all values = 0 + else: + pass + + # Lambda normalisation: + # ensures that l1+l2+l3 = 1 + self._l1 = tl1 / (tl1 + tl2 + tl3) + self._l2 = tl2 / (tl1 + tl2 + tl3) + self._l3 = tl3 / (tl1 + tl2 + tl3) + + def _safe_div(self, v1, v2): + """ + Safe floating point division function, does not allow division by 0 + returns -1 if the denominator is 0 + """ + if v2 == 0: + return -1 + else: + return v1 / v2 + + def tagdata(self, data): + """ + Tags each sentence in a list of sentences + + :param data:list of list of words + :type data: [[string,],] + :return: list of list of 
(word, tag) tuples + + Invokes tag(sent) function for each sentence + compiles the results into a list of tagged sentences + each tagged sentence is a list of (word, tag) tuples + """ + res = [] + for sent in data: + res1 = self.tag(sent) + res.append(res1) + return res + + def tag(self, data): + """ + Tags a single sentence + + :param data: list of words + :type data: [string,] + + :return: [(word, tag),] + + Calls recursive function '_tagword' + to produce a list of tags + + Associates the sequence of returned tags + with the correct words in the input sequence + + returns a list of (word, tag) tuples + """ + + current_state = [(["BOS", "BOS"], 0.0)] + + sent = list(data) + + tags = self._tagword(sent, current_state) + + res = [] + for i in range(len(sent)): + # unpack and discard the C flags + (t, C) = tags[i + 2] + res.append((sent[i], t)) + + return res + + def _tagword(self, sent, current_states): + """ + :param sent : List of words remaining in the sentence + :type sent : [word,] + :param current_states : List of possible tag combinations for + the sentence so far, and the log probability + associated with each tag combination + :type current_states : [([tag, ], logprob), ] + + Tags the first word in the sentence and + recursively tags the reminder of sentence + + Uses formula specified above to calculate the probability + of a particular tag + """ + + # if this word marks the end of the sentence, + # return the most probable tag + if sent == []: + (h, logp) = current_states[0] + return h + + # otherwise there are more words to be tagged + word = sent[0] + sent = sent[1:] + new_states = [] + + # if the Capitalisation is requested, + # initialise the flag for this word + C = False + if self._C and word[0].isupper(): + C = True + + # if word is known + # compute the set of possible tags + # and their associated log probabilities + if word in self._wd: + self.known += 1 + + for (history, curr_sent_logprob) in current_states: + logprobs = [] + + for t in 
self._wd[word].keys(): + tC = (t, C) + p_uni = self._uni.freq(tC) + p_bi = self._bi[history[-1]].freq(tC) + p_tri = self._tri[tuple(history[-2:])].freq(tC) + p_wd = self._wd[word][t] / self._uni[tC] + p = self._l1 * p_uni + self._l2 * p_bi + self._l3 * p_tri + p2 = log(p, 2) + log(p_wd, 2) + + # compute the result of appending each tag to this history + new_states.append((history + [tC], curr_sent_logprob + p2)) + + # otherwise a new word, set of possible tags is unknown + else: + self.unknown += 1 + + # since a set of possible tags, + # and the probability of each specific tag + # can not be returned from most classifiers: + # specify that any unknown words are tagged with certainty + p = 1 + + # if no unknown word tagger has been specified + # then use the tag 'Unk' + if self._unk is None: + tag = ("Unk", C) + + # otherwise apply the unknown word tagger + else: + [(_w, t)] = list(self._unk.tag([word])) + tag = (t, C) + + for (history, logprob) in current_states: + history.append(tag) + + new_states = current_states + + # now have computed a set of possible new_states + + # sort states by log prob + # set is now ordered greatest to least log probability + new_states.sort(reverse=True, key=itemgetter(1)) + + # del everything after N (threshold) + # this is the beam search cut + if len(new_states) > self._N: + new_states = new_states[: self._N] + + # compute the tags for the rest of the sentence + # return the best list of tags for the sentence + return self._tagword(sent, new_states) + + +######################################## +# helper function -- basic sentence tokenizer +######################################## + + +def basic_sent_chop(data, raw=True): + """ + Basic method for tokenizing input into sentences + for this tagger: + + :param data: list of tokens (words or (word, tag) tuples) + :type data: str or tuple(str, str) + :param raw: boolean flag marking the input data + as a list of words or a list of tagged words + :type raw: bool + :return: list of 
sentences + sentences are a list of tokens + tokens are the same as the input + + Function takes a list of tokens and separates the tokens into lists + where each list represents a sentence fragment + This function can separate both tagged and raw sequences into + basic sentences. + + Sentence markers are the set of [,.!?] + + This is a simple method which enhances the performance of the TnT + tagger. Better sentence tokenization will further enhance the results. + """ + + new_data = [] + curr_sent = [] + sent_mark = [",", ".", "?", "!"] + + if raw: + for word in data: + if word in sent_mark: + curr_sent.append(word) + new_data.append(curr_sent) + curr_sent = [] + else: + curr_sent.append(word) + + else: + for (word, tag) in data: + if word in sent_mark: + curr_sent.append((word, tag)) + new_data.append(curr_sent) + curr_sent = [] + else: + curr_sent.append((word, tag)) + return new_data + + +def demo(): + from nltk.corpus import brown + + sents = list(brown.tagged_sents()) + test = list(brown.sents()) + + tagger = TnT() + tagger.train(sents[200:1000]) + + tagged_data = tagger.tagdata(test[100:120]) + + for j in range(len(tagged_data)): + s = tagged_data[j] + t = sents[j + 100] + for i in range(len(s)): + print(s[i], "--", t[i]) + print() + + +def demo2(): + from nltk.corpus import treebank + + d = list(treebank.tagged_sents()) + + t = TnT(N=1000, C=False) + s = TnT(N=1000, C=True) + t.train(d[(11) * 100 :]) + s.train(d[(11) * 100 :]) + + for i in range(10): + tacc = t.accuracy(d[i * 100 : ((i + 1) * 100)]) + tp_un = t.unknown / (t.known + t.unknown) + tp_kn = t.known / (t.known + t.unknown) + t.unknown = 0 + t.known = 0 + + print("Capitalization off:") + print("Accuracy:", tacc) + print("Percentage known:", tp_kn) + print("Percentage unknown:", tp_un) + print("Accuracy over known words:", (tacc / tp_kn)) + + sacc = s.accuracy(d[i * 100 : ((i + 1) * 100)]) + sp_un = s.unknown / (s.known + s.unknown) + sp_kn = s.known / (s.known + s.unknown) + s.unknown = 0 + 
s.known = 0 + + print("Capitalization on:") + print("Accuracy:", sacc) + print("Percentage known:", sp_kn) + print("Percentage unknown:", sp_un) + print("Accuracy over known words:", (sacc / sp_kn)) + + +def demo3(): + from nltk.corpus import brown, treebank + + d = list(treebank.tagged_sents()) + e = list(brown.tagged_sents()) + + d = d[:1000] + e = e[:1000] + + d10 = int(len(d) * 0.1) + e10 = int(len(e) * 0.1) + + tknacc = 0 + sknacc = 0 + tallacc = 0 + sallacc = 0 + tknown = 0 + sknown = 0 + + for i in range(10): + + t = TnT(N=1000, C=False) + s = TnT(N=1000, C=False) + + dtest = d[(i * d10) : ((i + 1) * d10)] + etest = e[(i * e10) : ((i + 1) * e10)] + + dtrain = d[: (i * d10)] + d[((i + 1) * d10) :] + etrain = e[: (i * e10)] + e[((i + 1) * e10) :] + + t.train(dtrain) + s.train(etrain) + + tacc = t.accuracy(dtest) + tp_un = t.unknown / (t.known + t.unknown) + tp_kn = t.known / (t.known + t.unknown) + tknown += tp_kn + t.unknown = 0 + t.known = 0 + + sacc = s.accuracy(etest) + sp_un = s.unknown / (s.known + s.unknown) + sp_kn = s.known / (s.known + s.unknown) + sknown += sp_kn + s.unknown = 0 + s.known = 0 + + tknacc += tacc / tp_kn + sknacc += sacc / tp_kn + tallacc += tacc + sallacc += sacc + + # print(i+1, (tacc / tp_kn), i+1, (sacc / tp_kn), i+1, tacc, i+1, sacc) + + print("brown: acc over words known:", 10 * tknacc) + print(" : overall accuracy:", 10 * tallacc) + print(" : words known:", 10 * tknown) + print("treebank: acc over words known:", 10 * sknacc) + print(" : overall accuracy:", 10 * sallacc) + print(" : words known:", 10 * sknown) diff --git a/lib/python3.10/site-packages/nltk/tag/util.py b/lib/python3.10/site-packages/nltk/tag/util.py new file mode 100644 index 0000000000000000000000000000000000000000..e35b98195f2b7b448775a49795e0f34d612624a6 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/tag/util.py @@ -0,0 +1,72 @@ +# Natural Language Toolkit: Tagger Utilities +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven 
Bird +# URL: +# For license information, see LICENSE.TXT + + +def str2tuple(s, sep="/"): + """ + Given the string representation of a tagged token, return the + corresponding tuple representation. The rightmost occurrence of + *sep* in *s* will be used to divide *s* into a word string and + a tag string. If *sep* does not occur in *s*, return (s, None). + + >>> from nltk.tag.util import str2tuple + >>> str2tuple('fly/NN') + ('fly', 'NN') + + :type s: str + :param s: The string representation of a tagged token. + :type sep: str + :param sep: The separator string used to separate word strings + from tags. + """ + loc = s.rfind(sep) + if loc >= 0: + return (s[:loc], s[loc + len(sep) :].upper()) + else: + return (s, None) + + +def tuple2str(tagged_token, sep="/"): + """ + Given the tuple representation of a tagged token, return the + corresponding string representation. This representation is + formed by concatenating the token's word string, followed by the + separator, followed by the token's tag. (If the tag is None, + then just return the bare word string.) + + >>> from nltk.tag.util import tuple2str + >>> tagged_token = ('fly', 'NN') + >>> tuple2str(tagged_token) + 'fly/NN' + + :type tagged_token: tuple(str, str) + :param tagged_token: The tuple representation of a tagged token. + :type sep: str + :param sep: The separator string used to separate word strings + from tags. + """ + word, tag = tagged_token + if tag is None: + return word + else: + assert sep not in tag, "tag may not contain sep!" + return f"{word}{sep}{tag}" + + +def untag(tagged_sentence): + """ + Given a tagged sentence, return an untagged version of that + sentence. I.e., return a list containing the first element + of each tuple in *tagged_sentence*. 
+ + >>> from nltk.tag.util import untag + >>> untag([('John', 'NNP'), ('saw', 'VBD'), ('Mary', 'NNP')]) + ['John', 'saw', 'Mary'] + + """ + return [w for (w, t) in tagged_sentence] diff --git a/lib/python3.10/site-packages/nltk/twitter/__init__.py b/lib/python3.10/site-packages/nltk/twitter/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cd14ffb4703bf38bb349cc19cca2d97b6df29f77 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/twitter/__init__.py @@ -0,0 +1,35 @@ +# Natural Language Toolkit: Twitter +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# URL: +# For license information, see LICENSE.TXT + +""" +NLTK Twitter Package + +This package contains classes for retrieving Tweet documents using the +Twitter API. + +""" +try: + import twython +except ImportError: + import warnings + + warnings.warn( + "The twython library has not been installed. " + "Some functionality from the twitter package will not be available." + ) +else: + from nltk.twitter.util import Authenticate, credsfromfile + from nltk.twitter.twitterclient import ( + Streamer, + Query, + Twitter, + TweetViewer, + TweetWriter, + ) + + +from nltk.twitter.common import json2csv diff --git a/lib/python3.10/site-packages/nltk/twitter/api.py b/lib/python3.10/site-packages/nltk/twitter/api.py new file mode 100644 index 0000000000000000000000000000000000000000..71248b176340abd0d0d7d51e8ed68700f7948e13 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/twitter/api.py @@ -0,0 +1,145 @@ +# Natural Language Toolkit: Twitter API +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# Lorenzo Rubio +# URL: +# For license information, see LICENSE.TXT + +""" +This module provides an interface for TweetHandlers, and support for timezone +handling. 
+""" + +import time as _time +from abc import ABCMeta, abstractmethod +from datetime import datetime, timedelta, timezone, tzinfo + + +class LocalTimezoneOffsetWithUTC(tzinfo): + """ + This is not intended to be a general purpose class for dealing with the + local timezone. In particular: + + * it assumes that the date passed has been created using + `datetime(..., tzinfo=Local)`, where `Local` is an instance of + the object `LocalTimezoneOffsetWithUTC`; + * for such an object, it returns the offset with UTC, used for date comparisons. + + Reference: https://docs.python.org/3/library/datetime.html + """ + + STDOFFSET = timedelta(seconds=-_time.timezone) + + if _time.daylight: + DSTOFFSET = timedelta(seconds=-_time.altzone) + else: + DSTOFFSET = STDOFFSET + + def utcoffset(self, dt): + """ + Access the relevant time offset. + """ + return self.DSTOFFSET + + +LOCAL = LocalTimezoneOffsetWithUTC() + + +class BasicTweetHandler(metaclass=ABCMeta): + """ + Minimal implementation of `TweetHandler`. + + Counts the number of Tweets and decides when the client should stop + fetching them. + """ + + def __init__(self, limit=20): + self.limit = limit + self.counter = 0 + + """ + A flag to indicate to the client whether to stop fetching data given + some condition (e.g., reaching a date limit). + """ + self.do_stop = False + + """ + Stores the id of the last fetched Tweet to handle pagination. + """ + self.max_id = None + + def do_continue(self): + """ + Returns `False` if the client should stop fetching Tweets. + """ + return self.counter < self.limit and not self.do_stop + + +class TweetHandlerI(BasicTweetHandler): + """ + Interface class whose subclasses should implement a handle method that + Twitter clients can delegate to. + """ + + def __init__(self, limit=20, upper_date_limit=None, lower_date_limit=None): + """ + :param int limit: The number of data items to process in the current\ + round of processing. 
+ + :param tuple upper_date_limit: The date at which to stop collecting\ + new data. This should be entered as a tuple which can serve as the\ + argument to `datetime.datetime`.\ + E.g. `date_limit=(2015, 4, 1, 12, 40)` for 12:30 pm on April 1 2015. + + :param tuple lower_date_limit: The date at which to stop collecting\ + new data. See `upper_data_limit` for formatting. + """ + BasicTweetHandler.__init__(self, limit) + + self.upper_date_limit = None + self.lower_date_limit = None + if upper_date_limit: + self.upper_date_limit = datetime(*upper_date_limit, tzinfo=LOCAL) + if lower_date_limit: + self.lower_date_limit = datetime(*lower_date_limit, tzinfo=LOCAL) + + self.startingup = True + + @abstractmethod + def handle(self, data): + """ + Deal appropriately with data returned by the Twitter API + """ + + @abstractmethod + def on_finish(self): + """ + Actions when the tweet limit has been reached + """ + + def check_date_limit(self, data, verbose=False): + """ + Validate date limits. + """ + if self.upper_date_limit or self.lower_date_limit: + date_fmt = "%a %b %d %H:%M:%S +0000 %Y" + tweet_date = datetime.strptime(data["created_at"], date_fmt).replace( + tzinfo=timezone.utc + ) + if (self.upper_date_limit and tweet_date > self.upper_date_limit) or ( + self.lower_date_limit and tweet_date < self.lower_date_limit + ): + if self.upper_date_limit: + message = "earlier" + date_limit = self.upper_date_limit + else: + message = "later" + date_limit = self.lower_date_limit + if verbose: + print( + "Date limit {} is {} than date of current tweet {}".format( + date_limit, message, tweet_date + ) + ) + self.do_stop = True diff --git a/lib/python3.10/site-packages/nltk/twitter/common.py b/lib/python3.10/site-packages/nltk/twitter/common.py new file mode 100644 index 0000000000000000000000000000000000000000..d9428724cfa8cae69e14d899cb73eee5607475d0 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/twitter/common.py @@ -0,0 +1,270 @@ +# Natural Language Toolkit: Twitter 
client +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# Lorenzo Rubio +# URL: +# For license information, see LICENSE.TXT + +""" +Utility functions for the `twitterclient` module which do not require +the `twython` library to have been installed. +""" +import csv +import gzip +import json + +from nltk.internals import deprecated + +HIER_SEPARATOR = "." + + +def extract_fields(tweet, fields): + """ + Extract field values from a full tweet and return them as a list + + :param json tweet: The tweet in JSON format + :param list fields: The fields to be extracted from the tweet + :rtype: list(str) + """ + out = [] + for field in fields: + try: + _add_field_to_out(tweet, field, out) + except TypeError as e: + raise RuntimeError( + "Fatal error when extracting fields. Cannot find field ", field + ) from e + return out + + +def _add_field_to_out(json, field, out): + if _is_composed_key(field): + key, value = _get_key_value_composed(field) + _add_field_to_out(json[key], value, out) + else: + out += [json[field]] + + +def _is_composed_key(field): + return HIER_SEPARATOR in field + + +def _get_key_value_composed(field): + out = field.split(HIER_SEPARATOR) + # there could be up to 3 levels + key = out[0] + value = HIER_SEPARATOR.join(out[1:]) + return key, value + + +def _get_entity_recursive(json, entity): + if not json: + return None + elif isinstance(json, dict): + for key, value in json.items(): + if key == entity: + return value + # 'entities' and 'extended_entities' are wrappers in Twitter json + # structure that contain other Twitter objects. 
See: + # https://dev.twitter.com/overview/api/entities-in-twitter-objects + + if key == "entities" or key == "extended_entities": + candidate = _get_entity_recursive(value, entity) + if candidate is not None: + return candidate + return None + elif isinstance(json, list): + for item in json: + candidate = _get_entity_recursive(item, entity) + if candidate is not None: + return candidate + return None + else: + return None + + +def json2csv( + fp, outfile, fields, encoding="utf8", errors="replace", gzip_compress=False +): + """ + Extract selected fields from a file of line-separated JSON tweets and + write to a file in CSV format. + + This utility function allows a file of full tweets to be easily converted + to a CSV file for easier processing. For example, just TweetIDs or + just the text content of the Tweets can be extracted. + + Additionally, the function allows combinations of fields of other Twitter + objects (mainly the users, see below). + + For Twitter entities (e.g. hashtags of a Tweet), and for geolocation, see + `json2csv_entities` + + :param str infile: The name of the file containing full tweets + + :param str outfile: The name of the text file where results should be\ + written + + :param list fields: The list of fields to be extracted. Useful examples\ + are 'id_str' for the tweetID and 'text' for the text of the tweet. See\ + for a full list of fields.\ + e. g.: ['id_str'], ['id', 'text', 'favorite_count', 'retweet_count']\ + Additionally, it allows IDs from other Twitter objects, e. 
g.,\ + ['id', 'text', 'user.id', 'user.followers_count', 'user.friends_count'] + + :param error: Behaviour for encoding errors, see\ + https://docs.python.org/3/library/codecs.html#codec-base-classes + + :param gzip_compress: if `True`, output files are compressed with gzip + """ + (writer, outf) = _outf_writer(outfile, encoding, errors, gzip_compress) + # write the list of fields as header + writer.writerow(fields) + # process the file + for line in fp: + tweet = json.loads(line) + row = extract_fields(tweet, fields) + writer.writerow(row) + outf.close() + + +@deprecated("Use open() and csv.writer() directly instead.") +def outf_writer_compat(outfile, encoding, errors, gzip_compress=False): + """Get a CSV writer with optional compression.""" + return _outf_writer(outfile, encoding, errors, gzip_compress) + + +def _outf_writer(outfile, encoding, errors, gzip_compress=False): + if gzip_compress: + outf = gzip.open(outfile, "wt", newline="", encoding=encoding, errors=errors) + else: + outf = open(outfile, "w", newline="", encoding=encoding, errors=errors) + writer = csv.writer(outf) + return (writer, outf) + + +def json2csv_entities( + tweets_file, + outfile, + main_fields, + entity_type, + entity_fields, + encoding="utf8", + errors="replace", + gzip_compress=False, +): + """ + Extract selected fields from a file of line-separated JSON tweets and + write to a file in CSV format. + + This utility function allows a file of full Tweets to be easily converted + to a CSV file for easier processing of Twitter entities. For example, the + hashtags or media elements of a tweet can be extracted. + + It returns one line per entity of a Tweet, e.g. 
if a tweet has two hashtags + there will be two lines in the output file, one per hashtag + + :param tweets_file: the file-like object containing full Tweets + + :param str outfile: The path of the text file where results should be\ + written + + :param list main_fields: The list of fields to be extracted from the main\ + object, usually the tweet. Useful examples: 'id_str' for the tweetID. See\ + for a full list of fields. + e. g.: ['id_str'], ['id', 'text', 'favorite_count', 'retweet_count'] + If `entity_type` is expressed with hierarchy, then it is the list of\ + fields of the object that corresponds to the key of the entity_type,\ + (e.g., for entity_type='user.urls', the fields in the main_fields list\ + belong to the user object; for entity_type='place.bounding_box', the\ + files in the main_field list belong to the place object of the tweet). + + :param list entity_type: The name of the entity: 'hashtags', 'media',\ + 'urls' and 'user_mentions' for the tweet object. For a user object,\ + this needs to be expressed with a hierarchy: `'user.urls'`. For the\ + bounding box of the Tweet location, use `'place.bounding_box'`. + + :param list entity_fields: The list of fields to be extracted from the\ + entity. E.g. 
`['text']` (of the Tweet) + + :param error: Behaviour for encoding errors, see\ + https://docs.python.org/3/library/codecs.html#codec-base-classes + + :param gzip_compress: if `True`, output files are compressed with gzip + """ + + (writer, outf) = _outf_writer(outfile, encoding, errors, gzip_compress) + header = get_header_field_list(main_fields, entity_type, entity_fields) + writer.writerow(header) + for line in tweets_file: + tweet = json.loads(line) + if _is_composed_key(entity_type): + key, value = _get_key_value_composed(entity_type) + object_json = _get_entity_recursive(tweet, key) + if not object_json: + # this can happen in the case of "place" + continue + object_fields = extract_fields(object_json, main_fields) + items = _get_entity_recursive(object_json, value) + _write_to_file(object_fields, items, entity_fields, writer) + else: + tweet_fields = extract_fields(tweet, main_fields) + items = _get_entity_recursive(tweet, entity_type) + _write_to_file(tweet_fields, items, entity_fields, writer) + outf.close() + + +def get_header_field_list(main_fields, entity_type, entity_fields): + if _is_composed_key(entity_type): + key, value = _get_key_value_composed(entity_type) + main_entity = key + sub_entity = value + else: + main_entity = None + sub_entity = entity_type + + if main_entity: + output1 = [HIER_SEPARATOR.join([main_entity, x]) for x in main_fields] + else: + output1 = main_fields + output2 = [HIER_SEPARATOR.join([sub_entity, x]) for x in entity_fields] + return output1 + output2 + + +def _write_to_file(object_fields, items, entity_fields, writer): + if not items: + # it could be that the entity is just not present for the tweet + # e.g. tweet hashtag is always present, even as [], however + # tweet media may not be present + return + if isinstance(items, dict): + # this happens e.g. 
for "place" of a tweet + row = object_fields + # there might be composed keys in de list of required fields + entity_field_values = [x for x in entity_fields if not _is_composed_key(x)] + entity_field_composed = [x for x in entity_fields if _is_composed_key(x)] + for field in entity_field_values: + value = items[field] + if isinstance(value, list): + row += value + else: + row += [value] + # now check required dictionaries + for d in entity_field_composed: + kd, vd = _get_key_value_composed(d) + json_dict = items[kd] + if not isinstance(json_dict, dict): + raise RuntimeError( + """Key {} does not contain a dictionary + in the json file""".format( + kd + ) + ) + row += [json_dict[vd]] + writer.writerow(row) + return + # in general it is a list + for item in items: + row = object_fields + extract_fields(item, entity_fields) + writer.writerow(row) diff --git a/lib/python3.10/site-packages/nltk/twitter/twitter_demo.py b/lib/python3.10/site-packages/nltk/twitter/twitter_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..554bdfef511190b28504f9ded8dc8a6098e16ed9 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/twitter/twitter_demo.py @@ -0,0 +1,306 @@ +# Natural Language Toolkit: Twitter client +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# Lorenzo Rubio +# URL: +# For license information, see LICENSE.TXT + +""" +Examples to demo the :py:mod:`twitterclient` code. + +These demo functions should all run, with the following caveats: + +* You must have obtained API keys from Twitter, and installed them according to + the instructions in the `twitter HOWTO `_. + +* If you are on a slow network, some of the calls to the Twitter API may + timeout. + +* If you are being rate limited while searching, you will receive a 420 + error response. + +* Your terminal window / console must be able to display UTF-8 encoded characters. 
+ +For documentation about the Twitter APIs, see `The Streaming APIs Overview +`_ and `The REST APIs Overview +`_. + +For error codes see Twitter's +`Error Codes and Responses ` +""" + +import datetime +import json +from functools import wraps +from io import StringIO + +from nltk.twitter import ( + Query, + Streamer, + TweetViewer, + TweetWriter, + Twitter, + credsfromfile, +) + +SPACER = "###################################" + + +def verbose(func): + """Decorator for demo functions""" + + @wraps(func) + def with_formatting(*args, **kwargs): + print() + print(SPACER) + print("Using %s" % (func.__name__)) + print(SPACER) + return func(*args, **kwargs) + + return with_formatting + + +def yesterday(): + """ + Get yesterday's datetime as a 5-tuple. + """ + date = datetime.datetime.now() + date -= datetime.timedelta(days=1) + date_tuple = date.timetuple()[:6] + return date_tuple + + +def setup(): + """ + Initialize global variables for the demos. + """ + global USERIDS, FIELDS + + USERIDS = ["759251", "612473", "15108702", "6017542", "2673523800"] + # UserIDs corresponding to\ + # @CNN, @BBCNews, @ReutersLive, @BreakingNews, @AJELive + FIELDS = ["id_str"] + + +@verbose +def twitterclass_demo(): + """ + Use the simplified :class:`Twitter` class to write some tweets to a file. + """ + tw = Twitter() + print("Track from the public stream\n") + tw.tweets(keywords="love, hate", limit=10) # public stream + print(SPACER) + print("Search past Tweets\n") + tw = Twitter() + tw.tweets(keywords="love, hate", stream=False, limit=10) # search past tweets + print(SPACER) + print( + "Follow two accounts in the public stream" + + " -- be prepared to wait a few minutes\n" + ) + tw = Twitter() + tw.tweets(follow=["759251", "6017542"], stream=True, limit=5) # public stream + + +@verbose +def sampletoscreen_demo(limit=20): + """ + Sample from the Streaming API and send output to terminal. 
+ """ + oauth = credsfromfile() + client = Streamer(**oauth) + client.register(TweetViewer(limit=limit)) + client.sample() + + +@verbose +def tracktoscreen_demo(track="taylor swift", limit=10): + """ + Track keywords from the public Streaming API and send output to terminal. + """ + oauth = credsfromfile() + client = Streamer(**oauth) + client.register(TweetViewer(limit=limit)) + client.filter(track=track) + + +@verbose +def search_demo(keywords="nltk"): + """ + Use the REST API to search for past tweets containing a given keyword. + """ + oauth = credsfromfile() + client = Query(**oauth) + for tweet in client.search_tweets(keywords=keywords, limit=10): + print(tweet["text"]) + + +@verbose +def tweets_by_user_demo(user="NLTK_org", count=200): + """ + Use the REST API to search for past tweets by a given user. + """ + oauth = credsfromfile() + client = Query(**oauth) + client.register(TweetWriter()) + client.user_tweets(user, count) + + +@verbose +def lookup_by_userid_demo(): + """ + Use the REST API to convert a userID to a screen name. + """ + oauth = credsfromfile() + client = Query(**oauth) + user_info = client.user_info_from_id(USERIDS) + for info in user_info: + name = info["screen_name"] + followers = info["followers_count"] + following = info["friends_count"] + print(f"{name}, followers: {followers}, following: {following}") + + +@verbose +def followtoscreen_demo(limit=10): + """ + Using the Streaming API, select just the tweets from a specified list of + userIDs. + + This is will only give results in a reasonable time if the users in + question produce a high volume of tweets, and may even so show some delay. + """ + oauth = credsfromfile() + client = Streamer(**oauth) + client.register(TweetViewer(limit=limit)) + client.statuses.filter(follow=USERIDS) + + +@verbose +def streamtofile_demo(limit=20): + """ + Write 20 tweets sampled from the public Streaming API to a file. 
+ """ + oauth = credsfromfile() + client = Streamer(**oauth) + client.register(TweetWriter(limit=limit, repeat=False)) + client.statuses.sample() + + +@verbose +def limit_by_time_demo(keywords="nltk"): + """ + Query the REST API for Tweets about NLTK since yesterday and send + the output to terminal. + + This example makes the assumption that there are sufficient Tweets since + yesterday for the date to be an effective cut-off. + """ + date = yesterday() + dt_date = datetime.datetime(*date) + oauth = credsfromfile() + client = Query(**oauth) + client.register(TweetViewer(limit=100, lower_date_limit=date)) + + print(f"Cutoff date: {dt_date}\n") + + for tweet in client.search_tweets(keywords=keywords): + print("{} ".format(tweet["created_at"]), end="") + client.handler.handle(tweet) + + +@verbose +def corpusreader_demo(): + """ + Use `TwitterCorpusReader` tp read a file of tweets, and print out + + * some full tweets in JSON format; + * some raw strings from the tweets (i.e., the value of the `text` field); and + * the result of tokenising the raw strings. + + """ + from nltk.corpus import twitter_samples as tweets + + print() + print("Complete tweet documents") + print(SPACER) + for tweet in tweets.docs("tweets.20150430-223406.json")[:1]: + print(json.dumps(tweet, indent=1, sort_keys=True)) + + print() + print("Raw tweet strings:") + print(SPACER) + for text in tweets.strings("tweets.20150430-223406.json")[:15]: + print(text) + + print() + print("Tokenized tweet strings:") + print(SPACER) + for toks in tweets.tokenized("tweets.20150430-223406.json")[:15]: + print(toks) + + +@verbose +def expand_tweetids_demo(): + """ + Given a file object containing a list of Tweet IDs, fetch the + corresponding full Tweets, if available. 
+ + """ + ids_f = StringIO( + """\ + 588665495492124672 + 588665495487909888 + 588665495508766721 + 588665495513006080 + 588665495517200384 + 588665495487811584 + 588665495525588992 + 588665495487844352 + 588665495492014081 + 588665495512948737""" + ) + oauth = credsfromfile() + client = Query(**oauth) + hydrated = client.expand_tweetids(ids_f) + + for tweet in hydrated: + id_str = tweet["id_str"] + print(f"id: {id_str}") + text = tweet["text"] + if text.startswith("@null"): + text = "[Tweet not available]" + print(text + "\n") + + +ALL = [ + twitterclass_demo, + sampletoscreen_demo, + tracktoscreen_demo, + search_demo, + tweets_by_user_demo, + lookup_by_userid_demo, + followtoscreen_demo, + streamtofile_demo, + limit_by_time_demo, + corpusreader_demo, + expand_tweetids_demo, +] + +""" +Select demo functions to run. E.g. replace the following line with "DEMOS = +ALL[8:]" to execute only the final three demos. +""" +DEMOS = ALL[:] + +if __name__ == "__main__": + setup() + + for demo in DEMOS: + demo() + + print("\n" + SPACER) + print("All demos completed") + print(SPACER) diff --git a/lib/python3.10/site-packages/nltk/twitter/twitterclient.py b/lib/python3.10/site-packages/nltk/twitter/twitterclient.py new file mode 100644 index 0000000000000000000000000000000000000000..d556738e0849faf35454166cec8a5949fcca93dc --- /dev/null +++ b/lib/python3.10/site-packages/nltk/twitter/twitterclient.py @@ -0,0 +1,564 @@ +# Natural Language Toolkit: Twitter client +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# Lorenzo Rubio +# URL: +# For license information, see LICENSE.TXT + + +""" +NLTK Twitter client + +This module offers methods for collecting and processing Tweets. Most of the +functionality depends on access to the Twitter APIs, and this is handled via +the third party Twython library. + +If one of the methods below returns an integer, it is probably a `Twitter +error code `_. 
For +example, the response of '420' means that you have reached the limit of the +requests you can currently make to the Twitter API. Currently, `rate limits +for the search API `_ are +divided into 15 minute windows. +""" + +import datetime +import gzip +import itertools +import json +import os +import time + +import requests +from twython import Twython, TwythonStreamer +from twython.exceptions import TwythonError, TwythonRateLimitError + +from nltk.twitter.api import BasicTweetHandler, TweetHandlerI +from nltk.twitter.util import credsfromfile, guess_path + + +class Streamer(TwythonStreamer): + """ + Retrieve data from the Twitter Streaming API. + + The streaming API requires + `OAuth 1.0 `_ authentication. + """ + + def __init__(self, app_key, app_secret, oauth_token, oauth_token_secret): + + self.handler = None + self.do_continue = True + TwythonStreamer.__init__( + self, app_key, app_secret, oauth_token, oauth_token_secret + ) + + def register(self, handler): + """ + Register a method for handling Tweets. + + :param TweetHandlerI handler: method for viewing + """ + self.handler = handler + + def on_success(self, data): + """ + :param data: response from Twitter API + """ + if self.do_continue: + if self.handler is not None: + if "text" in data: + self.handler.counter += 1 + self.handler.handle(data) + self.do_continue = self.handler.do_continue() + else: + raise ValueError("No data handler has been registered.") + else: + self.disconnect() + self.handler.on_finish() + + def on_error(self, status_code, data): + """ + :param status_code: The status code returned by the Twitter API + :param data: The response from Twitter API + + """ + print(status_code) + + def sample(self): + """ + Wrapper for 'statuses / sample' API call + """ + while self.do_continue: + + # Stream in an endless loop until limit is reached. 
See twython + # issue 288: https://github.com/ryanmcgrath/twython/issues/288 + # colditzjb commented on 9 Dec 2014 + + try: + self.statuses.sample() + except requests.exceptions.ChunkedEncodingError as e: + if e is not None: + print(f"Error (stream will continue): {e}") + continue + + def filter(self, track="", follow="", lang="en"): + """ + Wrapper for 'statuses / filter' API call + """ + while self.do_continue: + # Stream in an endless loop until limit is reached + + try: + if track == "" and follow == "": + msg = "Please supply a value for 'track', 'follow'" + raise ValueError(msg) + self.statuses.filter(track=track, follow=follow, lang=lang) + except requests.exceptions.ChunkedEncodingError as e: + if e is not None: + print(f"Error (stream will continue): {e}") + continue + + +class Query(Twython): + """ + Retrieve data from the Twitter REST API. + """ + + def __init__(self, app_key, app_secret, oauth_token, oauth_token_secret): + """ + :param app_key: (optional) Your applications key + :param app_secret: (optional) Your applications secret key + :param oauth_token: (optional) When using **OAuth 1**, combined with + oauth_token_secret to make authenticated calls + :param oauth_token_secret: (optional) When using **OAuth 1** combined + with oauth_token to make authenticated calls + """ + self.handler = None + self.do_continue = True + Twython.__init__(self, app_key, app_secret, oauth_token, oauth_token_secret) + + def register(self, handler): + """ + Register a method for handling Tweets. + + :param TweetHandlerI handler: method for viewing or writing Tweets to a file. + """ + self.handler = handler + + def expand_tweetids(self, ids_f, verbose=True): + """ + Given a file object containing a list of Tweet IDs, fetch the + corresponding full Tweets from the Twitter API. + + The API call `statuses/lookup` will fail to retrieve a Tweet if the + user has deleted it. + + This call to the Twitter API is rate-limited. See + for details. 
+ + :param ids_f: input file object consisting of Tweet IDs, one to a line + :return: iterable of Tweet objects in JSON format + """ + ids = [line.strip() for line in ids_f if line] + + if verbose: + print(f"Counted {len(ids)} Tweet IDs in {ids_f}.") + + # The Twitter endpoint takes lists of up to 100 ids, so we chunk the + # ids. + id_chunks = [ids[i : i + 100] for i in range(0, len(ids), 100)] + + chunked_tweets = (self.lookup_status(id=chunk) for chunk in id_chunks) + + return itertools.chain.from_iterable(chunked_tweets) + + def _search_tweets(self, keywords, limit=100, lang="en"): + """ + Assumes that the handler has been informed. Fetches Tweets from + search_tweets generator output and passses them to handler + + :param str keywords: A list of query terms to search for, written as\ + a comma-separated string. + :param int limit: Number of Tweets to process + :param str lang: language + """ + while True: + tweets = self.search_tweets( + keywords=keywords, limit=limit, lang=lang, max_id=self.handler.max_id + ) + for tweet in tweets: + self.handler.handle(tweet) + if not (self.handler.do_continue() and self.handler.repeat): + break + self.handler.on_finish() + + def search_tweets( + self, + keywords, + limit=100, + lang="en", + max_id=None, + retries_after_twython_exception=0, + ): + """ + Call the REST API ``'search/tweets'`` endpoint with some plausible + defaults. See `the Twitter search documentation + `_ for more information + about admissible search parameters. 
+ + :param str keywords: A list of query terms to search for, written as\ + a comma-separated string + :param int limit: Number of Tweets to process + :param str lang: language + :param int max_id: id of the last tweet fetched + :param int retries_after_twython_exception: number of retries when\ + searching Tweets before raising an exception + :rtype: python generator + """ + if not self.handler: + # if no handler is provided, `BasicTweetHandler` provides minimum + # functionality for limiting the number of Tweets retrieved + self.handler = BasicTweetHandler(limit=limit) + + count_from_query = 0 + if max_id: + self.handler.max_id = max_id + else: + results = self.search( + q=keywords, count=min(100, limit), lang=lang, result_type="recent" + ) + count = len(results["statuses"]) + if count == 0: + print("No Tweets available through REST API for those keywords") + return + count_from_query = count + self.handler.max_id = results["statuses"][count - 1]["id"] - 1 + + for result in results["statuses"]: + yield result + self.handler.counter += 1 + if self.handler.do_continue() == False: + return + + # Pagination loop: keep fetching Tweets until the desired count is + # reached while dealing with Twitter rate limits. 
+ retries = 0 + while count_from_query < limit: + try: + mcount = min(100, limit - count_from_query) + results = self.search( + q=keywords, + count=mcount, + lang=lang, + max_id=self.handler.max_id, + result_type="recent", + ) + except TwythonRateLimitError as e: + print(f"Waiting for 15 minutes -{e}") + time.sleep(15 * 60) # wait 15 minutes + continue + except TwythonError as e: + print(f"Fatal error in Twython request -{e}") + if retries_after_twython_exception == retries: + raise e + retries += 1 + + count = len(results["statuses"]) + if count == 0: + print("No more Tweets available through rest api") + return + count_from_query += count + # the max_id is also present in the Tweet metadata + # results['search_metadata']['next_results'], but as part of a + # query and difficult to fetch. This is doing the equivalent + # (last tweet id minus one) + self.handler.max_id = results["statuses"][count - 1]["id"] - 1 + + for result in results["statuses"]: + yield result + self.handler.counter += 1 + if self.handler.do_continue() == False: + return + + def user_info_from_id(self, userids): + """ + Convert a list of userIDs into a variety of information about the users. + + See . 
+ + :param list userids: A list of integer strings corresponding to Twitter userIDs + :rtype: list(json) + """ + return [self.show_user(user_id=userid) for userid in userids] + + def user_tweets(self, screen_name, limit, include_rts="false"): + """ + Return a collection of the most recent Tweets posted by the user + + :param str user: The user's screen name; the initial '@' symbol\ + should be omitted + :param int limit: The number of Tweets to recover; 200 is the maximum allowed + :param str include_rts: Whether to include statuses which have been\ + retweeted by the user; possible values are 'true' and 'false' + """ + data = self.get_user_timeline( + screen_name=screen_name, count=limit, include_rts=include_rts + ) + for item in data: + self.handler.handle(item) + + +class Twitter: + """ + Wrapper class with restricted functionality and fewer options. + """ + + def __init__(self): + self._oauth = credsfromfile() + self.streamer = Streamer(**self._oauth) + self.query = Query(**self._oauth) + + def tweets( + self, + keywords="", + follow="", + to_screen=True, + stream=True, + limit=100, + date_limit=None, + lang="en", + repeat=False, + gzip_compress=False, + ): + """ + Process some Tweets in a simple manner. + + :param str keywords: Keywords to use for searching or filtering + :param list follow: UserIDs to use for filtering Tweets from the public stream + :param bool to_screen: If `True`, display the tweet texts on the screen,\ + otherwise print to a file + + :param bool stream: If `True`, use the live public stream,\ + otherwise search past public Tweets + + :param int limit: The number of data items to process in the current\ + round of processing. + + :param tuple date_limit: The date at which to stop collecting\ + new data. This should be entered as a tuple which can serve as the\ + argument to `datetime.datetime`.\ + E.g. `date_limit=(2015, 4, 1, 12, 40)` for 12:30 pm on April 1 2015. 
+ Note that, in the case of streaming, this is the maximum date, i.e.\ + a date in the future; if not, it is the minimum date, i.e. a date\ + in the past + + :param str lang: language + + :param bool repeat: A flag to determine whether multiple files should\ + be written. If `True`, the length of each file will be set by the\ + value of `limit`. Use only if `to_screen` is `False`. See also + :py:func:`handle`. + + :param gzip_compress: if `True`, output files are compressed with gzip. + """ + if stream: + upper_date_limit = date_limit + lower_date_limit = None + else: + upper_date_limit = None + lower_date_limit = date_limit + + if to_screen: + handler = TweetViewer( + limit=limit, + upper_date_limit=upper_date_limit, + lower_date_limit=lower_date_limit, + ) + else: + handler = TweetWriter( + limit=limit, + upper_date_limit=upper_date_limit, + lower_date_limit=lower_date_limit, + repeat=repeat, + gzip_compress=gzip_compress, + ) + + if to_screen: + handler = TweetViewer(limit=limit) + else: + if stream: + upper_date_limit = date_limit + lower_date_limit = None + else: + upper_date_limit = None + lower_date_limit = date_limit + + handler = TweetWriter( + limit=limit, + upper_date_limit=upper_date_limit, + lower_date_limit=lower_date_limit, + repeat=repeat, + gzip_compress=gzip_compress, + ) + + if stream: + self.streamer.register(handler) + if keywords == "" and follow == "": + self.streamer.sample() + else: + self.streamer.filter(track=keywords, follow=follow, lang=lang) + else: + self.query.register(handler) + if keywords == "": + raise ValueError("Please supply at least one keyword to search for.") + else: + self.query._search_tweets(keywords, limit=limit, lang=lang) + + +class TweetViewer(TweetHandlerI): + """ + Handle data by sending it to the terminal. + """ + + def handle(self, data): + """ + Direct data to `sys.stdout` + + :return: return ``False`` if processing should cease, otherwise return ``True``. 
+ :rtype: bool + :param data: Tweet object returned by Twitter API + """ + text = data["text"] + print(text) + + self.check_date_limit(data) + if self.do_stop: + return + + def on_finish(self): + print(f"Written {self.counter} Tweets") + + +class TweetWriter(TweetHandlerI): + """ + Handle data by writing it to a file. + """ + + def __init__( + self, + limit=2000, + upper_date_limit=None, + lower_date_limit=None, + fprefix="tweets", + subdir="twitter-files", + repeat=False, + gzip_compress=False, + ): + """ + The difference between the upper and lower date limits depends on + whether Tweets are coming in an ascending date order (i.e. when + streaming) or descending date order (i.e. when searching past Tweets). + + :param int limit: number of data items to process in the current\ + round of processing. + + :param tuple upper_date_limit: The date at which to stop collecting new\ + data. This should be entered as a tuple which can serve as the\ + argument to `datetime.datetime`. E.g. `upper_date_limit=(2015, 4, 1, 12,\ + 40)` for 12:30 pm on April 1 2015. + + :param tuple lower_date_limit: The date at which to stop collecting new\ + data. See `upper_data_limit` for formatting. + + :param str fprefix: The prefix to use in creating file names for Tweet\ + collections. + + :param str subdir: The name of the directory where Tweet collection\ + files should be stored. + + :param bool repeat: flag to determine whether multiple files should be\ + written. If `True`, the length of each file will be set by the value\ + of `limit`. See also :py:func:`handle`. + + :param gzip_compress: if `True`, output files are compressed with gzip. 
+ """ + self.fprefix = fprefix + self.subdir = guess_path(subdir) + self.gzip_compress = gzip_compress + self.fname = self.timestamped_file() + self.repeat = repeat + self.output = None + TweetHandlerI.__init__(self, limit, upper_date_limit, lower_date_limit) + + def timestamped_file(self): + """ + :return: timestamped file name + :rtype: str + """ + subdir = self.subdir + fprefix = self.fprefix + if subdir: + if not os.path.exists(subdir): + os.mkdir(subdir) + + fname = os.path.join(subdir, fprefix) + fmt = "%Y%m%d-%H%M%S" + timestamp = datetime.datetime.now().strftime(fmt) + if self.gzip_compress: + suffix = ".gz" + else: + suffix = "" + outfile = f"{fname}.{timestamp}.json{suffix}" + return outfile + + def handle(self, data): + """ + Write Twitter data as line-delimited JSON into one or more files. + + :return: return `False` if processing should cease, otherwise return `True`. + :param data: tweet object returned by Twitter API + """ + if self.startingup: + if self.gzip_compress: + self.output = gzip.open(self.fname, "w") + else: + self.output = open(self.fname, "w") + print(f"Writing to {self.fname}") + + json_data = json.dumps(data) + if self.gzip_compress: + self.output.write((json_data + "\n").encode("utf-8")) + else: + self.output.write(json_data + "\n") + + self.check_date_limit(data) + if self.do_stop: + return + + self.startingup = False + + def on_finish(self): + print(f"Written {self.counter} Tweets") + if self.output: + self.output.close() + + def do_continue(self): + if self.repeat == False: + return TweetHandlerI.do_continue(self) + + if self.do_stop: + # stop for a functional cause (e.g. 
date limit) + return False + + if self.counter == self.limit: + # repeat is True, thus close output file and + # create a new one + self._restart_file() + return True + + def _restart_file(self): + self.on_finish() + self.fname = self.timestamped_file() + self.startingup = True + self.counter = 0 diff --git a/lib/python3.10/site-packages/nltk/twitter/util.py b/lib/python3.10/site-packages/nltk/twitter/util.py new file mode 100644 index 0000000000000000000000000000000000000000..adfa08853867280da85642778c3e9fb89a532574 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/twitter/util.py @@ -0,0 +1,147 @@ +# Natural Language Toolkit: Twitter client +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# Lorenzo Rubio +# URL: +# For license information, see LICENSE.TXT + +""" +Authentication utilities to accompany `twitterclient`. +""" + +import os +import pprint + +from twython import Twython + + +def credsfromfile(creds_file=None, subdir=None, verbose=False): + """ + Convenience function for authentication + """ + return Authenticate().load_creds( + creds_file=creds_file, subdir=subdir, verbose=verbose + ) + + +class Authenticate: + """ + Methods for authenticating with Twitter. + """ + + def __init__(self): + self.creds_file = "credentials.txt" + self.creds_fullpath = None + + self.oauth = {} + try: + self.twitter_dir = os.environ["TWITTER"] + self.creds_subdir = self.twitter_dir + except KeyError: + self.twitter_dir = None + self.creds_subdir = None + + def load_creds(self, creds_file=None, subdir=None, verbose=False): + """ + Read OAuth credentials from a text file. + + File format for OAuth 1:: + + app_key=YOUR_APP_KEY + app_secret=YOUR_APP_SECRET + oauth_token=OAUTH_TOKEN + oauth_token_secret=OAUTH_TOKEN_SECRET + + + File format for OAuth 2:: + + app_key=YOUR_APP_KEY + app_secret=YOUR_APP_SECRET + access_token=ACCESS_TOKEN + + :param str file_name: File containing credentials. 
``None`` (default) reads + data from `TWITTER/'credentials.txt'` + """ + if creds_file is not None: + self.creds_file = creds_file + + if subdir is None: + if self.creds_subdir is None: + msg = ( + "Supply a value to the 'subdir' parameter or" + + " set the TWITTER environment variable." + ) + raise ValueError(msg) + else: + self.creds_subdir = subdir + + self.creds_fullpath = os.path.normpath( + os.path.join(self.creds_subdir, self.creds_file) + ) + + if not os.path.isfile(self.creds_fullpath): + raise OSError(f"Cannot find file {self.creds_fullpath}") + + with open(self.creds_fullpath) as infile: + if verbose: + print(f"Reading credentials file {self.creds_fullpath}") + + for line in infile: + if "=" in line: + name, value = line.split("=", 1) + self.oauth[name.strip()] = value.strip() + + self._validate_creds_file(verbose=verbose) + + return self.oauth + + def _validate_creds_file(self, verbose=False): + """Check validity of a credentials file.""" + oauth1 = False + oauth1_keys = ["app_key", "app_secret", "oauth_token", "oauth_token_secret"] + oauth2 = False + oauth2_keys = ["app_key", "app_secret", "access_token"] + if all(k in self.oauth for k in oauth1_keys): + oauth1 = True + elif all(k in self.oauth for k in oauth2_keys): + oauth2 = True + + if not (oauth1 or oauth2): + msg = f"Missing or incorrect entries in {self.creds_file}\n" + msg += pprint.pformat(self.oauth) + raise ValueError(msg) + elif verbose: + print(f'Credentials file "{self.creds_file}" looks good') + + +def add_access_token(creds_file=None): + """ + For OAuth 2, retrieve an access token for an app and append it to a + credentials file. 
+ """ + if creds_file is None: + path = os.path.dirname(__file__) + creds_file = os.path.join(path, "credentials2.txt") + oauth2 = credsfromfile(creds_file=creds_file) + app_key = oauth2["app_key"] + app_secret = oauth2["app_secret"] + + twitter = Twython(app_key, app_secret, oauth_version=2) + access_token = twitter.obtain_access_token() + tok = f"access_token={access_token}\n" + with open(creds_file, "a") as infile: + print(tok, file=infile) + + +def guess_path(pth): + """ + If the path is not absolute, guess that it is a subdirectory of the + user's home directory. + + :param str pth: The pathname of the directory where files of tweets should be written + """ + if os.path.isabs(pth): + return pth + else: + return os.path.expanduser(os.path.join("~", pth)) diff --git a/lib/python3.10/site-packages/rpds/rpds.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/rpds/rpds.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..a34fcab1e903baf7326a0573edb9f39cb94fd2c3 --- /dev/null +++ b/lib/python3.10/site-packages/rpds/rpds.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:941637458f4b80f989c33357a9d50936ad3556fbecd6bb5c58a153946b779a04 +size 1039544