Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- lib/python3.10/site-packages/grpc/aio/_call.py +764 -0
- lib/python3.10/site-packages/grpc/aio/_typing.py +43 -0
- lib/python3.10/site-packages/grpc/beta/__init__.py +13 -0
- lib/python3.10/site-packages/grpc/beta/_client_adaptations.py +1015 -0
- lib/python3.10/site-packages/grpc/beta/_metadata.py +56 -0
- lib/python3.10/site-packages/grpc/beta/_server_adaptations.py +465 -0
- lib/python3.10/site-packages/grpc/beta/implementations.py +345 -0
- lib/python3.10/site-packages/grpc/beta/interfaces.py +163 -0
- lib/python3.10/site-packages/grpc/beta/utilities.py +153 -0
- lib/python3.10/site-packages/nltk-3.8.1.dist-info/entry_points.txt +3 -0
- lib/python3.10/site-packages/nltk/VERSION +1 -0
- lib/python3.10/site-packages/nltk/__init__.py +209 -0
- lib/python3.10/site-packages/nltk/book.py +213 -0
- lib/python3.10/site-packages/nltk/cli.py +55 -0
- lib/python3.10/site-packages/nltk/collections.py +661 -0
- lib/python3.10/site-packages/nltk/collocations.py +412 -0
- lib/python3.10/site-packages/nltk/compat.py +43 -0
- lib/python3.10/site-packages/nltk/data.py +1441 -0
- lib/python3.10/site-packages/nltk/decorators.py +251 -0
- lib/python3.10/site-packages/nltk/downloader.py +2559 -0
- lib/python3.10/site-packages/nltk/featstruct.py +0 -0
- lib/python3.10/site-packages/nltk/grammar.py +1708 -0
- lib/python3.10/site-packages/nltk/help.py +64 -0
- lib/python3.10/site-packages/nltk/internals.py +1123 -0
- lib/python3.10/site-packages/nltk/jsontags.py +65 -0
- lib/python3.10/site-packages/nltk/langnames.py +730 -0
- lib/python3.10/site-packages/nltk/lazyimport.py +142 -0
- lib/python3.10/site-packages/nltk/probability.py +2578 -0
- lib/python3.10/site-packages/nltk/test/bleu.doctest +29 -0
- lib/python3.10/site-packages/nltk/test/ccg.doctest +376 -0
- lib/python3.10/site-packages/nltk/test/chat80.doctest +232 -0
- lib/python3.10/site-packages/nltk/test/childes_fixt.py +13 -0
- lib/python3.10/site-packages/nltk/test/classify_fixt.py +5 -0
- lib/python3.10/site-packages/nltk/test/concordance.doctest +75 -0
- lib/python3.10/site-packages/nltk/test/corpus.doctest +0 -0
- lib/python3.10/site-packages/nltk/test/data.doctest +387 -0
- lib/python3.10/site-packages/nltk/test/dependency.doctest +241 -0
- lib/python3.10/site-packages/nltk/test/discourse.doctest +552 -0
- lib/python3.10/site-packages/nltk/test/featgram.doctest +610 -0
- lib/python3.10/site-packages/nltk/test/featstruct.doctest +1229 -0
- lib/python3.10/site-packages/nltk/test/framenet.doctest +288 -0
- lib/python3.10/site-packages/nltk/test/generate.doctest +78 -0
- lib/python3.10/site-packages/nltk/test/gensim.doctest +141 -0
- lib/python3.10/site-packages/nltk/test/gluesemantics_malt_fixt.py +9 -0
- lib/python3.10/site-packages/nltk/test/grammar.doctest +69 -0
- lib/python3.10/site-packages/nltk/test/grammartestsuites.doctest +109 -0
- lib/python3.10/site-packages/nltk/test/japanese.doctest +48 -0
- lib/python3.10/site-packages/nltk/test/lm.doctest +135 -0
- lib/python3.10/site-packages/nltk/test/meteor.doctest +54 -0
- lib/python3.10/site-packages/nltk/test/nonmonotonic.doctest +293 -0
lib/python3.10/site-packages/grpc/aio/_call.py
ADDED
|
@@ -0,0 +1,764 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 gRPC authors.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
"""Invocation-side implementation of gRPC Asyncio Python."""
|
| 15 |
+
|
| 16 |
+
import asyncio
|
| 17 |
+
import enum
|
| 18 |
+
from functools import partial
|
| 19 |
+
import inspect
|
| 20 |
+
import logging
|
| 21 |
+
import traceback
|
| 22 |
+
from typing import (
|
| 23 |
+
Any,
|
| 24 |
+
AsyncIterator,
|
| 25 |
+
Generator,
|
| 26 |
+
Generic,
|
| 27 |
+
Optional,
|
| 28 |
+
Tuple,
|
| 29 |
+
Union,
|
| 30 |
+
)
|
| 31 |
+
|
| 32 |
+
import grpc
|
| 33 |
+
from grpc import _common
|
| 34 |
+
from grpc._cython import cygrpc
|
| 35 |
+
|
| 36 |
+
from . import _base_call
|
| 37 |
+
from ._metadata import Metadata
|
| 38 |
+
from ._typing import DeserializingFunction
|
| 39 |
+
from ._typing import DoneCallbackType
|
| 40 |
+
from ._typing import EOFType
|
| 41 |
+
from ._typing import MetadatumType
|
| 42 |
+
from ._typing import RequestIterableType
|
| 43 |
+
from ._typing import RequestType
|
| 44 |
+
from ._typing import ResponseType
|
| 45 |
+
from ._typing import SerializingFunction
|
| 46 |
+
|
| 47 |
+
__all__ = "AioRpcError", "Call", "UnaryUnaryCall", "UnaryStreamCall"
|
| 48 |
+
|
| 49 |
+
_LOCAL_CANCELLATION_DETAILS = "Locally cancelled by application!"
|
| 50 |
+
_GC_CANCELLATION_DETAILS = "Cancelled upon garbage collection!"
|
| 51 |
+
_RPC_ALREADY_FINISHED_DETAILS = "RPC already finished."
|
| 52 |
+
_RPC_HALF_CLOSED_DETAILS = 'RPC is half closed after calling "done_writing".'
|
| 53 |
+
_API_STYLE_ERROR = (
|
| 54 |
+
"The iterator and read/write APIs may not be mixed on a single RPC."
|
| 55 |
+
)
|
| 56 |
+
|
| 57 |
+
_OK_CALL_REPRESENTATION = (
|
| 58 |
+
'<{} of RPC that terminated with:\n\tstatus = {}\n\tdetails = "{}"\n>'
|
| 59 |
+
)
|
| 60 |
+
|
| 61 |
+
_NON_OK_CALL_REPRESENTATION = (
|
| 62 |
+
"<{} of RPC that terminated with:\n"
|
| 63 |
+
"\tstatus = {}\n"
|
| 64 |
+
'\tdetails = "{}"\n'
|
| 65 |
+
'\tdebug_error_string = "{}"\n'
|
| 66 |
+
">"
|
| 67 |
+
)
|
| 68 |
+
|
| 69 |
+
_LOGGER = logging.getLogger(__name__)
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
class AioRpcError(grpc.RpcError):
    """An implementation of RpcError to be used by the asynchronous API.

    The raised RpcError is a snapshot of the final status of the RPC: all
    values are already determined by the time it is constructed, hence its
    accessor methods do not need to be coroutines.
    """

    # Immutable snapshot of the terminated RPC's final state.
    _code: grpc.StatusCode
    _details: Optional[str]
    _initial_metadata: Optional[Metadata]
    _trailing_metadata: Optional[Metadata]
    _debug_error_string: Optional[str]

    def __init__(
        self,
        code: grpc.StatusCode,
        initial_metadata: Metadata,
        trailing_metadata: Metadata,
        details: Optional[str] = None,
        debug_error_string: Optional[str] = None,
    ) -> None:
        """Constructor.

        Args:
          code: The status code with which the RPC has been finalized.
          initial_metadata: Optional initial metadata that could be sent by the
            Server.
          trailing_metadata: Optional metadata that could be sent by the Server.
          details: Optional details explaining the reason of the error.
          debug_error_string: Optional debug string from Core.
        """

        super().__init__()
        self._code = code
        self._details = details
        self._initial_metadata = initial_metadata
        self._trailing_metadata = trailing_metadata
        self._debug_error_string = debug_error_string

    def code(self) -> grpc.StatusCode:
        """Accesses the status code sent by the server.

        Returns:
          The `grpc.StatusCode` status code.
        """
        return self._code

    def details(self) -> Optional[str]:
        """Accesses the details sent by the server.

        Returns:
          The description of the error.
        """
        return self._details

    def initial_metadata(self) -> Metadata:
        """Accesses the initial metadata sent by the server.

        Returns:
          The initial metadata received.
        """
        return self._initial_metadata

    def trailing_metadata(self) -> Metadata:
        """Accesses the trailing metadata sent by the server.

        Returns:
          The trailing metadata received.
        """
        return self._trailing_metadata

    def debug_error_string(self) -> str:
        """Accesses the debug error string sent by the server.

        Returns:
          The debug error string received.
        """
        return self._debug_error_string

    def _repr(self) -> str:
        """Assembles the error string for the RPC error."""
        return _NON_OK_CALL_REPRESENTATION.format(
            self.__class__.__name__,
            self._code,
            self._details,
            self._debug_error_string,
        )

    def __repr__(self) -> str:
        return self._repr()

    def __str__(self) -> str:
        return self._repr()

    def __reduce__(self):
        # Support pickling: reconstruct via the constructor with the same
        # positional arguments captured at raise time.
        return (
            type(self),
            (
                self._code,
                self._initial_metadata,
                self._trailing_metadata,
                self._details,
                self._debug_error_string,
            ),
        )
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
def _create_rpc_error(
    initial_metadata: Metadata, status: cygrpc.AioRpcStatus
) -> AioRpcError:
    """Builds an AioRpcError snapshot from Core's final RPC status.

    Args:
      initial_metadata: Raw initial metadata tuple received from Core.
      status: The terminal status object produced by the Cython layer.

    Returns:
      A fully-populated AioRpcError ready to be raised.
    """
    # Translate the Cython-level status code into the public enum first.
    grpc_status_code = _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE[status.code()]
    return AioRpcError(
        grpc_status_code,
        Metadata.from_tuple(initial_metadata),
        Metadata.from_tuple(status.trailing_metadata()),
        details=status.details(),
        debug_error_string=status.debug_error_string(),
    )
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
class Call:
    """Base implementation of client RPC Call object.

    Implements logic around final status, metadata and cancellation.
    All concrete call classes (unary/stream variants) derive from this.
    """

    _loop: asyncio.AbstractEventLoop
    _code: grpc.StatusCode
    _cython_call: cygrpc._AioCall
    _metadata: Tuple[MetadatumType, ...]
    _request_serializer: SerializingFunction
    _response_deserializer: DeserializingFunction

    def __init__(
        self,
        cython_call: cygrpc._AioCall,
        metadata: Metadata,
        request_serializer: SerializingFunction,
        response_deserializer: DeserializingFunction,
        loop: asyncio.AbstractEventLoop,
    ) -> None:
        self._loop = loop
        self._cython_call = cython_call
        # Snapshot the metadata into an immutable tuple.
        self._metadata = tuple(metadata)
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer

    def __del__(self) -> None:
        # The '_cython_call' object might be destructed before Call object;
        # guard with hasattr so GC-time teardown never raises.
        if hasattr(self, "_cython_call"):
            if not self._cython_call.done():
                self._cancel(_GC_CANCELLATION_DETAILS)

    def cancelled(self) -> bool:
        """Returns True if the RPC was cancelled (locally or remotely)."""
        return self._cython_call.cancelled()

    def _cancel(self, details: str) -> bool:
        """Forwards the application cancellation reasoning."""
        if not self._cython_call.done():
            self._cython_call.cancel(details)
            return True
        else:
            # Already finished: cancellation is a no-op.
            return False

    def cancel(self) -> bool:
        """Cancels the RPC; returns True if cancellation took effect."""
        return self._cancel(_LOCAL_CANCELLATION_DETAILS)

    def done(self) -> bool:
        """Returns True once the RPC has reached a terminal state."""
        return self._cython_call.done()

    def add_done_callback(self, callback: DoneCallbackType) -> None:
        """Registers a callback invoked with this Call when the RPC finishes."""
        cb = partial(callback, self)
        self._cython_call.add_done_callback(cb)

    def time_remaining(self) -> Optional[float]:
        """Seconds until the deadline, or None if no deadline was set."""
        return self._cython_call.time_remaining()

    async def initial_metadata(self) -> Metadata:
        """Waits for and returns the server's initial metadata."""
        raw_metadata_tuple = await self._cython_call.initial_metadata()
        return Metadata.from_tuple(raw_metadata_tuple)

    async def trailing_metadata(self) -> Metadata:
        """Waits for the final status and returns its trailing metadata."""
        raw_metadata_tuple = (
            await self._cython_call.status()
        ).trailing_metadata()
        return Metadata.from_tuple(raw_metadata_tuple)

    async def code(self) -> grpc.StatusCode:
        """Waits for the final status and returns the public status code."""
        cygrpc_code = (await self._cython_call.status()).code()
        return _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE[cygrpc_code]

    async def details(self) -> str:
        """Waits for the final status and returns its details string."""
        return (await self._cython_call.status()).details()

    async def debug_error_string(self) -> str:
        """Waits for the final status and returns Core's debug string."""
        return (await self._cython_call.status()).debug_error_string()

    async def _raise_for_status(self) -> None:
        # Local cancellation surfaces as asyncio.CancelledError, matching
        # asyncio task semantics; any other non-OK status becomes AioRpcError.
        if self._cython_call.is_locally_cancelled():
            raise asyncio.CancelledError()
        code = await self.code()
        if code != grpc.StatusCode.OK:
            raise _create_rpc_error(
                await self.initial_metadata(), await self._cython_call.status()
            )

    def _repr(self) -> str:
        # Delegate to the Cython call's representation.
        return repr(self._cython_call)

    def __repr__(self) -> str:
        return self._repr()

    def __str__(self) -> str:
        return self._repr()
|
| 284 |
+
|
| 285 |
+
|
| 286 |
+
class _APIStyle(enum.IntEnum):
    """Tracks which of the two mutually exclusive call APIs is in use."""

    # No API used yet; the first access locks in one of the styles below.
    UNKNOWN = 0
    # Messages produced/consumed via (async) iterators.
    ASYNC_GENERATOR = 1
    # Explicit read()/write() driving by the application.
    READER_WRITER = 2
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
class _UnaryResponseMixin(Call, Generic[ResponseType]):
    """Mixin providing awaitable single-response semantics for a Call."""

    # Task resolving to the deserialized response, or cygrpc.EOF on failure.
    _call_response: asyncio.Task

    def _init_unary_response_mixin(self, response_task: asyncio.Task):
        # Called by the concrete subclass's __init__.
        self._call_response = response_task

    def cancel(self) -> bool:
        # Cancel the underlying RPC first; only if that succeeded, also
        # cancel the response task so awaiters are released.
        if super().cancel():
            self._call_response.cancel()
            return True
        else:
            return False

    def __await__(self) -> Generator[Any, None, ResponseType]:
        """Wait till the ongoing RPC request finishes."""
        try:
            response = yield from self._call_response
        except asyncio.CancelledError:
            # Even if we caught all other CancelledError, there is still
            # this corner case. If the application cancels immediately after
            # the Call object is created, we will observe this
            # `CancelledError`.
            if not self.cancelled():
                self.cancel()
            raise

        # NOTE(lidiz) If we raise RpcError in the task, and users doesn't
        # 'await' on it. AsyncIO will log 'Task exception was never retrieved'.
        # Instead, if we move the exception raising here, the spam stops.
        # Unfortunately, there can only be one 'yield from' in '__await__'. So,
        # we need to access the private instance variable.
        if response is cygrpc.EOF:
            if self._cython_call.is_locally_cancelled():
                raise asyncio.CancelledError()
            else:
                raise _create_rpc_error(
                    self._cython_call._initial_metadata,
                    self._cython_call._status,
                )
        else:
            return response
|
| 333 |
+
|
| 334 |
+
|
| 335 |
+
class _StreamResponseMixin(Call):
    """Mixin providing streaming-response semantics for a Call.

    Responses may be consumed either via `async for` (ASYNC_GENERATOR style)
    or via explicit `read()` calls (READER_WRITER style) — never both.
    """

    # Lazily created async generator backing `__aiter__`.
    _message_aiter: AsyncIterator[ResponseType]
    # Task that must complete (request sent) before any read may proceed.
    _preparation: asyncio.Task
    _response_style: _APIStyle

    def _init_stream_response_mixin(self, preparation: asyncio.Task):
        # Called by the concrete subclass's __init__.
        self._message_aiter = None
        self._preparation = preparation
        self._response_style = _APIStyle.UNKNOWN

    def _update_response_style(self, style: _APIStyle):
        # First use locks in the style; mixing styles afterwards is an error.
        if self._response_style is _APIStyle.UNKNOWN:
            self._response_style = style
        elif self._response_style is not style:
            raise cygrpc.UsageError(_API_STYLE_ERROR)

    def cancel(self) -> bool:
        # Cancel the RPC and, if that succeeded, the pending preparation task.
        if super().cancel():
            self._preparation.cancel()
            return True
        else:
            return False

    async def _fetch_stream_responses(self) -> ResponseType:
        # Async generator yielding messages until Core signals EOF.
        message = await self._read()
        while message is not cygrpc.EOF:
            yield message
            message = await self._read()

        # If the read operation failed, Core should explain why.
        await self._raise_for_status()

    def __aiter__(self) -> AsyncIterator[ResponseType]:
        self._update_response_style(_APIStyle.ASYNC_GENERATOR)
        if self._message_aiter is None:
            self._message_aiter = self._fetch_stream_responses()
        return self._message_aiter

    async def _read(self) -> ResponseType:
        # Wait for the request being sent
        await self._preparation

        # Reads response message from Core
        try:
            raw_response = await self._cython_call.receive_serialized_message()
        except asyncio.CancelledError:
            if not self.cancelled():
                self.cancel()
            raise

        if raw_response is cygrpc.EOF:
            return cygrpc.EOF
        else:
            return _common.deserialize(
                raw_response, self._response_deserializer
            )

    async def read(self) -> Union[EOFType, ResponseType]:
        """Reads one response message, or returns EOF when the stream ends."""
        if self.done():
            # Finished RPC: surface any failure, otherwise report EOF.
            await self._raise_for_status()
            return cygrpc.EOF
        self._update_response_style(_APIStyle.READER_WRITER)

        response_message = await self._read()

        if response_message is cygrpc.EOF:
            # If the read operation failed, Core should explain why.
            await self._raise_for_status()
        return response_message
|
| 404 |
+
|
| 405 |
+
|
| 406 |
+
class _StreamRequestMixin(Call):
    """Mixin that feeds request messages into a client-streaming RPC.

    Supports two mutually exclusive API styles: either the caller supplies
    an (async) iterator of requests up front (ASYNC_GENERATOR style, drained
    by a background task), or the caller drives the stream manually via
    `write()` / `done_writing()` (READER_WRITER style).
    """

    # Set by Core (via _metadata_sent_observer) once initial metadata is sent.
    _metadata_sent: asyncio.Event
    # True after the half-close has been initiated; further writes are errors.
    _done_writing_flag: bool
    # Background consumer task, present only in ASYNC_GENERATOR style.
    _async_request_poller: Optional[asyncio.Task]
    _request_style: _APIStyle

    def _init_stream_request_mixin(
        self, request_iterator: Optional[RequestIterableType]
    ):
        """Initializes streaming-request state; called from subclass __init__."""
        self._metadata_sent = asyncio.Event()
        self._done_writing_flag = False

        # If user passes in an async iterator, create a consumer Task.
        if request_iterator is not None:
            self._async_request_poller = self._loop.create_task(
                self._consume_request_iterator(request_iterator)
            )
            self._request_style = _APIStyle.ASYNC_GENERATOR
        else:
            self._async_request_poller = None
            self._request_style = _APIStyle.READER_WRITER

    def _raise_for_different_style(self, style: _APIStyle):
        """Raises UsageError when iterator and reader/writer APIs are mixed."""
        if self._request_style is not style:
            raise cygrpc.UsageError(_API_STYLE_ERROR)

    def cancel(self) -> bool:
        """Cancels the RPC and, if present, the request-consumer task."""
        if super().cancel():
            if self._async_request_poller is not None:
                self._async_request_poller.cancel()
            return True
        else:
            return False

    def _metadata_sent_observer(self):
        # Invoked by Core once the initial metadata has been sent.
        self._metadata_sent.set()

    async def _send_request_or_log(self, request: RequestType) -> bool:
        """Writes one request; on AioRpcError, logs it and returns False.

        Shared by both iterator branches of `_consume_request_iterator` so
        the error-handling logic exists in exactly one place.

        Returns:
          True if the write succeeded; False if the RPC reported an error
          and consumption should stop.
        """
        try:
            await self._write(request)
            return True
        except AioRpcError as rpc_error:
            _LOGGER.debug(
                "Exception while consuming the request_iterator: %s",
                rpc_error,
            )
            return False

    async def _consume_request_iterator(
        self, request_iterator: RequestIterableType
    ) -> None:
        """Drains the user-supplied (async) iterator into the RPC."""
        try:
            if inspect.isasyncgen(request_iterator) or hasattr(
                request_iterator, "__aiter__"
            ):
                async for request in request_iterator:
                    if not await self._send_request_or_log(request):
                        # RPC already failed; stop without half-closing.
                        return
            else:
                for request in request_iterator:
                    if not await self._send_request_or_log(request):
                        return

            await self._done_writing()
        except:  # pylint: disable=bare-except
            # Client iterators can raise exceptions, which we should handle by
            # cancelling the RPC and logging the client's error. No exceptions
            # should escape this function.
            _LOGGER.debug(
                "Client request_iterator raised exception:\n%s",
                traceback.format_exc(),
            )
            self.cancel()

    async def _write(self, request: RequestType) -> None:
        if self.done():
            raise asyncio.InvalidStateError(_RPC_ALREADY_FINISHED_DETAILS)
        if self._done_writing_flag:
            raise asyncio.InvalidStateError(_RPC_HALF_CLOSED_DETAILS)
        if not self._metadata_sent.is_set():
            # Writes must wait until the initial metadata is on the wire.
            await self._metadata_sent.wait()
            if self.done():
                await self._raise_for_status()

        serialized_request = _common.serialize(
            request, self._request_serializer
        )
        try:
            await self._cython_call.send_serialized_message(serialized_request)
        except cygrpc.InternalError as err:
            # Record the Core error so _raise_for_status reports it.
            self._cython_call.set_internal_error(str(err))
            await self._raise_for_status()
        except asyncio.CancelledError:
            if not self.cancelled():
                self.cancel()
            raise

    async def _done_writing(self) -> None:
        if self.done():
            # If the RPC is finished, do nothing.
            return
        if not self._done_writing_flag:
            # If the done writing is not sent before, try to send it.
            self._done_writing_flag = True
            try:
                await self._cython_call.send_receive_close()
            except asyncio.CancelledError:
                if not self.cancelled():
                    self.cancel()
                raise

    async def write(self, request: RequestType) -> None:
        """Sends one request message (reader/writer style only)."""
        self._raise_for_different_style(_APIStyle.READER_WRITER)
        await self._write(request)

    async def done_writing(self) -> None:
        """Signal peer that client is done writing.

        This method is idempotent.
        """
        self._raise_for_different_style(_APIStyle.READER_WRITER)
        await self._done_writing()

    async def wait_for_connection(self) -> None:
        """Blocks until initial metadata is sent; raises if the RPC failed."""
        await self._metadata_sent.wait()
        if self.done():
            await self._raise_for_status()
|
| 540 |
+
|
| 541 |
+
|
| 542 |
+
class UnaryUnaryCall(_UnaryResponseMixin, Call, _base_call.UnaryUnaryCall):
    """Object for managing unary-unary RPC calls.

    Returned when an instance of `UnaryUnaryMultiCallable` object is called.
    """

    _request: RequestType
    _invocation_task: asyncio.Task

    # pylint: disable=too-many-arguments
    def __init__(
        self,
        request: RequestType,
        deadline: Optional[float],
        metadata: Metadata,
        credentials: Optional[grpc.CallCredentials],
        wait_for_ready: Optional[bool],
        channel: cygrpc.AioChannel,
        method: bytes,
        request_serializer: SerializingFunction,
        response_deserializer: DeserializingFunction,
        loop: asyncio.AbstractEventLoop,
    ) -> None:
        super().__init__(
            channel.call(method, deadline, credentials, wait_for_ready),
            metadata,
            request_serializer,
            response_deserializer,
            loop,
        )
        self._request = request
        self._context = cygrpc.build_census_context()
        # Kick off the RPC immediately; awaiting this Call awaits the task.
        self._invocation_task = loop.create_task(self._invoke())
        self._init_unary_response_mixin(self._invocation_task)

    async def _invoke(self) -> ResponseType:
        serialized_request = _common.serialize(
            self._request, self._request_serializer
        )

        # NOTE(lidiz) asyncio.CancelledError is not a good transport for status,
        # because the asyncio.Task class do not cache the exception object.
        # https://github.com/python/cpython/blob/edad4d89e357c92f70c0324b937845d652b20afd/Lib/asyncio/tasks.py#L785
        try:
            serialized_response = await self._cython_call.unary_unary(
                serialized_request, self._metadata, self._context
            )
        except asyncio.CancelledError:
            if not self.cancelled():
                self.cancel()
            # No re-raise here: per the NOTE above, the status is transported
            # via the EOF sentinel below rather than the CancelledError.

        if self._cython_call.is_ok():
            return _common.deserialize(
                serialized_response, self._response_deserializer
            )
        else:
            # EOF signals __await__ to translate the final status into an
            # exception on the awaiting side.
            return cygrpc.EOF

    async def wait_for_connection(self) -> None:
        """Waits for the RPC to be invoked; raises if it already failed."""
        await self._invocation_task
        if self.done():
            await self._raise_for_status()
|
| 604 |
+
|
| 605 |
+
|
| 606 |
+
class UnaryStreamCall(_StreamResponseMixin, Call, _base_call.UnaryStreamCall):
    """Object for managing unary-stream RPC calls.

    Returned when an instance of `UnaryStreamMultiCallable` object is called.
    """

    _request: RequestType
    # Task sending the single request; reads wait on it (see _read).
    _send_unary_request_task: asyncio.Task

    # pylint: disable=too-many-arguments
    def __init__(
        self,
        request: RequestType,
        deadline: Optional[float],
        metadata: Metadata,
        credentials: Optional[grpc.CallCredentials],
        wait_for_ready: Optional[bool],
        channel: cygrpc.AioChannel,
        method: bytes,
        request_serializer: SerializingFunction,
        response_deserializer: DeserializingFunction,
        loop: asyncio.AbstractEventLoop,
    ) -> None:
        super().__init__(
            channel.call(method, deadline, credentials, wait_for_ready),
            metadata,
            request_serializer,
            response_deserializer,
            loop,
        )
        self._request = request
        self._context = cygrpc.build_census_context()
        self._send_unary_request_task = loop.create_task(
            self._send_unary_request()
        )
        self._init_stream_response_mixin(self._send_unary_request_task)

    async def _send_unary_request(self) -> ResponseType:
        serialized_request = _common.serialize(
            self._request, self._request_serializer
        )
        try:
            await self._cython_call.initiate_unary_stream(
                serialized_request, self._metadata, self._context
            )
        except asyncio.CancelledError:
            if not self.cancelled():
                self.cancel()
            raise

    async def wait_for_connection(self) -> None:
        """Waits for the request to be sent; raises if the RPC already failed."""
        await self._send_unary_request_task
        if self.done():
            await self._raise_for_status()
|
| 660 |
+
|
| 661 |
+
|
| 662 |
+
# pylint: disable=too-many-ancestors
|
| 663 |
+
class StreamUnaryCall(
|
| 664 |
+
_StreamRequestMixin, _UnaryResponseMixin, Call, _base_call.StreamUnaryCall
|
| 665 |
+
):
|
| 666 |
+
"""Object for managing stream-unary RPC calls.
|
| 667 |
+
|
| 668 |
+
Returned when an instance of `StreamUnaryMultiCallable` object is called.
|
| 669 |
+
"""
|
| 670 |
+
|
| 671 |
+
# pylint: disable=too-many-arguments
|
| 672 |
+
def __init__(
|
| 673 |
+
self,
|
| 674 |
+
request_iterator: Optional[RequestIterableType],
|
| 675 |
+
deadline: Optional[float],
|
| 676 |
+
metadata: Metadata,
|
| 677 |
+
credentials: Optional[grpc.CallCredentials],
|
| 678 |
+
wait_for_ready: Optional[bool],
|
| 679 |
+
channel: cygrpc.AioChannel,
|
| 680 |
+
method: bytes,
|
| 681 |
+
request_serializer: SerializingFunction,
|
| 682 |
+
response_deserializer: DeserializingFunction,
|
| 683 |
+
loop: asyncio.AbstractEventLoop,
|
| 684 |
+
) -> None:
|
| 685 |
+
super().__init__(
|
| 686 |
+
channel.call(method, deadline, credentials, wait_for_ready),
|
| 687 |
+
metadata,
|
| 688 |
+
request_serializer,
|
| 689 |
+
response_deserializer,
|
| 690 |
+
loop,
|
| 691 |
+
)
|
| 692 |
+
|
| 693 |
+
self._context = cygrpc.build_census_context()
|
| 694 |
+
self._init_stream_request_mixin(request_iterator)
|
| 695 |
+
self._init_unary_response_mixin(loop.create_task(self._conduct_rpc()))
|
| 696 |
+
|
| 697 |
+
async def _conduct_rpc(self) -> ResponseType:
|
| 698 |
+
try:
|
| 699 |
+
serialized_response = await self._cython_call.stream_unary(
|
| 700 |
+
self._metadata, self._metadata_sent_observer, self._context
|
| 701 |
+
)
|
| 702 |
+
except asyncio.CancelledError:
|
| 703 |
+
if not self.cancelled():
|
| 704 |
+
self.cancel()
|
| 705 |
+
raise
|
| 706 |
+
|
| 707 |
+
if self._cython_call.is_ok():
|
| 708 |
+
return _common.deserialize(
|
| 709 |
+
serialized_response, self._response_deserializer
|
| 710 |
+
)
|
| 711 |
+
else:
|
| 712 |
+
return cygrpc.EOF
|
| 713 |
+
|
| 714 |
+
|
| 715 |
+
class StreamStreamCall(
|
| 716 |
+
_StreamRequestMixin, _StreamResponseMixin, Call, _base_call.StreamStreamCall
|
| 717 |
+
):
|
| 718 |
+
"""Object for managing stream-stream RPC calls.
|
| 719 |
+
|
| 720 |
+
Returned when an instance of `StreamStreamMultiCallable` object is called.
|
| 721 |
+
"""
|
| 722 |
+
|
| 723 |
+
_initializer: asyncio.Task
|
| 724 |
+
|
| 725 |
+
# pylint: disable=too-many-arguments
|
| 726 |
+
def __init__(
|
| 727 |
+
self,
|
| 728 |
+
request_iterator: Optional[RequestIterableType],
|
| 729 |
+
deadline: Optional[float],
|
| 730 |
+
metadata: Metadata,
|
| 731 |
+
credentials: Optional[grpc.CallCredentials],
|
| 732 |
+
wait_for_ready: Optional[bool],
|
| 733 |
+
channel: cygrpc.AioChannel,
|
| 734 |
+
method: bytes,
|
| 735 |
+
request_serializer: SerializingFunction,
|
| 736 |
+
response_deserializer: DeserializingFunction,
|
| 737 |
+
loop: asyncio.AbstractEventLoop,
|
| 738 |
+
) -> None:
|
| 739 |
+
super().__init__(
|
| 740 |
+
channel.call(method, deadline, credentials, wait_for_ready),
|
| 741 |
+
metadata,
|
| 742 |
+
request_serializer,
|
| 743 |
+
response_deserializer,
|
| 744 |
+
loop,
|
| 745 |
+
)
|
| 746 |
+
self._context = cygrpc.build_census_context()
|
| 747 |
+
self._initializer = self._loop.create_task(self._prepare_rpc())
|
| 748 |
+
self._init_stream_request_mixin(request_iterator)
|
| 749 |
+
self._init_stream_response_mixin(self._initializer)
|
| 750 |
+
|
| 751 |
+
async def _prepare_rpc(self):
|
| 752 |
+
"""This method prepares the RPC for receiving/sending messages.
|
| 753 |
+
|
| 754 |
+
All other operations around the stream should only happen after the
|
| 755 |
+
completion of this method.
|
| 756 |
+
"""
|
| 757 |
+
try:
|
| 758 |
+
await self._cython_call.initiate_stream_stream(
|
| 759 |
+
self._metadata, self._metadata_sent_observer, self._context
|
| 760 |
+
)
|
| 761 |
+
except asyncio.CancelledError:
|
| 762 |
+
if not self.cancelled():
|
| 763 |
+
self.cancel()
|
| 764 |
+
# No need to raise RpcError here, because no one will `await` this task.
|
lib/python3.10/site-packages/grpc/aio/_typing.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 The gRPC Authors
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
"""Common types for gRPC Async API"""
|
| 15 |
+
|
| 16 |
+
from typing import (
|
| 17 |
+
Any,
|
| 18 |
+
AsyncIterable,
|
| 19 |
+
Callable,
|
| 20 |
+
Iterable,
|
| 21 |
+
Sequence,
|
| 22 |
+
Tuple,
|
| 23 |
+
TypeVar,
|
| 24 |
+
Union,
|
| 25 |
+
)
|
| 26 |
+
|
| 27 |
+
from grpc._cython.cygrpc import EOF
|
| 28 |
+
|
| 29 |
+
from ._metadata import Metadata
|
| 30 |
+
from ._metadata import MetadataKey
|
| 31 |
+
from ._metadata import MetadataValue
|
| 32 |
+
|
| 33 |
+
RequestType = TypeVar("RequestType")
|
| 34 |
+
ResponseType = TypeVar("ResponseType")
|
| 35 |
+
SerializingFunction = Callable[[Any], bytes]
|
| 36 |
+
DeserializingFunction = Callable[[bytes], Any]
|
| 37 |
+
MetadatumType = Tuple[MetadataKey, MetadataValue]
|
| 38 |
+
MetadataType = Union[Metadata, Sequence[MetadatumType]]
|
| 39 |
+
ChannelArgumentType = Sequence[Tuple[str, Any]]
|
| 40 |
+
EOFType = type(EOF)
|
| 41 |
+
DoneCallbackType = Callable[[Any], None]
|
| 42 |
+
RequestIterableType = Union[Iterable[Any], AsyncIterable[Any]]
|
| 43 |
+
ResponseIterableType = AsyncIterable[Any]
|
lib/python3.10/site-packages/grpc/beta/__init__.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2015 gRPC authors.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
lib/python3.10/site-packages/grpc/beta/_client_adaptations.py
ADDED
|
@@ -0,0 +1,1015 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2016 gRPC authors.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
"""Translates gRPC's client-side API into gRPC's client-side Beta API."""
|
| 15 |
+
|
| 16 |
+
import grpc
|
| 17 |
+
from grpc import _common
|
| 18 |
+
from grpc.beta import _metadata
|
| 19 |
+
from grpc.beta import interfaces
|
| 20 |
+
from grpc.framework.common import cardinality
|
| 21 |
+
from grpc.framework.foundation import future
|
| 22 |
+
from grpc.framework.interfaces.face import face
|
| 23 |
+
|
| 24 |
+
# pylint: disable=too-many-arguments,too-many-locals,unused-argument
|
| 25 |
+
|
| 26 |
+
_STATUS_CODE_TO_ABORTION_KIND_AND_ABORTION_ERROR_CLASS = {
|
| 27 |
+
grpc.StatusCode.CANCELLED: (
|
| 28 |
+
face.Abortion.Kind.CANCELLED,
|
| 29 |
+
face.CancellationError,
|
| 30 |
+
),
|
| 31 |
+
grpc.StatusCode.UNKNOWN: (
|
| 32 |
+
face.Abortion.Kind.REMOTE_FAILURE,
|
| 33 |
+
face.RemoteError,
|
| 34 |
+
),
|
| 35 |
+
grpc.StatusCode.DEADLINE_EXCEEDED: (
|
| 36 |
+
face.Abortion.Kind.EXPIRED,
|
| 37 |
+
face.ExpirationError,
|
| 38 |
+
),
|
| 39 |
+
grpc.StatusCode.UNIMPLEMENTED: (
|
| 40 |
+
face.Abortion.Kind.LOCAL_FAILURE,
|
| 41 |
+
face.LocalError,
|
| 42 |
+
),
|
| 43 |
+
}
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def _effective_metadata(metadata, metadata_transformer):
|
| 47 |
+
non_none_metadata = () if metadata is None else metadata
|
| 48 |
+
if metadata_transformer is None:
|
| 49 |
+
return non_none_metadata
|
| 50 |
+
else:
|
| 51 |
+
return metadata_transformer(non_none_metadata)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def _credentials(grpc_call_options):
|
| 55 |
+
return None if grpc_call_options is None else grpc_call_options.credentials
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def _abortion(rpc_error_call):
|
| 59 |
+
code = rpc_error_call.code()
|
| 60 |
+
pair = _STATUS_CODE_TO_ABORTION_KIND_AND_ABORTION_ERROR_CLASS.get(code)
|
| 61 |
+
error_kind = face.Abortion.Kind.LOCAL_FAILURE if pair is None else pair[0]
|
| 62 |
+
return face.Abortion(
|
| 63 |
+
error_kind,
|
| 64 |
+
rpc_error_call.initial_metadata(),
|
| 65 |
+
rpc_error_call.trailing_metadata(),
|
| 66 |
+
code,
|
| 67 |
+
rpc_error_call.details(),
|
| 68 |
+
)
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def _abortion_error(rpc_error_call):
|
| 72 |
+
code = rpc_error_call.code()
|
| 73 |
+
pair = _STATUS_CODE_TO_ABORTION_KIND_AND_ABORTION_ERROR_CLASS.get(code)
|
| 74 |
+
exception_class = face.AbortionError if pair is None else pair[1]
|
| 75 |
+
return exception_class(
|
| 76 |
+
rpc_error_call.initial_metadata(),
|
| 77 |
+
rpc_error_call.trailing_metadata(),
|
| 78 |
+
code,
|
| 79 |
+
rpc_error_call.details(),
|
| 80 |
+
)
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
class _InvocationProtocolContext(interfaces.GRPCInvocationContext):
|
| 84 |
+
def disable_next_request_compression(self):
|
| 85 |
+
pass # TODO(https://github.com/grpc/grpc/issues/4078): design, implement.
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
class _Rendezvous(future.Future, face.Call):
|
| 89 |
+
def __init__(self, response_future, response_iterator, call):
|
| 90 |
+
self._future = response_future
|
| 91 |
+
self._iterator = response_iterator
|
| 92 |
+
self._call = call
|
| 93 |
+
|
| 94 |
+
def cancel(self):
|
| 95 |
+
return self._call.cancel()
|
| 96 |
+
|
| 97 |
+
def cancelled(self):
|
| 98 |
+
return self._future.cancelled()
|
| 99 |
+
|
| 100 |
+
def running(self):
|
| 101 |
+
return self._future.running()
|
| 102 |
+
|
| 103 |
+
def done(self):
|
| 104 |
+
return self._future.done()
|
| 105 |
+
|
| 106 |
+
def result(self, timeout=None):
|
| 107 |
+
try:
|
| 108 |
+
return self._future.result(timeout=timeout)
|
| 109 |
+
except grpc.RpcError as rpc_error_call:
|
| 110 |
+
raise _abortion_error(rpc_error_call)
|
| 111 |
+
except grpc.FutureTimeoutError:
|
| 112 |
+
raise future.TimeoutError()
|
| 113 |
+
except grpc.FutureCancelledError:
|
| 114 |
+
raise future.CancelledError()
|
| 115 |
+
|
| 116 |
+
def exception(self, timeout=None):
|
| 117 |
+
try:
|
| 118 |
+
rpc_error_call = self._future.exception(timeout=timeout)
|
| 119 |
+
if rpc_error_call is None:
|
| 120 |
+
return None
|
| 121 |
+
else:
|
| 122 |
+
return _abortion_error(rpc_error_call)
|
| 123 |
+
except grpc.FutureTimeoutError:
|
| 124 |
+
raise future.TimeoutError()
|
| 125 |
+
except grpc.FutureCancelledError:
|
| 126 |
+
raise future.CancelledError()
|
| 127 |
+
|
| 128 |
+
def traceback(self, timeout=None):
|
| 129 |
+
try:
|
| 130 |
+
return self._future.traceback(timeout=timeout)
|
| 131 |
+
except grpc.FutureTimeoutError:
|
| 132 |
+
raise future.TimeoutError()
|
| 133 |
+
except grpc.FutureCancelledError:
|
| 134 |
+
raise future.CancelledError()
|
| 135 |
+
|
| 136 |
+
def add_done_callback(self, fn):
|
| 137 |
+
self._future.add_done_callback(lambda ignored_callback: fn(self))
|
| 138 |
+
|
| 139 |
+
def __iter__(self):
|
| 140 |
+
return self
|
| 141 |
+
|
| 142 |
+
def _next(self):
|
| 143 |
+
try:
|
| 144 |
+
return next(self._iterator)
|
| 145 |
+
except grpc.RpcError as rpc_error_call:
|
| 146 |
+
raise _abortion_error(rpc_error_call)
|
| 147 |
+
|
| 148 |
+
def __next__(self):
|
| 149 |
+
return self._next()
|
| 150 |
+
|
| 151 |
+
def next(self):
|
| 152 |
+
return self._next()
|
| 153 |
+
|
| 154 |
+
def is_active(self):
|
| 155 |
+
return self._call.is_active()
|
| 156 |
+
|
| 157 |
+
def time_remaining(self):
|
| 158 |
+
return self._call.time_remaining()
|
| 159 |
+
|
| 160 |
+
def add_abortion_callback(self, abortion_callback):
|
| 161 |
+
def done_callback():
|
| 162 |
+
if self.code() is not grpc.StatusCode.OK:
|
| 163 |
+
abortion_callback(_abortion(self._call))
|
| 164 |
+
|
| 165 |
+
registered = self._call.add_callback(done_callback)
|
| 166 |
+
return None if registered else done_callback()
|
| 167 |
+
|
| 168 |
+
def protocol_context(self):
|
| 169 |
+
return _InvocationProtocolContext()
|
| 170 |
+
|
| 171 |
+
def initial_metadata(self):
|
| 172 |
+
return _metadata.beta(self._call.initial_metadata())
|
| 173 |
+
|
| 174 |
+
def terminal_metadata(self):
|
| 175 |
+
return _metadata.beta(self._call.terminal_metadata())
|
| 176 |
+
|
| 177 |
+
def code(self):
|
| 178 |
+
return self._call.code()
|
| 179 |
+
|
| 180 |
+
def details(self):
|
| 181 |
+
return self._call.details()
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
def _blocking_unary_unary(
|
| 185 |
+
channel,
|
| 186 |
+
group,
|
| 187 |
+
method,
|
| 188 |
+
timeout,
|
| 189 |
+
with_call,
|
| 190 |
+
protocol_options,
|
| 191 |
+
metadata,
|
| 192 |
+
metadata_transformer,
|
| 193 |
+
request,
|
| 194 |
+
request_serializer,
|
| 195 |
+
response_deserializer,
|
| 196 |
+
):
|
| 197 |
+
try:
|
| 198 |
+
multi_callable = channel.unary_unary(
|
| 199 |
+
_common.fully_qualified_method(group, method),
|
| 200 |
+
request_serializer=request_serializer,
|
| 201 |
+
response_deserializer=response_deserializer,
|
| 202 |
+
)
|
| 203 |
+
effective_metadata = _effective_metadata(metadata, metadata_transformer)
|
| 204 |
+
if with_call:
|
| 205 |
+
response, call = multi_callable.with_call(
|
| 206 |
+
request,
|
| 207 |
+
timeout=timeout,
|
| 208 |
+
metadata=_metadata.unbeta(effective_metadata),
|
| 209 |
+
credentials=_credentials(protocol_options),
|
| 210 |
+
)
|
| 211 |
+
return response, _Rendezvous(None, None, call)
|
| 212 |
+
else:
|
| 213 |
+
return multi_callable(
|
| 214 |
+
request,
|
| 215 |
+
timeout=timeout,
|
| 216 |
+
metadata=_metadata.unbeta(effective_metadata),
|
| 217 |
+
credentials=_credentials(protocol_options),
|
| 218 |
+
)
|
| 219 |
+
except grpc.RpcError as rpc_error_call:
|
| 220 |
+
raise _abortion_error(rpc_error_call)
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
def _future_unary_unary(
|
| 224 |
+
channel,
|
| 225 |
+
group,
|
| 226 |
+
method,
|
| 227 |
+
timeout,
|
| 228 |
+
protocol_options,
|
| 229 |
+
metadata,
|
| 230 |
+
metadata_transformer,
|
| 231 |
+
request,
|
| 232 |
+
request_serializer,
|
| 233 |
+
response_deserializer,
|
| 234 |
+
):
|
| 235 |
+
multi_callable = channel.unary_unary(
|
| 236 |
+
_common.fully_qualified_method(group, method),
|
| 237 |
+
request_serializer=request_serializer,
|
| 238 |
+
response_deserializer=response_deserializer,
|
| 239 |
+
)
|
| 240 |
+
effective_metadata = _effective_metadata(metadata, metadata_transformer)
|
| 241 |
+
response_future = multi_callable.future(
|
| 242 |
+
request,
|
| 243 |
+
timeout=timeout,
|
| 244 |
+
metadata=_metadata.unbeta(effective_metadata),
|
| 245 |
+
credentials=_credentials(protocol_options),
|
| 246 |
+
)
|
| 247 |
+
return _Rendezvous(response_future, None, response_future)
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
def _unary_stream(
|
| 251 |
+
channel,
|
| 252 |
+
group,
|
| 253 |
+
method,
|
| 254 |
+
timeout,
|
| 255 |
+
protocol_options,
|
| 256 |
+
metadata,
|
| 257 |
+
metadata_transformer,
|
| 258 |
+
request,
|
| 259 |
+
request_serializer,
|
| 260 |
+
response_deserializer,
|
| 261 |
+
):
|
| 262 |
+
multi_callable = channel.unary_stream(
|
| 263 |
+
_common.fully_qualified_method(group, method),
|
| 264 |
+
request_serializer=request_serializer,
|
| 265 |
+
response_deserializer=response_deserializer,
|
| 266 |
+
)
|
| 267 |
+
effective_metadata = _effective_metadata(metadata, metadata_transformer)
|
| 268 |
+
response_iterator = multi_callable(
|
| 269 |
+
request,
|
| 270 |
+
timeout=timeout,
|
| 271 |
+
metadata=_metadata.unbeta(effective_metadata),
|
| 272 |
+
credentials=_credentials(protocol_options),
|
| 273 |
+
)
|
| 274 |
+
return _Rendezvous(None, response_iterator, response_iterator)
|
| 275 |
+
|
| 276 |
+
|
| 277 |
+
def _blocking_stream_unary(
|
| 278 |
+
channel,
|
| 279 |
+
group,
|
| 280 |
+
method,
|
| 281 |
+
timeout,
|
| 282 |
+
with_call,
|
| 283 |
+
protocol_options,
|
| 284 |
+
metadata,
|
| 285 |
+
metadata_transformer,
|
| 286 |
+
request_iterator,
|
| 287 |
+
request_serializer,
|
| 288 |
+
response_deserializer,
|
| 289 |
+
):
|
| 290 |
+
try:
|
| 291 |
+
multi_callable = channel.stream_unary(
|
| 292 |
+
_common.fully_qualified_method(group, method),
|
| 293 |
+
request_serializer=request_serializer,
|
| 294 |
+
response_deserializer=response_deserializer,
|
| 295 |
+
)
|
| 296 |
+
effective_metadata = _effective_metadata(metadata, metadata_transformer)
|
| 297 |
+
if with_call:
|
| 298 |
+
response, call = multi_callable.with_call(
|
| 299 |
+
request_iterator,
|
| 300 |
+
timeout=timeout,
|
| 301 |
+
metadata=_metadata.unbeta(effective_metadata),
|
| 302 |
+
credentials=_credentials(protocol_options),
|
| 303 |
+
)
|
| 304 |
+
return response, _Rendezvous(None, None, call)
|
| 305 |
+
else:
|
| 306 |
+
return multi_callable(
|
| 307 |
+
request_iterator,
|
| 308 |
+
timeout=timeout,
|
| 309 |
+
metadata=_metadata.unbeta(effective_metadata),
|
| 310 |
+
credentials=_credentials(protocol_options),
|
| 311 |
+
)
|
| 312 |
+
except grpc.RpcError as rpc_error_call:
|
| 313 |
+
raise _abortion_error(rpc_error_call)
|
| 314 |
+
|
| 315 |
+
|
| 316 |
+
def _future_stream_unary(
|
| 317 |
+
channel,
|
| 318 |
+
group,
|
| 319 |
+
method,
|
| 320 |
+
timeout,
|
| 321 |
+
protocol_options,
|
| 322 |
+
metadata,
|
| 323 |
+
metadata_transformer,
|
| 324 |
+
request_iterator,
|
| 325 |
+
request_serializer,
|
| 326 |
+
response_deserializer,
|
| 327 |
+
):
|
| 328 |
+
multi_callable = channel.stream_unary(
|
| 329 |
+
_common.fully_qualified_method(group, method),
|
| 330 |
+
request_serializer=request_serializer,
|
| 331 |
+
response_deserializer=response_deserializer,
|
| 332 |
+
)
|
| 333 |
+
effective_metadata = _effective_metadata(metadata, metadata_transformer)
|
| 334 |
+
response_future = multi_callable.future(
|
| 335 |
+
request_iterator,
|
| 336 |
+
timeout=timeout,
|
| 337 |
+
metadata=_metadata.unbeta(effective_metadata),
|
| 338 |
+
credentials=_credentials(protocol_options),
|
| 339 |
+
)
|
| 340 |
+
return _Rendezvous(response_future, None, response_future)
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
def _stream_stream(
|
| 344 |
+
channel,
|
| 345 |
+
group,
|
| 346 |
+
method,
|
| 347 |
+
timeout,
|
| 348 |
+
protocol_options,
|
| 349 |
+
metadata,
|
| 350 |
+
metadata_transformer,
|
| 351 |
+
request_iterator,
|
| 352 |
+
request_serializer,
|
| 353 |
+
response_deserializer,
|
| 354 |
+
):
|
| 355 |
+
multi_callable = channel.stream_stream(
|
| 356 |
+
_common.fully_qualified_method(group, method),
|
| 357 |
+
request_serializer=request_serializer,
|
| 358 |
+
response_deserializer=response_deserializer,
|
| 359 |
+
)
|
| 360 |
+
effective_metadata = _effective_metadata(metadata, metadata_transformer)
|
| 361 |
+
response_iterator = multi_callable(
|
| 362 |
+
request_iterator,
|
| 363 |
+
timeout=timeout,
|
| 364 |
+
metadata=_metadata.unbeta(effective_metadata),
|
| 365 |
+
credentials=_credentials(protocol_options),
|
| 366 |
+
)
|
| 367 |
+
return _Rendezvous(None, response_iterator, response_iterator)
|
| 368 |
+
|
| 369 |
+
|
| 370 |
+
class _UnaryUnaryMultiCallable(face.UnaryUnaryMultiCallable):
|
| 371 |
+
def __init__(
|
| 372 |
+
self,
|
| 373 |
+
channel,
|
| 374 |
+
group,
|
| 375 |
+
method,
|
| 376 |
+
metadata_transformer,
|
| 377 |
+
request_serializer,
|
| 378 |
+
response_deserializer,
|
| 379 |
+
):
|
| 380 |
+
self._channel = channel
|
| 381 |
+
self._group = group
|
| 382 |
+
self._method = method
|
| 383 |
+
self._metadata_transformer = metadata_transformer
|
| 384 |
+
self._request_serializer = request_serializer
|
| 385 |
+
self._response_deserializer = response_deserializer
|
| 386 |
+
|
| 387 |
+
def __call__(
|
| 388 |
+
self,
|
| 389 |
+
request,
|
| 390 |
+
timeout,
|
| 391 |
+
metadata=None,
|
| 392 |
+
with_call=False,
|
| 393 |
+
protocol_options=None,
|
| 394 |
+
):
|
| 395 |
+
return _blocking_unary_unary(
|
| 396 |
+
self._channel,
|
| 397 |
+
self._group,
|
| 398 |
+
self._method,
|
| 399 |
+
timeout,
|
| 400 |
+
with_call,
|
| 401 |
+
protocol_options,
|
| 402 |
+
metadata,
|
| 403 |
+
self._metadata_transformer,
|
| 404 |
+
request,
|
| 405 |
+
self._request_serializer,
|
| 406 |
+
self._response_deserializer,
|
| 407 |
+
)
|
| 408 |
+
|
| 409 |
+
def future(self, request, timeout, metadata=None, protocol_options=None):
|
| 410 |
+
return _future_unary_unary(
|
| 411 |
+
self._channel,
|
| 412 |
+
self._group,
|
| 413 |
+
self._method,
|
| 414 |
+
timeout,
|
| 415 |
+
protocol_options,
|
| 416 |
+
metadata,
|
| 417 |
+
self._metadata_transformer,
|
| 418 |
+
request,
|
| 419 |
+
self._request_serializer,
|
| 420 |
+
self._response_deserializer,
|
| 421 |
+
)
|
| 422 |
+
|
| 423 |
+
def event(
|
| 424 |
+
self,
|
| 425 |
+
request,
|
| 426 |
+
receiver,
|
| 427 |
+
abortion_callback,
|
| 428 |
+
timeout,
|
| 429 |
+
metadata=None,
|
| 430 |
+
protocol_options=None,
|
| 431 |
+
):
|
| 432 |
+
raise NotImplementedError()
|
| 433 |
+
|
| 434 |
+
|
| 435 |
+
class _UnaryStreamMultiCallable(face.UnaryStreamMultiCallable):
|
| 436 |
+
def __init__(
|
| 437 |
+
self,
|
| 438 |
+
channel,
|
| 439 |
+
group,
|
| 440 |
+
method,
|
| 441 |
+
metadata_transformer,
|
| 442 |
+
request_serializer,
|
| 443 |
+
response_deserializer,
|
| 444 |
+
):
|
| 445 |
+
self._channel = channel
|
| 446 |
+
self._group = group
|
| 447 |
+
self._method = method
|
| 448 |
+
self._metadata_transformer = metadata_transformer
|
| 449 |
+
self._request_serializer = request_serializer
|
| 450 |
+
self._response_deserializer = response_deserializer
|
| 451 |
+
|
| 452 |
+
def __call__(self, request, timeout, metadata=None, protocol_options=None):
|
| 453 |
+
return _unary_stream(
|
| 454 |
+
self._channel,
|
| 455 |
+
self._group,
|
| 456 |
+
self._method,
|
| 457 |
+
timeout,
|
| 458 |
+
protocol_options,
|
| 459 |
+
metadata,
|
| 460 |
+
self._metadata_transformer,
|
| 461 |
+
request,
|
| 462 |
+
self._request_serializer,
|
| 463 |
+
self._response_deserializer,
|
| 464 |
+
)
|
| 465 |
+
|
| 466 |
+
def event(
|
| 467 |
+
self,
|
| 468 |
+
request,
|
| 469 |
+
receiver,
|
| 470 |
+
abortion_callback,
|
| 471 |
+
timeout,
|
| 472 |
+
metadata=None,
|
| 473 |
+
protocol_options=None,
|
| 474 |
+
):
|
| 475 |
+
raise NotImplementedError()
|
| 476 |
+
|
| 477 |
+
|
| 478 |
+
class _StreamUnaryMultiCallable(face.StreamUnaryMultiCallable):
    """Invokes a stream-request / unary-response RPC over a gRPC channel."""

    def __init__(self, channel, group, method, metadata_transformer,
                 request_serializer, response_deserializer):
        self._channel = channel
        self._group = group
        self._method = method
        self._metadata_transformer = metadata_transformer
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer

    def __call__(self, request_iterator, timeout, metadata=None,
                 with_call=False, protocol_options=None):
        """Synchronously invoke the RPC; optionally also return the call."""
        return _blocking_stream_unary(
            self._channel, self._group, self._method, timeout, with_call,
            protocol_options, metadata, self._metadata_transformer,
            request_iterator, self._request_serializer,
            self._response_deserializer)

    def future(self, request_iterator, timeout, metadata=None,
               protocol_options=None):
        """Asynchronously invoke the RPC; returns a future for the response."""
        return _future_stream_unary(
            self._channel, self._group, self._method, timeout,
            protocol_options, metadata, self._metadata_transformer,
            request_iterator, self._request_serializer,
            self._response_deserializer)

    def event(self, receiver, abortion_callback, timeout, metadata=None,
              protocol_options=None):
        # Event-style invocation is not supported by this adaptation layer.
        raise NotImplementedError()
|
| 542 |
+
|
| 543 |
+
|
| 544 |
+
class _StreamStreamMultiCallable(face.StreamStreamMultiCallable):
    """Invokes a stream-request / stream-response RPC over a gRPC channel."""

    def __init__(self, channel, group, method, metadata_transformer,
                 request_serializer, response_deserializer):
        self._channel = channel
        self._group = group
        self._method = method
        self._metadata_transformer = metadata_transformer
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer

    def __call__(self, request_iterator, timeout, metadata=None,
                 protocol_options=None):
        """Invoke the RPC; returns an iterator of deserialized responses."""
        return _stream_stream(
            self._channel, self._group, self._method, timeout,
            protocol_options, metadata, self._metadata_transformer,
            request_iterator, self._request_serializer,
            self._response_deserializer)

    def event(self, receiver, abortion_callback, timeout, metadata=None,
              protocol_options=None):
        # Event-style invocation is not supported by this adaptation layer.
        raise NotImplementedError()
|
| 586 |
+
|
| 587 |
+
|
| 588 |
+
class _GenericStub(face.GenericStub):
    """A face.GenericStub that dispatches RPCs over a gRPC channel.

    Serializers/deserializers are looked up per (group, method) pair; a
    missing entry yields None, i.e. bytes pass through unmodified.
    """

    def __init__(self, channel, metadata_transformer, request_serializers,
                 response_deserializers):
        self._channel = channel
        self._metadata_transformer = metadata_transformer
        self._request_serializers = request_serializers or {}
        self._response_deserializers = response_deserializers or {}

    def _codecs(self, group, method):
        # Return the (request_serializer, response_deserializer) pair
        # registered for the RPC; either element may be None.
        key = (group, method)
        return (self._request_serializers.get(key),
                self._response_deserializers.get(key))

    def blocking_unary_unary(self, group, method, request, timeout,
                             metadata=None, with_call=None,
                             protocol_options=None):
        serializer, deserializer = self._codecs(group, method)
        return _blocking_unary_unary(
            self._channel, group, method, timeout, with_call,
            protocol_options, metadata, self._metadata_transformer, request,
            serializer, deserializer)

    def future_unary_unary(self, group, method, request, timeout,
                           metadata=None, protocol_options=None):
        serializer, deserializer = self._codecs(group, method)
        return _future_unary_unary(
            self._channel, group, method, timeout, protocol_options, metadata,
            self._metadata_transformer, request, serializer, deserializer)

    def inline_unary_stream(self, group, method, request, timeout,
                            metadata=None, protocol_options=None):
        serializer, deserializer = self._codecs(group, method)
        return _unary_stream(
            self._channel, group, method, timeout, protocol_options, metadata,
            self._metadata_transformer, request, serializer, deserializer)

    def blocking_stream_unary(self, group, method, request_iterator, timeout,
                              metadata=None, with_call=None,
                              protocol_options=None):
        serializer, deserializer = self._codecs(group, method)
        return _blocking_stream_unary(
            self._channel, group, method, timeout, with_call,
            protocol_options, metadata, self._metadata_transformer,
            request_iterator, serializer, deserializer)

    def future_stream_unary(self, group, method, request_iterator, timeout,
                            metadata=None, protocol_options=None):
        serializer, deserializer = self._codecs(group, method)
        return _future_stream_unary(
            self._channel, group, method, timeout, protocol_options, metadata,
            self._metadata_transformer, request_iterator, serializer,
            deserializer)

    def inline_stream_stream(self, group, method, request_iterator, timeout,
                             metadata=None, protocol_options=None):
        serializer, deserializer = self._codecs(group, method)
        return _stream_stream(
            self._channel, group, method, timeout, protocol_options, metadata,
            self._metadata_transformer, request_iterator, serializer,
            deserializer)

    def event_unary_unary(self, group, method, request, receiver,
                          abortion_callback, timeout, metadata=None,
                          protocol_options=None):
        # Event-style invocation is not supported by this adaptation layer.
        raise NotImplementedError()

    def event_unary_stream(self, group, method, request, receiver,
                           abortion_callback, timeout, metadata=None,
                           protocol_options=None):
        raise NotImplementedError()

    def event_stream_unary(self, group, method, receiver, abortion_callback,
                           timeout, metadata=None, protocol_options=None):
        raise NotImplementedError()

    def event_stream_stream(self, group, method, receiver, abortion_callback,
                            timeout, metadata=None, protocol_options=None):
        raise NotImplementedError()

    def unary_unary(self, group, method):
        """Create a reusable multi-callable for a unary-unary method."""
        serializer, deserializer = self._codecs(group, method)
        return _UnaryUnaryMultiCallable(
            self._channel, group, method, self._metadata_transformer,
            serializer, deserializer)

    def unary_stream(self, group, method):
        """Create a reusable multi-callable for a unary-stream method."""
        serializer, deserializer = self._codecs(group, method)
        return _UnaryStreamMultiCallable(
            self._channel, group, method, self._metadata_transformer,
            serializer, deserializer)

    def stream_unary(self, group, method):
        """Create a reusable multi-callable for a stream-unary method."""
        serializer, deserializer = self._codecs(group, method)
        return _StreamUnaryMultiCallable(
            self._channel, group, method, self._metadata_transformer,
            serializer, deserializer)

    def stream_stream(self, group, method):
        """Create a reusable multi-callable for a stream-stream method."""
        serializer, deserializer = self._codecs(group, method)
        return _StreamStreamMultiCallable(
            self._channel, group, method, self._metadata_transformer,
            serializer, deserializer)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Do not suppress exceptions raised inside the with-block.
        return False
|
| 952 |
+
|
| 953 |
+
|
| 954 |
+
class _DynamicStub(face.DynamicStub):
    """Exposes a service's methods as attributes, resolved by cardinality."""

    def __init__(self, backing_generic_stub, group, cardinalities):
        self._generic_stub = backing_generic_stub
        self._group = group
        self._cardinalities = cardinalities

    def __getattr__(self, attr):
        """Resolve an attribute name to the matching multi-callable."""
        method_cardinality = self._cardinalities.get(attr)
        factory = {
            cardinality.Cardinality.UNARY_UNARY:
                self._generic_stub.unary_unary,
            cardinality.Cardinality.UNARY_STREAM:
                self._generic_stub.unary_stream,
            cardinality.Cardinality.STREAM_UNARY:
                self._generic_stub.stream_unary,
            cardinality.Cardinality.STREAM_STREAM:
                self._generic_stub.stream_stream,
        }.get(method_cardinality)
        if factory is None:
            # Unknown method name (or unrecognized cardinality value).
            raise AttributeError(
                '_DynamicStub object has no attribute "%s"!' % attr
            )
        return factory(self._group, attr)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Do not suppress exceptions raised inside the with-block.
        return False
|
| 980 |
+
|
| 981 |
+
|
| 982 |
+
def generic_stub(channel, host, metadata_transformer, request_serializers,
                 response_deserializers):
    """Create a face.GenericStub backed by the given gRPC channel.

    Note: ``host`` is accepted for interface compatibility but is not used
    by this implementation.
    """
    return _GenericStub(channel, metadata_transformer, request_serializers,
                        response_deserializers)
|
| 995 |
+
|
| 996 |
+
|
| 997 |
+
def dynamic_stub(channel, service, cardinalities, host, metadata_transformer,
                 request_serializers, response_deserializers):
    """Create a face.DynamicStub for ``service`` over the given channel.

    Note: ``host`` is accepted for interface compatibility but is not used
    by this implementation.
    """
    backing_stub = _GenericStub(channel, metadata_transformer,
                                request_serializers, response_deserializers)
    return _DynamicStub(backing_stub, service, cardinalities)
|
lib/python3.10/site-packages/grpc/beta/_metadata.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2017 gRPC authors.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
"""API metadata conversion utilities."""
|
| 15 |
+
|
| 16 |
+
import collections
|
| 17 |
+
|
| 18 |
+
_Metadatum = collections.namedtuple(
|
| 19 |
+
"_Metadatum",
|
| 20 |
+
(
|
| 21 |
+
"key",
|
| 22 |
+
"value",
|
| 23 |
+
),
|
| 24 |
+
)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def _beta_metadatum(key, value):
    """Convert one metadata pair to the beta (ASCII-bytes) representation."""

    def as_bytes(text):
        return text if isinstance(text, bytes) else text.encode("ascii")

    return _Metadatum(as_bytes(key), as_bytes(value))
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def _metadatum(beta_key, beta_value):
    """Convert one beta metadata pair back to the modern representation."""
    key = beta_key if isinstance(beta_key, str) else beta_key.decode("utf8")
    # Values of "-bin"-suffixed keys stay as bytes; all others become text.
    if isinstance(beta_value, str) or key.endswith("-bin"):
        value = beta_value
    else:
        value = beta_value.decode("utf8")
    return _Metadatum(key, value)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def beta(metadata):
    """Convert modern metadata to a tuple of beta metadata pairs.

    None is treated as empty metadata.
    """
    if metadata is None:
        return ()
    return tuple(_beta_metadatum(key, value) for key, value in metadata)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def unbeta(beta_metadata):
    """Convert beta metadata to a tuple of modern metadata pairs.

    None is treated as empty metadata.
    """
    if beta_metadata is None:
        return ()
    return tuple(
        _metadatum(beta_key, beta_value)
        for beta_key, beta_value in beta_metadata
    )
|
lib/python3.10/site-packages/grpc/beta/_server_adaptations.py
ADDED
|
@@ -0,0 +1,465 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2016 gRPC authors.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
"""Translates gRPC's server-side API into gRPC's server-side Beta API."""
|
| 15 |
+
|
| 16 |
+
import collections
|
| 17 |
+
import threading
|
| 18 |
+
|
| 19 |
+
import grpc
|
| 20 |
+
from grpc import _common
|
| 21 |
+
from grpc.beta import _metadata
|
| 22 |
+
from grpc.beta import interfaces
|
| 23 |
+
from grpc.framework.common import cardinality
|
| 24 |
+
from grpc.framework.common import style
|
| 25 |
+
from grpc.framework.foundation import abandonment
|
| 26 |
+
from grpc.framework.foundation import logging_pool
|
| 27 |
+
from grpc.framework.foundation import stream
|
| 28 |
+
from grpc.framework.interfaces.face import face
|
| 29 |
+
|
| 30 |
+
# pylint: disable=too-many-return-statements
|
| 31 |
+
|
| 32 |
+
_DEFAULT_POOL_SIZE = 8
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class _ServerProtocolContext(interfaces.GRPCServicerContext):
    """Protocol-level view over a grpc servicer context."""

    def __init__(self, servicer_context):
        self._servicer_context = servicer_context

    def peer(self):
        """Return the address of the RPC's peer."""
        return self._servicer_context.peer()

    def disable_next_response_compression(self):
        # TODO(https://github.com/grpc/grpc/issues/4078): design, implement.
        pass
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class _FaceServicerContext(face.ServicerContext):
    """Adapts a grpc.ServicerContext to the beta face.ServicerContext API."""

    def __init__(self, servicer_context):
        self._servicer_context = servicer_context

    def is_active(self):
        return self._servicer_context.is_active()

    def time_remaining(self):
        return self._servicer_context.time_remaining()

    def add_abortion_callback(self, abortion_callback):
        # This beta feature was dropped and has no server-side equivalent.
        raise NotImplementedError(
            "add_abortion_callback no longer supported server-side!"
        )

    def cancel(self):
        self._servicer_context.cancel()

    def protocol_context(self):
        """Return a protocol-level view of the underlying context."""
        return _ServerProtocolContext(self._servicer_context)

    def invocation_metadata(self):
        """Return the client's invocation metadata in beta form."""
        return _metadata.beta(self._servicer_context.invocation_metadata())

    def initial_metadata(self, initial_metadata):
        self._servicer_context.send_initial_metadata(
            _metadata.unbeta(initial_metadata))

    def terminal_metadata(self, terminal_metadata):
        self._servicer_context.set_terminal_metadata(
            _metadata.unbeta(terminal_metadata))

    def code(self, code):
        self._servicer_context.set_code(code)

    def details(self, details):
        self._servicer_context.set_details(details)
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def _adapt_unary_request_inline(unary_request_inline):
    """Wrap a beta inline unary-request method for the modern servicer API."""

    def handle(request, servicer_context):
        return unary_request_inline(
            request, _FaceServicerContext(servicer_context))

    return handle
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def _adapt_stream_request_inline(stream_request_inline):
    """Wrap a beta inline stream-request method for the modern servicer API."""

    def handle(request_iterator, servicer_context):
        return stream_request_inline(
            request_iterator, _FaceServicerContext(servicer_context))

    return handle
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
class _Callback(stream.Consumer):
    """A stream.Consumer that buffers consumed values for blocking retrieval.

    Bridges event-style (callback-driven) beta servicer implementations to
    the inline (return-value / iterator) style the modern handlers expect.
    All state is guarded by a single condition variable.
    """

    def __init__(self):
        self._condition = threading.Condition()  # guards the fields below
        self._values = []  # values consumed but not yet drawn
        self._terminated = False  # producer is done; no more values coming
        self._cancelled = False  # RPC was cancelled; readers must abandon

    def consume(self, value):
        """Buffer one value and wake any blocked reader."""
        with self._condition:
            self._values.append(value)
            self._condition.notify_all()

    def terminate(self):
        """Mark the value stream complete and wake any blocked reader."""
        with self._condition:
            self._terminated = True
            self._condition.notify_all()

    def consume_and_terminate(self, value):
        """Buffer one final value and mark the stream complete."""
        with self._condition:
            self._values.append(value)
            self._terminated = True
            self._condition.notify_all()

    def cancel(self):
        """Mark the stream cancelled; pending and future draws will abandon."""
        with self._condition:
            self._cancelled = True
            self._condition.notify_all()

    def draw_one_value(self):
        """Block until a value, termination, or cancellation is observed.

        Returns:
          The next buffered value, or None once the stream has terminated
          and the buffer is empty.

        Raises:
          abandonment.Abandoned: If the stream was cancelled.
        """
        with self._condition:
            while True:
                # Cancellation wins over buffered values; buffered values
                # are drained before termination is reported.
                if self._cancelled:
                    raise abandonment.Abandoned()
                elif self._values:
                    return self._values.pop(0)
                elif self._terminated:
                    return None
                else:
                    self._condition.wait()

    def draw_all_values(self):
        """Block until termination, then return every buffered value.

        May only be called once: the internal buffer is released (set to
        None) after the values are returned.

        Raises:
          abandonment.Abandoned: If the stream was cancelled.
        """
        with self._condition:
            while True:
                if self._cancelled:
                    raise abandonment.Abandoned()
                elif self._terminated:
                    all_values = tuple(self._values)
                    self._values = None
                    return all_values
                else:
                    self._condition.wait()
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
def _run_request_pipe_thread(
    request_iterator, request_consumer, servicer_context
):
    """Feed requests into *request_consumer* from a background daemon thread.

    Starts the thread and returns immediately. Piping stops early (without
    terminating the consumer) if the RPC becomes inactive.
    """
    # NOTE(review): thread_joined is checked but never set anywhere in this
    # function — presumably a vestige of an earlier join/cleanup path.
    thread_joined = threading.Event()

    def pipe_requests():
        for request in request_iterator:
            # Re-check liveness before each delivery so a dead RPC stops
            # the pipe promptly.
            if not servicer_context.is_active() or thread_joined.is_set():
                return
            request_consumer.consume(request)
        # Only signal normal termination if the RPC is still live.
        if not servicer_context.is_active() or thread_joined.is_set():
            return
        request_consumer.terminate()

    request_pipe_thread = threading.Thread(target=pipe_requests)
    request_pipe_thread.daemon = True
    request_pipe_thread.start()
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
def _adapt_unary_unary_event(unary_unary_event):
    """Wrap a beta unary-unary event handler as an inline servicer method."""

    def handle(request, servicer_context):
        # Collect the single response emitted by the event-style handler.
        sink = _Callback()
        if not servicer_context.add_callback(sink.cancel):
            raise abandonment.Abandoned()
        unary_unary_event(
            request,
            sink.consume_and_terminate,
            _FaceServicerContext(servicer_context),
        )
        return sink.draw_all_values()[0]

    return handle
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
def _adapt_unary_stream_event(unary_stream_event):
    """Wrap a beta unary-stream event handler as an inline servicer method."""

    def handle(request, servicer_context):
        sink = _Callback()
        if not servicer_context.add_callback(sink.cancel):
            raise abandonment.Abandoned()
        unary_stream_event(
            request, sink, _FaceServicerContext(servicer_context)
        )
        # draw_one_value returns None exactly when the stream terminates.
        yield from iter(sink.draw_one_value, None)

    return handle
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
def _adapt_stream_unary_event(stream_unary_event):
    """Wrap a beta stream-unary event handler as an inline servicer method."""

    def handle(request_iterator, servicer_context):
        sink = _Callback()
        if not servicer_context.add_callback(sink.cancel):
            raise abandonment.Abandoned()
        request_consumer = stream_unary_event(
            sink.consume_and_terminate,
            _FaceServicerContext(servicer_context),
        )
        _run_request_pipe_thread(
            request_iterator, request_consumer, servicer_context
        )
        return sink.draw_all_values()[0]

    return handle
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
def _adapt_stream_stream_event(stream_stream_event):
    """Wrap a beta stream-stream event handler as an inline servicer method."""

    def handle(request_iterator, servicer_context):
        sink = _Callback()
        if not servicer_context.add_callback(sink.cancel):
            raise abandonment.Abandoned()
        request_consumer = stream_stream_event(
            sink, _FaceServicerContext(servicer_context)
        )
        _run_request_pipe_thread(
            request_iterator, request_consumer, servicer_context
        )
        # draw_one_value returns None exactly when the stream terminates.
        yield from iter(sink.draw_one_value, None)

    return handle
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
class _SimpleMethodHandler(
    collections.namedtuple(
        "_MethodHandler",
        (
            "request_streaming", "response_streaming",
            "request_deserializer", "response_serializer",
            "unary_unary", "unary_stream", "stream_unary", "stream_stream",
        ),
    ),
    grpc.RpcMethodHandler,
):
    """Immutable grpc.RpcMethodHandler backed by a namedtuple."""
|
| 265 |
+
|
| 266 |
+
|
| 267 |
+
def _simple_method_handler(
    implementation, request_deserializer, response_serializer
):
    """Create a grpc.RpcMethodHandler for a beta method implementation.

    Selects the handler slot (unary_unary / unary_stream / stream_unary /
    stream_stream) from the implementation's service style (INLINE or
    EVENT) and cardinality, adapting the beta-style callable accordingly.

    Raises:
      ValueError: If the implementation's style or cardinality is not one
        of the recognized combinations.
    """
    if implementation.style is style.Service.INLINE:
        if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
            return _SimpleMethodHandler(
                False, False, request_deserializer, response_serializer,
                _adapt_unary_request_inline(implementation.unary_unary_inline),
                None, None, None)
        elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
            return _SimpleMethodHandler(
                False, True, request_deserializer, response_serializer,
                None,
                _adapt_unary_request_inline(implementation.unary_stream_inline),
                None, None)
        elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
            return _SimpleMethodHandler(
                True, False, request_deserializer, response_serializer,
                None, None,
                _adapt_stream_request_inline(
                    implementation.stream_unary_inline),
                None)
        elif (
            implementation.cardinality is cardinality.Cardinality.STREAM_STREAM
        ):
            return _SimpleMethodHandler(
                True, True, request_deserializer, response_serializer,
                None, None, None,
                _adapt_stream_request_inline(
                    implementation.stream_stream_inline))
    elif implementation.style is style.Service.EVENT:
        if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
            return _SimpleMethodHandler(
                False, False, request_deserializer, response_serializer,
                _adapt_unary_unary_event(implementation.unary_unary_event),
                None, None, None)
        elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
            return _SimpleMethodHandler(
                False, True, request_deserializer, response_serializer,
                None,
                _adapt_unary_stream_event(implementation.unary_stream_event),
                None, None)
        elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
            return _SimpleMethodHandler(
                True, False, request_deserializer, response_serializer,
                None, None,
                _adapt_stream_unary_event(implementation.stream_unary_event),
                None)
        elif (
            implementation.cardinality is cardinality.Cardinality.STREAM_STREAM
        ):
            return _SimpleMethodHandler(
                True, True, request_deserializer, response_serializer,
                None, None, None,
                _adapt_stream_stream_event(implementation.stream_stream_event))
    # Previously a bare ValueError() — include a diagnostic so misconfigured
    # implementations are debuggable.
    raise ValueError(
        "Unrecognized service style %s or cardinality %s!"
        % (implementation.style, implementation.cardinality)
    )
|
| 369 |
+
|
| 370 |
+
|
| 371 |
+
def _flatten_method_pair_map(method_pair_map):
    """Re-key a {(group, method): value} map by fully qualified method name."""
    pairs = method_pair_map or {}
    return {
        _common.fully_qualified_method(group, method): value
        for (group, method), value in pairs.items()
    }
|
| 378 |
+
|
| 379 |
+
|
| 380 |
+
class _GenericRpcHandler(grpc.GenericRpcHandler):
    """Routes incoming RPCs to beta-style method implementations.

    Method implementations, request deserializers, and response serializers
    are all keyed by fully qualified method name (flattened from the beta
    API's (group, method) pairs).
    """

    def __init__(
        self,
        method_implementations,
        multi_method_implementation,
        request_deserializers,
        response_serializers,
    ):
        self._method_implementations = _flatten_method_pair_map(
            method_implementations
        )
        self._request_deserializers = _flatten_method_pair_map(
            request_deserializers
        )
        self._response_serializers = _flatten_method_pair_map(
            response_serializers
        )
        self._multi_method_implementation = multi_method_implementation

    def service(self, handler_call_details):
        """Return the method handler for an incoming RPC, or None.

        Returns None both when the method is unknown and when only a
        multi-method implementation exists (dispatching to it is not yet
        implemented).
        """
        method_implementation = self._method_implementations.get(
            handler_call_details.method
        )
        if method_implementation is not None:
            return _simple_method_handler(
                method_implementation,
                self._request_deserializers.get(handler_call_details.method),
                self._response_serializers.get(handler_call_details.method),
            )
        elif self._multi_method_implementation is None:
            return None
        else:
            # The original wrapped this in try/except face.NoSuchMethodError,
            # but `return None` cannot raise, so the handler was dead code.
            return None  # TODO(nathaniel): call the multimethod.
|
| 416 |
+
|
| 417 |
+
|
| 418 |
+
class _Server(interfaces.Server):
|
| 419 |
+
def __init__(self, grpc_server):
|
| 420 |
+
self._grpc_server = grpc_server
|
| 421 |
+
|
| 422 |
+
def add_insecure_port(self, address):
|
| 423 |
+
return self._grpc_server.add_insecure_port(address)
|
| 424 |
+
|
| 425 |
+
def add_secure_port(self, address, server_credentials):
|
| 426 |
+
return self._grpc_server.add_secure_port(address, server_credentials)
|
| 427 |
+
|
| 428 |
+
def start(self):
|
| 429 |
+
self._grpc_server.start()
|
| 430 |
+
|
| 431 |
+
def stop(self, grace):
|
| 432 |
+
return self._grpc_server.stop(grace)
|
| 433 |
+
|
| 434 |
+
def __enter__(self):
|
| 435 |
+
self._grpc_server.start()
|
| 436 |
+
return self
|
| 437 |
+
|
| 438 |
+
def __exit__(self, exc_type, exc_val, exc_tb):
|
| 439 |
+
self._grpc_server.stop(None)
|
| 440 |
+
return False
|
| 441 |
+
|
| 442 |
+
|
| 443 |
+
def server(
|
| 444 |
+
service_implementations,
|
| 445 |
+
multi_method_implementation,
|
| 446 |
+
request_deserializers,
|
| 447 |
+
response_serializers,
|
| 448 |
+
thread_pool,
|
| 449 |
+
thread_pool_size,
|
| 450 |
+
):
|
| 451 |
+
generic_rpc_handler = _GenericRpcHandler(
|
| 452 |
+
service_implementations,
|
| 453 |
+
multi_method_implementation,
|
| 454 |
+
request_deserializers,
|
| 455 |
+
response_serializers,
|
| 456 |
+
)
|
| 457 |
+
if thread_pool is None:
|
| 458 |
+
effective_thread_pool = logging_pool.pool(
|
| 459 |
+
_DEFAULT_POOL_SIZE if thread_pool_size is None else thread_pool_size
|
| 460 |
+
)
|
| 461 |
+
else:
|
| 462 |
+
effective_thread_pool = thread_pool
|
| 463 |
+
return _Server(
|
| 464 |
+
grpc.server(effective_thread_pool, handlers=(generic_rpc_handler,))
|
| 465 |
+
)
|
lib/python3.10/site-packages/grpc/beta/implementations.py
ADDED
|
@@ -0,0 +1,345 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2015-2016 gRPC authors.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
"""Entry points into the Beta API of gRPC Python."""
|
| 15 |
+
|
| 16 |
+
# threading is referenced from specification in this module.
|
| 17 |
+
import threading # pylint: disable=unused-import
|
| 18 |
+
|
| 19 |
+
# interfaces, cardinality, and face are referenced from specification in this
|
| 20 |
+
# module.
|
| 21 |
+
import grpc
|
| 22 |
+
from grpc import _auth
|
| 23 |
+
from grpc.beta import _client_adaptations
|
| 24 |
+
from grpc.beta import _metadata
|
| 25 |
+
from grpc.beta import _server_adaptations
|
| 26 |
+
from grpc.beta import interfaces # pylint: disable=unused-import
|
| 27 |
+
from grpc.framework.common import cardinality # pylint: disable=unused-import
|
| 28 |
+
from grpc.framework.interfaces.face import face # pylint: disable=unused-import
|
| 29 |
+
|
| 30 |
+
# pylint: disable=too-many-arguments
|
| 31 |
+
|
| 32 |
+
ChannelCredentials = grpc.ChannelCredentials
|
| 33 |
+
ssl_channel_credentials = grpc.ssl_channel_credentials
|
| 34 |
+
CallCredentials = grpc.CallCredentials
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def metadata_call_credentials(metadata_plugin, name=None):
|
| 38 |
+
def plugin(context, callback):
|
| 39 |
+
def wrapped_callback(beta_metadata, error):
|
| 40 |
+
callback(_metadata.unbeta(beta_metadata), error)
|
| 41 |
+
|
| 42 |
+
metadata_plugin(context, wrapped_callback)
|
| 43 |
+
|
| 44 |
+
return grpc.metadata_call_credentials(plugin, name=name)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def google_call_credentials(credentials):
|
| 48 |
+
"""Construct CallCredentials from GoogleCredentials.
|
| 49 |
+
|
| 50 |
+
Args:
|
| 51 |
+
credentials: A GoogleCredentials object from the oauth2client library.
|
| 52 |
+
|
| 53 |
+
Returns:
|
| 54 |
+
A CallCredentials object for use in a GRPCCallOptions object.
|
| 55 |
+
"""
|
| 56 |
+
return metadata_call_credentials(_auth.GoogleCallCredentials(credentials))
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
access_token_call_credentials = grpc.access_token_call_credentials
|
| 60 |
+
composite_call_credentials = grpc.composite_call_credentials
|
| 61 |
+
composite_channel_credentials = grpc.composite_channel_credentials
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
class Channel(object):
|
| 65 |
+
"""A channel to a remote host through which RPCs may be conducted.
|
| 66 |
+
|
| 67 |
+
Only the "subscribe" and "unsubscribe" methods are supported for application
|
| 68 |
+
use. This class' instance constructor and all other attributes are
|
| 69 |
+
unsupported.
|
| 70 |
+
"""
|
| 71 |
+
|
| 72 |
+
def __init__(self, channel):
|
| 73 |
+
self._channel = channel
|
| 74 |
+
|
| 75 |
+
def subscribe(self, callback, try_to_connect=None):
|
| 76 |
+
"""Subscribes to this Channel's connectivity.
|
| 77 |
+
|
| 78 |
+
Args:
|
| 79 |
+
callback: A callable to be invoked and passed an
|
| 80 |
+
interfaces.ChannelConnectivity identifying this Channel's connectivity.
|
| 81 |
+
The callable will be invoked immediately upon subscription and again for
|
| 82 |
+
every change to this Channel's connectivity thereafter until it is
|
| 83 |
+
unsubscribed.
|
| 84 |
+
try_to_connect: A boolean indicating whether or not this Channel should
|
| 85 |
+
attempt to connect if it is not already connected and ready to conduct
|
| 86 |
+
RPCs.
|
| 87 |
+
"""
|
| 88 |
+
self._channel.subscribe(callback, try_to_connect=try_to_connect)
|
| 89 |
+
|
| 90 |
+
def unsubscribe(self, callback):
|
| 91 |
+
"""Unsubscribes a callback from this Channel's connectivity.
|
| 92 |
+
|
| 93 |
+
Args:
|
| 94 |
+
callback: A callable previously registered with this Channel from having
|
| 95 |
+
been passed to its "subscribe" method.
|
| 96 |
+
"""
|
| 97 |
+
self._channel.unsubscribe(callback)
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def insecure_channel(host, port):
|
| 101 |
+
"""Creates an insecure Channel to a remote host.
|
| 102 |
+
|
| 103 |
+
Args:
|
| 104 |
+
host: The name of the remote host to which to connect.
|
| 105 |
+
port: The port of the remote host to which to connect.
|
| 106 |
+
If None only the 'host' part will be used.
|
| 107 |
+
|
| 108 |
+
Returns:
|
| 109 |
+
A Channel to the remote host through which RPCs may be conducted.
|
| 110 |
+
"""
|
| 111 |
+
channel = grpc.insecure_channel(
|
| 112 |
+
host if port is None else "%s:%d" % (host, port)
|
| 113 |
+
)
|
| 114 |
+
return Channel(channel)
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def secure_channel(host, port, channel_credentials):
|
| 118 |
+
"""Creates a secure Channel to a remote host.
|
| 119 |
+
|
| 120 |
+
Args:
|
| 121 |
+
host: The name of the remote host to which to connect.
|
| 122 |
+
port: The port of the remote host to which to connect.
|
| 123 |
+
If None only the 'host' part will be used.
|
| 124 |
+
channel_credentials: A ChannelCredentials.
|
| 125 |
+
|
| 126 |
+
Returns:
|
| 127 |
+
A secure Channel to the remote host through which RPCs may be conducted.
|
| 128 |
+
"""
|
| 129 |
+
channel = grpc.secure_channel(
|
| 130 |
+
host if port is None else "%s:%d" % (host, port), channel_credentials
|
| 131 |
+
)
|
| 132 |
+
return Channel(channel)
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
class StubOptions(object):
|
| 136 |
+
"""A value encapsulating the various options for creation of a Stub.
|
| 137 |
+
|
| 138 |
+
This class and its instances have no supported interface - it exists to define
|
| 139 |
+
the type of its instances and its instances exist to be passed to other
|
| 140 |
+
functions.
|
| 141 |
+
"""
|
| 142 |
+
|
| 143 |
+
def __init__(
|
| 144 |
+
self,
|
| 145 |
+
host,
|
| 146 |
+
request_serializers,
|
| 147 |
+
response_deserializers,
|
| 148 |
+
metadata_transformer,
|
| 149 |
+
thread_pool,
|
| 150 |
+
thread_pool_size,
|
| 151 |
+
):
|
| 152 |
+
self.host = host
|
| 153 |
+
self.request_serializers = request_serializers
|
| 154 |
+
self.response_deserializers = response_deserializers
|
| 155 |
+
self.metadata_transformer = metadata_transformer
|
| 156 |
+
self.thread_pool = thread_pool
|
| 157 |
+
self.thread_pool_size = thread_pool_size
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
_EMPTY_STUB_OPTIONS = StubOptions(None, None, None, None, None, None)
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
def stub_options(
|
| 164 |
+
host=None,
|
| 165 |
+
request_serializers=None,
|
| 166 |
+
response_deserializers=None,
|
| 167 |
+
metadata_transformer=None,
|
| 168 |
+
thread_pool=None,
|
| 169 |
+
thread_pool_size=None,
|
| 170 |
+
):
|
| 171 |
+
"""Creates a StubOptions value to be passed at stub creation.
|
| 172 |
+
|
| 173 |
+
All parameters are optional and should always be passed by keyword.
|
| 174 |
+
|
| 175 |
+
Args:
|
| 176 |
+
host: A host string to set on RPC calls.
|
| 177 |
+
request_serializers: A dictionary from service name-method name pair to
|
| 178 |
+
request serialization behavior.
|
| 179 |
+
response_deserializers: A dictionary from service name-method name pair to
|
| 180 |
+
response deserialization behavior.
|
| 181 |
+
metadata_transformer: A callable that given a metadata object produces
|
| 182 |
+
another metadata object to be used in the underlying communication on the
|
| 183 |
+
wire.
|
| 184 |
+
thread_pool: A thread pool to use in stubs.
|
| 185 |
+
thread_pool_size: The size of thread pool to create for use in stubs;
|
| 186 |
+
ignored if thread_pool has been passed.
|
| 187 |
+
|
| 188 |
+
Returns:
|
| 189 |
+
A StubOptions value created from the passed parameters.
|
| 190 |
+
"""
|
| 191 |
+
return StubOptions(
|
| 192 |
+
host,
|
| 193 |
+
request_serializers,
|
| 194 |
+
response_deserializers,
|
| 195 |
+
metadata_transformer,
|
| 196 |
+
thread_pool,
|
| 197 |
+
thread_pool_size,
|
| 198 |
+
)
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
def generic_stub(channel, options=None):
|
| 202 |
+
"""Creates a face.GenericStub on which RPCs can be made.
|
| 203 |
+
|
| 204 |
+
Args:
|
| 205 |
+
channel: A Channel for use by the created stub.
|
| 206 |
+
options: A StubOptions customizing the created stub.
|
| 207 |
+
|
| 208 |
+
Returns:
|
| 209 |
+
A face.GenericStub on which RPCs can be made.
|
| 210 |
+
"""
|
| 211 |
+
effective_options = _EMPTY_STUB_OPTIONS if options is None else options
|
| 212 |
+
return _client_adaptations.generic_stub(
|
| 213 |
+
channel._channel, # pylint: disable=protected-access
|
| 214 |
+
effective_options.host,
|
| 215 |
+
effective_options.metadata_transformer,
|
| 216 |
+
effective_options.request_serializers,
|
| 217 |
+
effective_options.response_deserializers,
|
| 218 |
+
)
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
def dynamic_stub(channel, service, cardinalities, options=None):
|
| 222 |
+
"""Creates a face.DynamicStub with which RPCs can be invoked.
|
| 223 |
+
|
| 224 |
+
Args:
|
| 225 |
+
channel: A Channel for the returned face.DynamicStub to use.
|
| 226 |
+
service: The package-qualified full name of the service.
|
| 227 |
+
cardinalities: A dictionary from RPC method name to cardinality.Cardinality
|
| 228 |
+
value identifying the cardinality of the RPC method.
|
| 229 |
+
options: An optional StubOptions value further customizing the functionality
|
| 230 |
+
of the returned face.DynamicStub.
|
| 231 |
+
|
| 232 |
+
Returns:
|
| 233 |
+
A face.DynamicStub with which RPCs can be invoked.
|
| 234 |
+
"""
|
| 235 |
+
effective_options = _EMPTY_STUB_OPTIONS if options is None else options
|
| 236 |
+
return _client_adaptations.dynamic_stub(
|
| 237 |
+
channel._channel, # pylint: disable=protected-access
|
| 238 |
+
service,
|
| 239 |
+
cardinalities,
|
| 240 |
+
effective_options.host,
|
| 241 |
+
effective_options.metadata_transformer,
|
| 242 |
+
effective_options.request_serializers,
|
| 243 |
+
effective_options.response_deserializers,
|
| 244 |
+
)
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
ServerCredentials = grpc.ServerCredentials
|
| 248 |
+
ssl_server_credentials = grpc.ssl_server_credentials
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
class ServerOptions(object):
|
| 252 |
+
"""A value encapsulating the various options for creation of a Server.
|
| 253 |
+
|
| 254 |
+
This class and its instances have no supported interface - it exists to define
|
| 255 |
+
the type of its instances and its instances exist to be passed to other
|
| 256 |
+
functions.
|
| 257 |
+
"""
|
| 258 |
+
|
| 259 |
+
def __init__(
|
| 260 |
+
self,
|
| 261 |
+
multi_method_implementation,
|
| 262 |
+
request_deserializers,
|
| 263 |
+
response_serializers,
|
| 264 |
+
thread_pool,
|
| 265 |
+
thread_pool_size,
|
| 266 |
+
default_timeout,
|
| 267 |
+
maximum_timeout,
|
| 268 |
+
):
|
| 269 |
+
self.multi_method_implementation = multi_method_implementation
|
| 270 |
+
self.request_deserializers = request_deserializers
|
| 271 |
+
self.response_serializers = response_serializers
|
| 272 |
+
self.thread_pool = thread_pool
|
| 273 |
+
self.thread_pool_size = thread_pool_size
|
| 274 |
+
self.default_timeout = default_timeout
|
| 275 |
+
self.maximum_timeout = maximum_timeout
|
| 276 |
+
|
| 277 |
+
|
| 278 |
+
_EMPTY_SERVER_OPTIONS = ServerOptions(None, None, None, None, None, None, None)
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
def server_options(
|
| 282 |
+
multi_method_implementation=None,
|
| 283 |
+
request_deserializers=None,
|
| 284 |
+
response_serializers=None,
|
| 285 |
+
thread_pool=None,
|
| 286 |
+
thread_pool_size=None,
|
| 287 |
+
default_timeout=None,
|
| 288 |
+
maximum_timeout=None,
|
| 289 |
+
):
|
| 290 |
+
"""Creates a ServerOptions value to be passed at server creation.
|
| 291 |
+
|
| 292 |
+
All parameters are optional and should always be passed by keyword.
|
| 293 |
+
|
| 294 |
+
Args:
|
| 295 |
+
multi_method_implementation: A face.MultiMethodImplementation to be called
|
| 296 |
+
to service an RPC if the server has no specific method implementation for
|
| 297 |
+
the name of the RPC for which service was requested.
|
| 298 |
+
request_deserializers: A dictionary from service name-method name pair to
|
| 299 |
+
request deserialization behavior.
|
| 300 |
+
response_serializers: A dictionary from service name-method name pair to
|
| 301 |
+
response serialization behavior.
|
| 302 |
+
thread_pool: A thread pool to use in stubs.
|
| 303 |
+
thread_pool_size: The size of thread pool to create for use in stubs;
|
| 304 |
+
ignored if thread_pool has been passed.
|
| 305 |
+
default_timeout: A duration in seconds to allow for RPC service when
|
| 306 |
+
servicing RPCs that did not include a timeout value when invoked.
|
| 307 |
+
maximum_timeout: A duration in seconds to allow for RPC service when
|
| 308 |
+
servicing RPCs no matter what timeout value was passed when the RPC was
|
| 309 |
+
invoked.
|
| 310 |
+
|
| 311 |
+
Returns:
|
| 312 |
+
A StubOptions value created from the passed parameters.
|
| 313 |
+
"""
|
| 314 |
+
return ServerOptions(
|
| 315 |
+
multi_method_implementation,
|
| 316 |
+
request_deserializers,
|
| 317 |
+
response_serializers,
|
| 318 |
+
thread_pool,
|
| 319 |
+
thread_pool_size,
|
| 320 |
+
default_timeout,
|
| 321 |
+
maximum_timeout,
|
| 322 |
+
)
|
| 323 |
+
|
| 324 |
+
|
| 325 |
+
def server(service_implementations, options=None):
|
| 326 |
+
"""Creates an interfaces.Server with which RPCs can be serviced.
|
| 327 |
+
|
| 328 |
+
Args:
|
| 329 |
+
service_implementations: A dictionary from service name-method name pair to
|
| 330 |
+
face.MethodImplementation.
|
| 331 |
+
options: An optional ServerOptions value further customizing the
|
| 332 |
+
functionality of the returned Server.
|
| 333 |
+
|
| 334 |
+
Returns:
|
| 335 |
+
An interfaces.Server with which RPCs can be serviced.
|
| 336 |
+
"""
|
| 337 |
+
effective_options = _EMPTY_SERVER_OPTIONS if options is None else options
|
| 338 |
+
return _server_adaptations.server(
|
| 339 |
+
service_implementations,
|
| 340 |
+
effective_options.multi_method_implementation,
|
| 341 |
+
effective_options.request_deserializers,
|
| 342 |
+
effective_options.response_serializers,
|
| 343 |
+
effective_options.thread_pool,
|
| 344 |
+
effective_options.thread_pool_size,
|
| 345 |
+
)
|
lib/python3.10/site-packages/grpc/beta/interfaces.py
ADDED
|
@@ -0,0 +1,163 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2015 gRPC authors.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
"""Constants and interfaces of the Beta API of gRPC Python."""
|
| 15 |
+
|
| 16 |
+
import abc
|
| 17 |
+
|
| 18 |
+
import grpc
|
| 19 |
+
|
| 20 |
+
ChannelConnectivity = grpc.ChannelConnectivity
|
| 21 |
+
# FATAL_FAILURE was a Beta-API name for SHUTDOWN
|
| 22 |
+
ChannelConnectivity.FATAL_FAILURE = ChannelConnectivity.SHUTDOWN
|
| 23 |
+
|
| 24 |
+
StatusCode = grpc.StatusCode
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class GRPCCallOptions(object):
|
| 28 |
+
"""A value encapsulating gRPC-specific options passed on RPC invocation.
|
| 29 |
+
|
| 30 |
+
This class and its instances have no supported interface - it exists to
|
| 31 |
+
define the type of its instances and its instances exist to be passed to
|
| 32 |
+
other functions.
|
| 33 |
+
"""
|
| 34 |
+
|
| 35 |
+
def __init__(self, disable_compression, subcall_of, credentials):
|
| 36 |
+
self.disable_compression = disable_compression
|
| 37 |
+
self.subcall_of = subcall_of
|
| 38 |
+
self.credentials = credentials
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def grpc_call_options(disable_compression=False, credentials=None):
|
| 42 |
+
"""Creates a GRPCCallOptions value to be passed at RPC invocation.
|
| 43 |
+
|
| 44 |
+
All parameters are optional and should always be passed by keyword.
|
| 45 |
+
|
| 46 |
+
Args:
|
| 47 |
+
disable_compression: A boolean indicating whether or not compression should
|
| 48 |
+
be disabled for the request object of the RPC. Only valid for
|
| 49 |
+
request-unary RPCs.
|
| 50 |
+
credentials: A CallCredentials object to use for the invoked RPC.
|
| 51 |
+
"""
|
| 52 |
+
return GRPCCallOptions(disable_compression, None, credentials)
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
GRPCAuthMetadataContext = grpc.AuthMetadataContext
|
| 56 |
+
GRPCAuthMetadataPluginCallback = grpc.AuthMetadataPluginCallback
|
| 57 |
+
GRPCAuthMetadataPlugin = grpc.AuthMetadataPlugin
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
class GRPCServicerContext(abc.ABC):
|
| 61 |
+
"""Exposes gRPC-specific options and behaviors to code servicing RPCs."""
|
| 62 |
+
|
| 63 |
+
@abc.abstractmethod
|
| 64 |
+
def peer(self):
|
| 65 |
+
"""Identifies the peer that invoked the RPC being serviced.
|
| 66 |
+
|
| 67 |
+
Returns:
|
| 68 |
+
A string identifying the peer that invoked the RPC being serviced.
|
| 69 |
+
"""
|
| 70 |
+
raise NotImplementedError()
|
| 71 |
+
|
| 72 |
+
@abc.abstractmethod
|
| 73 |
+
def disable_next_response_compression(self):
|
| 74 |
+
"""Disables compression of the next response passed by the application."""
|
| 75 |
+
raise NotImplementedError()
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
class GRPCInvocationContext(abc.ABC):
|
| 79 |
+
"""Exposes gRPC-specific options and behaviors to code invoking RPCs."""
|
| 80 |
+
|
| 81 |
+
@abc.abstractmethod
|
| 82 |
+
def disable_next_request_compression(self):
|
| 83 |
+
"""Disables compression of the next request passed by the application."""
|
| 84 |
+
raise NotImplementedError()
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
class Server(abc.ABC):
|
| 88 |
+
"""Services RPCs."""
|
| 89 |
+
|
| 90 |
+
@abc.abstractmethod
|
| 91 |
+
def add_insecure_port(self, address):
|
| 92 |
+
"""Reserves a port for insecure RPC service once this Server becomes active.
|
| 93 |
+
|
| 94 |
+
This method may only be called before calling this Server's start method is
|
| 95 |
+
called.
|
| 96 |
+
|
| 97 |
+
Args:
|
| 98 |
+
address: The address for which to open a port.
|
| 99 |
+
|
| 100 |
+
Returns:
|
| 101 |
+
An integer port on which RPCs will be serviced after this link has been
|
| 102 |
+
started. This is typically the same number as the port number contained
|
| 103 |
+
in the passed address, but will likely be different if the port number
|
| 104 |
+
contained in the passed address was zero.
|
| 105 |
+
"""
|
| 106 |
+
raise NotImplementedError()
|
| 107 |
+
|
| 108 |
+
@abc.abstractmethod
|
| 109 |
+
def add_secure_port(self, address, server_credentials):
|
| 110 |
+
"""Reserves a port for secure RPC service after this Server becomes active.
|
| 111 |
+
|
| 112 |
+
This method may only be called before calling this Server's start method is
|
| 113 |
+
called.
|
| 114 |
+
|
| 115 |
+
Args:
|
| 116 |
+
address: The address for which to open a port.
|
| 117 |
+
server_credentials: A ServerCredentials.
|
| 118 |
+
|
| 119 |
+
Returns:
|
| 120 |
+
An integer port on which RPCs will be serviced after this link has been
|
| 121 |
+
started. This is typically the same number as the port number contained
|
| 122 |
+
in the passed address, but will likely be different if the port number
|
| 123 |
+
contained in the passed address was zero.
|
| 124 |
+
"""
|
| 125 |
+
raise NotImplementedError()
|
| 126 |
+
|
| 127 |
+
@abc.abstractmethod
|
| 128 |
+
def start(self):
|
| 129 |
+
"""Starts this Server's service of RPCs.
|
| 130 |
+
|
| 131 |
+
This method may only be called while the server is not serving RPCs (i.e. it
|
| 132 |
+
is not idempotent).
|
| 133 |
+
"""
|
| 134 |
+
raise NotImplementedError()
|
| 135 |
+
|
| 136 |
+
@abc.abstractmethod
|
| 137 |
+
def stop(self, grace):
|
| 138 |
+
"""Stops this Server's service of RPCs.
|
| 139 |
+
|
| 140 |
+
All calls to this method immediately stop service of new RPCs. When existing
|
| 141 |
+
RPCs are aborted is controlled by the grace period parameter passed to this
|
| 142 |
+
method.
|
| 143 |
+
|
| 144 |
+
This method may be called at any time and is idempotent. Passing a smaller
|
| 145 |
+
grace value than has been passed in a previous call will have the effect of
|
| 146 |
+
stopping the Server sooner. Passing a larger grace value than has been
|
| 147 |
+
passed in a previous call will not have the effect of stopping the server
|
| 148 |
+
later.
|
| 149 |
+
|
| 150 |
+
Args:
|
| 151 |
+
grace: A duration of time in seconds to allow existing RPCs to complete
|
| 152 |
+
before being aborted by this Server's stopping. May be zero for
|
| 153 |
+
immediate abortion of all in-progress RPCs.
|
| 154 |
+
|
| 155 |
+
Returns:
|
| 156 |
+
A threading.Event that will be set when this Server has completely
|
| 157 |
+
stopped. The returned event may not be set until after the full grace
|
| 158 |
+
period (if some ongoing RPC continues for the full length of the period)
|
| 159 |
+
of it may be set much sooner (such as if this Server had no RPCs underway
|
| 160 |
+
at the time it was stopped or if all RPCs that it had underway completed
|
| 161 |
+
very early in the grace period).
|
| 162 |
+
"""
|
| 163 |
+
raise NotImplementedError()
|
lib/python3.10/site-packages/grpc/beta/utilities.py
ADDED
|
@@ -0,0 +1,153 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2015 gRPC authors.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
"""Utilities for the gRPC Python Beta API."""
|
| 15 |
+
|
| 16 |
+
import threading
|
| 17 |
+
import time
|
| 18 |
+
|
| 19 |
+
# implementations is referenced from specification in this module.
|
| 20 |
+
from grpc.beta import implementations # pylint: disable=unused-import
|
| 21 |
+
from grpc.beta import interfaces
|
| 22 |
+
from grpc.framework.foundation import callable_util
|
| 23 |
+
from grpc.framework.foundation import future
|
| 24 |
+
|
| 25 |
+
_DONE_CALLBACK_EXCEPTION_LOG_MESSAGE = (
|
| 26 |
+
'Exception calling connectivity future "done" callback!'
|
| 27 |
+
)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class _ChannelReadyFuture(future.Future):
    """A future.Future that matures when its channel reaches READY.

    State (all guarded by ``self._condition``):
      _matured:        the channel reached READY and callbacks were fired.
      _cancelled:      cancel() succeeded before the future matured.
      _done_callbacks: callbacks pending invocation; set to None once fired
                       (by either _update or cancel), so a later
                       add_done_callback falls through to immediate invocation.
    """

    def __init__(self, channel):
        self._condition = threading.Condition()
        self._channel = channel

        self._matured = False
        self._cancelled = False
        self._done_callbacks = []

    def _block(self, timeout):
        # Wait until the future matures or is cancelled, or until ``timeout``
        # seconds have elapsed (None means wait indefinitely).
        until = None if timeout is None else time.time() + timeout
        with self._condition:
            while True:
                if self._cancelled:
                    raise future.CancelledError()
                elif self._matured:
                    return
                else:
                    if until is None:
                        self._condition.wait()
                    else:
                        remaining = until - time.time()
                        if remaining < 0:
                            raise future.TimeoutError()
                        else:
                            # Spurious wakeups are handled by re-checking the
                            # state at the top of the while-loop.
                            self._condition.wait(timeout=remaining)

    def _update(self, connectivity):
        # Connectivity-subscription callback registered by start().
        with self._condition:
            if (
                not self._cancelled
                and connectivity is interfaces.ChannelConnectivity.READY
            ):
                self._matured = True
                self._channel.unsubscribe(self._update)
                self._condition.notify_all()
                # Snapshot the callbacks so they can run outside the lock.
                done_callbacks = tuple(self._done_callbacks)
                self._done_callbacks = None
            else:
                return

        # Invoke callbacks without holding the condition lock; exceptions are
        # logged (not propagated) via callable_util.
        for done_callback in done_callbacks:
            callable_util.call_logging_exceptions(
                done_callback, _DONE_CALLBACK_EXCEPTION_LOG_MESSAGE, self
            )

    def cancel(self):
        # Returns True if the future was cancelled here; False if it had
        # already matured (per the future.Future contract).
        with self._condition:
            if not self._matured:
                self._cancelled = True
                self._channel.unsubscribe(self._update)
                self._condition.notify_all()
                done_callbacks = tuple(self._done_callbacks)
                self._done_callbacks = None
            else:
                return False

        # As in _update: run callbacks outside the lock, logging exceptions.
        for done_callback in done_callbacks:
            callable_util.call_logging_exceptions(
                done_callback, _DONE_CALLBACK_EXCEPTION_LOG_MESSAGE, self
            )

        return True

    def cancelled(self):
        with self._condition:
            return self._cancelled

    def running(self):
        with self._condition:
            return not self._cancelled and not self._matured

    def done(self):
        with self._condition:
            return self._cancelled or self._matured

    def result(self, timeout=None):
        # Maturing carries no value: blocking (or raising CancelledError /
        # TimeoutError from _block) is the entire contract.
        self._block(timeout)
        return None

    def exception(self, timeout=None):
        # A matured connectivity future never carries an exception.
        self._block(timeout)
        return None

    def traceback(self, timeout=None):
        # No exception, hence no traceback.
        self._block(timeout)
        return None

    def add_done_callback(self, fn):
        with self._condition:
            if not self._cancelled and not self._matured:
                self._done_callbacks.append(fn)
                return

        # Already done (or cancelled): invoke immediately, outside the lock.
        fn(self)

    def start(self):
        # Subscribe to connectivity updates, asking the channel to connect.
        with self._condition:
            self._channel.subscribe(self._update, try_to_connect=True)

    def __del__(self):
        # Best-effort unsubscribe if the future is garbage-collected while
        # still pending.
        with self._condition:
            if not self._cancelled and not self._matured:
                self._channel.unsubscribe(self._update)
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def channel_ready_future(channel):
    """Track when an implementations.Channel becomes ready.

    Cancelling the returned future.Future merely drops its subscription to
    the given implementations.Channel's connectivity; it does not ask the
    channel to abandon any connection attempts already underway.

    Args:
      channel: An implementations.Channel.

    Returns:
      A future.Future that matures once the given Channel reports
      interfaces.ChannelConnectivity.READY.
    """
    connectivity_future = _ChannelReadyFuture(channel)
    connectivity_future.start()
    return connectivity_future
|
lib/python3.10/site-packages/nltk-3.8.1.dist-info/entry_points.txt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
[console_scripts]
|
| 3 |
+
nltk=nltk.cli:cli
|
lib/python3.10/site-packages/nltk/VERSION
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
3.8.1
|
lib/python3.10/site-packages/nltk/__init__.py
ADDED
|
@@ -0,0 +1,209 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Natural Language Toolkit (NLTK)
|
| 2 |
+
#
|
| 3 |
+
# Copyright (C) 2001-2023 NLTK Project
|
| 4 |
+
# Authors: Steven Bird <stevenbird1@gmail.com>
|
| 5 |
+
# Edward Loper <edloper@gmail.com>
|
| 6 |
+
# URL: <https://www.nltk.org/>
|
| 7 |
+
# For license information, see LICENSE.TXT
|
| 8 |
+
|
| 9 |
+
"""
|
| 10 |
+
The Natural Language Toolkit (NLTK) is an open source Python library
|
| 11 |
+
for Natural Language Processing. A free online book is available.
|
| 12 |
+
(If you use the library for academic research, please cite the book.)
|
| 13 |
+
|
| 14 |
+
Steven Bird, Ewan Klein, and Edward Loper (2009).
|
| 15 |
+
Natural Language Processing with Python. O'Reilly Media Inc.
|
| 16 |
+
https://www.nltk.org/book/
|
| 17 |
+
|
| 18 |
+
isort:skip_file
|
| 19 |
+
"""
|
| 20 |
+
|
| 21 |
+
import os
|
| 22 |
+
|
| 23 |
+
# //////////////////////////////////////////////////////
|
| 24 |
+
# Metadata
|
| 25 |
+
# //////////////////////////////////////////////////////
|
| 26 |
+
|
| 27 |
+
# Version. For each new release, the version number should be updated
|
| 28 |
+
# in the file VERSION.
|
| 29 |
+
try:
    # If a VERSION file exists, use it!
    version_file = os.path.join(os.path.dirname(__file__), "VERSION")
    with open(version_file) as infile:
        __version__ = infile.read().strip()
except NameError:
    # __file__ is undefined when this code is executed interactively.
    __version__ = "unknown (running code interactively?)"
except OSError as ex:
    # VERSION file missing or unreadable; record the error in the string.
    __version__ = "unknown (%s)" % ex

if __doc__ is not None:  # fix for the ``python -OO``
    # Under -OO docstrings are stripped, so __doc__ may be None.
    __doc__ += "\n@version: " + __version__
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
# Copyright notice
|
| 44 |
+
__copyright__ = """\
|
| 45 |
+
Copyright (C) 2001-2023 NLTK Project.
|
| 46 |
+
|
| 47 |
+
Distributed and Licensed under the Apache License, Version 2.0,
|
| 48 |
+
which is included by reference.
|
| 49 |
+
"""
|
| 50 |
+
|
| 51 |
+
__license__ = "Apache License, Version 2.0"
|
| 52 |
+
# Description of the toolkit, keywords, and the project's primary URL.
|
| 53 |
+
__longdescr__ = """\
|
| 54 |
+
The Natural Language Toolkit (NLTK) is a Python package for
|
| 55 |
+
natural language processing. NLTK requires Python 3.7, 3.8, 3.9, 3.10 or 3.11."""
|
| 56 |
+
__keywords__ = [
|
| 57 |
+
"NLP",
|
| 58 |
+
"CL",
|
| 59 |
+
"natural language processing",
|
| 60 |
+
"computational linguistics",
|
| 61 |
+
"parsing",
|
| 62 |
+
"tagging",
|
| 63 |
+
"tokenizing",
|
| 64 |
+
"syntax",
|
| 65 |
+
"linguistics",
|
| 66 |
+
"language",
|
| 67 |
+
"natural language",
|
| 68 |
+
"text analytics",
|
| 69 |
+
]
|
| 70 |
+
__url__ = "https://www.nltk.org/"
|
| 71 |
+
|
| 72 |
+
# Maintainer, contributors, etc.
|
| 73 |
+
__maintainer__ = "NLTK Team"
|
| 74 |
+
__maintainer_email__ = "nltk.team@gmail.com"
|
| 75 |
+
__author__ = __maintainer__
|
| 76 |
+
__author_email__ = __maintainer_email__
|
| 77 |
+
|
| 78 |
+
# "Trove" classifiers for Python Package Index.
|
| 79 |
+
__classifiers__ = [
|
| 80 |
+
"Development Status :: 5 - Production/Stable",
|
| 81 |
+
"Intended Audience :: Developers",
|
| 82 |
+
"Intended Audience :: Education",
|
| 83 |
+
"Intended Audience :: Information Technology",
|
| 84 |
+
"Intended Audience :: Science/Research",
|
| 85 |
+
"License :: OSI Approved :: Apache Software License",
|
| 86 |
+
"Operating System :: OS Independent",
|
| 87 |
+
"Programming Language :: Python :: 3.7",
|
| 88 |
+
"Programming Language :: Python :: 3.8",
|
| 89 |
+
"Programming Language :: Python :: 3.9",
|
| 90 |
+
"Programming Language :: Python :: 3.10",
|
| 91 |
+
"Programming Language :: Python :: 3.11",
|
| 92 |
+
"Topic :: Scientific/Engineering",
|
| 93 |
+
"Topic :: Scientific/Engineering :: Artificial Intelligence",
|
| 94 |
+
"Topic :: Scientific/Engineering :: Human Machine Interfaces",
|
| 95 |
+
"Topic :: Scientific/Engineering :: Information Analysis",
|
| 96 |
+
"Topic :: Text Processing",
|
| 97 |
+
"Topic :: Text Processing :: Filters",
|
| 98 |
+
"Topic :: Text Processing :: General",
|
| 99 |
+
"Topic :: Text Processing :: Indexing",
|
| 100 |
+
"Topic :: Text Processing :: Linguistic",
|
| 101 |
+
]
|
| 102 |
+
|
| 103 |
+
from nltk.internals import config_java
|
| 104 |
+
|
| 105 |
+
# support numpy from pypy
|
| 106 |
+
try:
|
| 107 |
+
import numpypy
|
| 108 |
+
except ImportError:
|
| 109 |
+
pass
|
| 110 |
+
|
| 111 |
+
# Override missing methods on environments where it cannot be used like GAE.
|
| 112 |
+
import subprocess
|
| 113 |
+
|
| 114 |
+
# On restricted platforms (e.g. Google App Engine) the subprocess module may
# lack PIPE/Popen; install stubs that fail loudly if actually used.
if not hasattr(subprocess, "PIPE"):

    def _fake_PIPE(*args, **kwargs):
        raise NotImplementedError("subprocess.PIPE is not supported.")

    subprocess.PIPE = _fake_PIPE
if not hasattr(subprocess, "Popen"):

    def _fake_Popen(*args, **kwargs):
        raise NotImplementedError("subprocess.Popen is not supported.")

    subprocess.Popen = _fake_Popen
|
| 126 |
+
|
| 127 |
+
###########################################################
|
| 128 |
+
# TOP-LEVEL MODULES
|
| 129 |
+
###########################################################
|
| 130 |
+
|
| 131 |
+
# Import top-level functionality into top-level namespace
|
| 132 |
+
|
| 133 |
+
from nltk.collocations import *
|
| 134 |
+
from nltk.decorators import decorator, memoize
|
| 135 |
+
from nltk.featstruct import *
|
| 136 |
+
from nltk.grammar import *
|
| 137 |
+
from nltk.probability import *
|
| 138 |
+
from nltk.text import *
|
| 139 |
+
from nltk.util import *
|
| 140 |
+
from nltk.jsontags import *
|
| 141 |
+
|
| 142 |
+
###########################################################
|
| 143 |
+
# PACKAGES
|
| 144 |
+
###########################################################
|
| 145 |
+
|
| 146 |
+
from nltk.chunk import *
|
| 147 |
+
from nltk.classify import *
|
| 148 |
+
from nltk.inference import *
|
| 149 |
+
from nltk.metrics import *
|
| 150 |
+
from nltk.parse import *
|
| 151 |
+
from nltk.tag import *
|
| 152 |
+
from nltk.tokenize import *
|
| 153 |
+
from nltk.translate import *
|
| 154 |
+
from nltk.tree import *
|
| 155 |
+
from nltk.sem import *
|
| 156 |
+
from nltk.stem import *
|
| 157 |
+
|
| 158 |
+
# Packages which can be lazily imported
|
| 159 |
+
# (a) we don't import *
|
| 160 |
+
# (b) they're slow to import or have run-time dependencies
|
| 161 |
+
# that can safely fail at run time
|
| 162 |
+
|
| 163 |
+
from nltk import lazyimport
|
| 164 |
+
|
| 165 |
+
app = lazyimport.LazyModule("app", locals(), globals())
|
| 166 |
+
chat = lazyimport.LazyModule("chat", locals(), globals())
|
| 167 |
+
corpus = lazyimport.LazyModule("corpus", locals(), globals())
|
| 168 |
+
draw = lazyimport.LazyModule("draw", locals(), globals())
|
| 169 |
+
toolbox = lazyimport.LazyModule("toolbox", locals(), globals())
|
| 170 |
+
|
| 171 |
+
# Optional loading
|
| 172 |
+
|
| 173 |
+
# The cluster package requires numpy; expose it only when numpy imports.
try:
    import numpy
except ImportError:
    pass
else:
    from nltk import cluster

from nltk.downloader import download, download_shell

# The downloader GUI requires tkinter; skip silently when tkinter is absent,
# and warn (without failing the import) if the GUI import itself errors.
try:
    import tkinter
except ImportError:
    pass
else:
    try:
        from nltk.downloader import download_gui
    except RuntimeError as e:
        import warnings

        warnings.warn(
            "Corpus downloader GUI not loaded "
            "(RuntimeError during import: %s)" % str(e)
        )
|
| 196 |
+
|
| 197 |
+
# explicitly import all top-level modules (ensuring
|
| 198 |
+
# they override the same names inadvertently imported
|
| 199 |
+
# from a subpackage)
|
| 200 |
+
|
| 201 |
+
from nltk import ccg, chunk, classify, collocations
|
| 202 |
+
from nltk import data, featstruct, grammar, help, inference, metrics
|
| 203 |
+
from nltk import misc, parse, probability, sem, stem, wsd
|
| 204 |
+
from nltk import tag, tbl, text, tokenize, translate, tree, util
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
# FIXME: override any accidentally imported demo, see https://github.com/nltk/nltk/issues/2116
|
| 208 |
+
def demo():
    """Print a hint about how to run per-module demo code."""
    message = "To run the demo code for a module, type nltk.module.demo()"
    print(message)
|
lib/python3.10/site-packages/nltk/book.py
ADDED
|
@@ -0,0 +1,213 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Natural Language Toolkit: Some texts for exploration in chapter 1 of the book
|
| 2 |
+
#
|
| 3 |
+
# Copyright (C) 2001-2023 NLTK Project
|
| 4 |
+
# Author: Steven Bird <stevenbird1@gmail.com>
|
| 5 |
+
#
|
| 6 |
+
# URL: <https://www.nltk.org/>
|
| 7 |
+
# For license information, see LICENSE.TXT
|
| 8 |
+
|
| 9 |
+
from nltk.corpus import (
|
| 10 |
+
genesis,
|
| 11 |
+
gutenberg,
|
| 12 |
+
inaugural,
|
| 13 |
+
nps_chat,
|
| 14 |
+
treebank,
|
| 15 |
+
webtext,
|
| 16 |
+
wordnet,
|
| 17 |
+
)
|
| 18 |
+
from nltk.probability import FreqDist
|
| 19 |
+
from nltk.text import Text
|
| 20 |
+
from nltk.util import bigrams
|
| 21 |
+
|
| 22 |
+
print("*** Introductory Examples for the NLTK Book ***")
|
| 23 |
+
print("Loading text1, ..., text9 and sent1, ..., sent9")
|
| 24 |
+
print("Type the name of the text or sentence to view it.")
|
| 25 |
+
print("Type: 'texts()' or 'sents()' to list the materials.")
|
| 26 |
+
|
| 27 |
+
text1 = Text(gutenberg.words("melville-moby_dick.txt"))
|
| 28 |
+
print("text1:", text1.name)
|
| 29 |
+
|
| 30 |
+
text2 = Text(gutenberg.words("austen-sense.txt"))
|
| 31 |
+
print("text2:", text2.name)
|
| 32 |
+
|
| 33 |
+
text3 = Text(genesis.words("english-kjv.txt"), name="The Book of Genesis")
|
| 34 |
+
print("text3:", text3.name)
|
| 35 |
+
|
| 36 |
+
text4 = Text(inaugural.words(), name="Inaugural Address Corpus")
|
| 37 |
+
print("text4:", text4.name)
|
| 38 |
+
|
| 39 |
+
text5 = Text(nps_chat.words(), name="Chat Corpus")
|
| 40 |
+
print("text5:", text5.name)
|
| 41 |
+
|
| 42 |
+
text6 = Text(webtext.words("grail.txt"), name="Monty Python and the Holy Grail")
|
| 43 |
+
print("text6:", text6.name)
|
| 44 |
+
|
| 45 |
+
text7 = Text(treebank.words(), name="Wall Street Journal")
|
| 46 |
+
print("text7:", text7.name)
|
| 47 |
+
|
| 48 |
+
text8 = Text(webtext.words("singles.txt"), name="Personals Corpus")
|
| 49 |
+
print("text8:", text8.name)
|
| 50 |
+
|
| 51 |
+
text9 = Text(gutenberg.words("chesterton-thursday.txt"))
|
| 52 |
+
print("text9:", text9.name)
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def texts():
    """List the pre-loaded book texts (text1 ... text9) by name."""
    for index in range(1, 10):
        text = globals()["text%d" % index]
        print("text%d:" % index, text.name)
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
sent1 = ["Call", "me", "Ishmael", "."]
|
| 68 |
+
sent2 = [
|
| 69 |
+
"The",
|
| 70 |
+
"family",
|
| 71 |
+
"of",
|
| 72 |
+
"Dashwood",
|
| 73 |
+
"had",
|
| 74 |
+
"long",
|
| 75 |
+
"been",
|
| 76 |
+
"settled",
|
| 77 |
+
"in",
|
| 78 |
+
"Sussex",
|
| 79 |
+
".",
|
| 80 |
+
]
|
| 81 |
+
sent3 = [
|
| 82 |
+
"In",
|
| 83 |
+
"the",
|
| 84 |
+
"beginning",
|
| 85 |
+
"God",
|
| 86 |
+
"created",
|
| 87 |
+
"the",
|
| 88 |
+
"heaven",
|
| 89 |
+
"and",
|
| 90 |
+
"the",
|
| 91 |
+
"earth",
|
| 92 |
+
".",
|
| 93 |
+
]
|
| 94 |
+
sent4 = [
|
| 95 |
+
"Fellow",
|
| 96 |
+
"-",
|
| 97 |
+
"Citizens",
|
| 98 |
+
"of",
|
| 99 |
+
"the",
|
| 100 |
+
"Senate",
|
| 101 |
+
"and",
|
| 102 |
+
"of",
|
| 103 |
+
"the",
|
| 104 |
+
"House",
|
| 105 |
+
"of",
|
| 106 |
+
"Representatives",
|
| 107 |
+
":",
|
| 108 |
+
]
|
| 109 |
+
sent5 = [
|
| 110 |
+
"I",
|
| 111 |
+
"have",
|
| 112 |
+
"a",
|
| 113 |
+
"problem",
|
| 114 |
+
"with",
|
| 115 |
+
"people",
|
| 116 |
+
"PMing",
|
| 117 |
+
"me",
|
| 118 |
+
"to",
|
| 119 |
+
"lol",
|
| 120 |
+
"JOIN",
|
| 121 |
+
]
|
| 122 |
+
sent6 = [
|
| 123 |
+
"SCENE",
|
| 124 |
+
"1",
|
| 125 |
+
":",
|
| 126 |
+
"[",
|
| 127 |
+
"wind",
|
| 128 |
+
"]",
|
| 129 |
+
"[",
|
| 130 |
+
"clop",
|
| 131 |
+
"clop",
|
| 132 |
+
"clop",
|
| 133 |
+
"]",
|
| 134 |
+
"KING",
|
| 135 |
+
"ARTHUR",
|
| 136 |
+
":",
|
| 137 |
+
"Whoa",
|
| 138 |
+
"there",
|
| 139 |
+
"!",
|
| 140 |
+
]
|
| 141 |
+
sent7 = [
|
| 142 |
+
"Pierre",
|
| 143 |
+
"Vinken",
|
| 144 |
+
",",
|
| 145 |
+
"61",
|
| 146 |
+
"years",
|
| 147 |
+
"old",
|
| 148 |
+
",",
|
| 149 |
+
"will",
|
| 150 |
+
"join",
|
| 151 |
+
"the",
|
| 152 |
+
"board",
|
| 153 |
+
"as",
|
| 154 |
+
"a",
|
| 155 |
+
"nonexecutive",
|
| 156 |
+
"director",
|
| 157 |
+
"Nov.",
|
| 158 |
+
"29",
|
| 159 |
+
".",
|
| 160 |
+
]
|
| 161 |
+
sent8 = [
|
| 162 |
+
"25",
|
| 163 |
+
"SEXY",
|
| 164 |
+
"MALE",
|
| 165 |
+
",",
|
| 166 |
+
"seeks",
|
| 167 |
+
"attrac",
|
| 168 |
+
"older",
|
| 169 |
+
"single",
|
| 170 |
+
"lady",
|
| 171 |
+
",",
|
| 172 |
+
"for",
|
| 173 |
+
"discreet",
|
| 174 |
+
"encounters",
|
| 175 |
+
".",
|
| 176 |
+
]
|
| 177 |
+
sent9 = [
|
| 178 |
+
"THE",
|
| 179 |
+
"suburb",
|
| 180 |
+
"of",
|
| 181 |
+
"Saffron",
|
| 182 |
+
"Park",
|
| 183 |
+
"lay",
|
| 184 |
+
"on",
|
| 185 |
+
"the",
|
| 186 |
+
"sunset",
|
| 187 |
+
"side",
|
| 188 |
+
"of",
|
| 189 |
+
"London",
|
| 190 |
+
",",
|
| 191 |
+
"as",
|
| 192 |
+
"red",
|
| 193 |
+
"and",
|
| 194 |
+
"ragged",
|
| 195 |
+
"as",
|
| 196 |
+
"a",
|
| 197 |
+
"cloud",
|
| 198 |
+
"of",
|
| 199 |
+
"sunset",
|
| 200 |
+
".",
|
| 201 |
+
]
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
def sents():
    """Print the nine example sentences (sent1 ... sent9), space-joined."""
    for index in range(1, 10):
        words = globals()["sent%d" % index]
        print("sent%d:" % index, " ".join(words))
|
lib/python3.10/site-packages/nltk/cli.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Natural Language Toolkit: NLTK Command-Line Interface
|
| 2 |
+
#
|
| 3 |
+
# Copyright (C) 2001-2023 NLTK Project
|
| 4 |
+
# URL: <https://www.nltk.org/>
|
| 5 |
+
# For license information, see LICENSE.TXT
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
import click
|
| 9 |
+
from tqdm import tqdm
|
| 10 |
+
|
| 11 |
+
from nltk import word_tokenize
|
| 12 |
+
from nltk.util import parallelize_preprocess
|
| 13 |
+
|
| 14 |
+
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
@click.group(context_settings=CONTEXT_SETTINGS)
@click.version_option()
def cli():
    # Root command group for the ``nltk`` console-script entry point.
    # (No docstring: click would surface it as --help text.)
    pass
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
@cli.command("tokenize")
|
| 24 |
+
@click.option(
|
| 25 |
+
"--language",
|
| 26 |
+
"-l",
|
| 27 |
+
default="en",
|
| 28 |
+
help="The language for the Punkt sentence tokenization.",
|
| 29 |
+
)
|
| 30 |
+
@click.option(
|
| 31 |
+
"--preserve-line",
|
| 32 |
+
"-l",
|
| 33 |
+
default=True,
|
| 34 |
+
is_flag=True,
|
| 35 |
+
help="An option to keep the preserve the sentence and not sentence tokenize it.",
|
| 36 |
+
)
|
| 37 |
+
@click.option("--processes", "-j", default=1, help="No. of processes.")
|
| 38 |
+
@click.option("--encoding", "-e", default="utf8", help="Specify encoding of file.")
|
| 39 |
+
@click.option(
|
| 40 |
+
"--delimiter", "-d", default=" ", help="Specify delimiter to join the tokens."
|
| 41 |
+
)
|
| 42 |
+
def tokenize_file(language, preserve_line, processes, encoding, delimiter):
|
| 43 |
+
"""This command tokenizes text stream using nltk.word_tokenize"""
|
| 44 |
+
with click.get_text_stream("stdin", encoding=encoding) as fin:
|
| 45 |
+
with click.get_text_stream("stdout", encoding=encoding) as fout:
|
| 46 |
+
# If it's single process, joblib parallelization is slower,
|
| 47 |
+
# so just process line by line normally.
|
| 48 |
+
if processes == 1:
|
| 49 |
+
for line in tqdm(fin.readlines()):
|
| 50 |
+
print(delimiter.join(word_tokenize(line)), end="\n", file=fout)
|
| 51 |
+
else:
|
| 52 |
+
for outline in parallelize_preprocess(
|
| 53 |
+
word_tokenize, fin.readlines(), processes, progress_bar=True
|
| 54 |
+
):
|
| 55 |
+
print(delimiter.join(outline), end="\n", file=fout)
|
lib/python3.10/site-packages/nltk/collections.py
ADDED
|
@@ -0,0 +1,661 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Natural Language Toolkit: Collections
|
| 2 |
+
#
|
| 3 |
+
# Copyright (C) 2001-2023 NLTK Project
|
| 4 |
+
# Author: Steven Bird <stevenbird1@gmail.com>
|
| 5 |
+
# URL: <https://www.nltk.org/>
|
| 6 |
+
# For license information, see LICENSE.TXT
|
| 7 |
+
|
| 8 |
+
import bisect
|
| 9 |
+
|
| 10 |
+
# this unused import is for python 2.7
|
| 11 |
+
from collections import Counter, defaultdict, deque
|
| 12 |
+
from functools import total_ordering
|
| 13 |
+
from itertools import chain, islice
|
| 14 |
+
|
| 15 |
+
from nltk.internals import raise_unorderable_types, slice_bounds
|
| 16 |
+
|
| 17 |
+
##########################################################################
|
| 18 |
+
# Ordered Dictionary
|
| 19 |
+
##########################################################################
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class OrderedDict(dict):
|
| 23 |
+
def __init__(self, data=None, **kwargs):
|
| 24 |
+
self._keys = self.keys(data, kwargs.get("keys"))
|
| 25 |
+
self._default_factory = kwargs.get("default_factory")
|
| 26 |
+
if data is None:
|
| 27 |
+
dict.__init__(self)
|
| 28 |
+
else:
|
| 29 |
+
dict.__init__(self, data)
|
| 30 |
+
|
| 31 |
+
def __delitem__(self, key):
|
| 32 |
+
dict.__delitem__(self, key)
|
| 33 |
+
self._keys.remove(key)
|
| 34 |
+
|
| 35 |
+
def __getitem__(self, key):
|
| 36 |
+
try:
|
| 37 |
+
return dict.__getitem__(self, key)
|
| 38 |
+
except KeyError:
|
| 39 |
+
return self.__missing__(key)
|
| 40 |
+
|
| 41 |
+
def __iter__(self):
|
| 42 |
+
return (key for key in self.keys())
|
| 43 |
+
|
| 44 |
+
def __missing__(self, key):
|
| 45 |
+
if not self._default_factory and key not in self._keys:
|
| 46 |
+
raise KeyError()
|
| 47 |
+
return self._default_factory()
|
| 48 |
+
|
| 49 |
+
def __setitem__(self, key, item):
|
| 50 |
+
dict.__setitem__(self, key, item)
|
| 51 |
+
if key not in self._keys:
|
| 52 |
+
self._keys.append(key)
|
| 53 |
+
|
| 54 |
+
def clear(self):
|
| 55 |
+
dict.clear(self)
|
| 56 |
+
self._keys.clear()
|
| 57 |
+
|
| 58 |
+
def copy(self):
|
| 59 |
+
d = dict.copy(self)
|
| 60 |
+
d._keys = self._keys
|
| 61 |
+
return d
|
| 62 |
+
|
| 63 |
+
def items(self):
|
| 64 |
+
# returns iterator under python 3 and list under python 2
|
| 65 |
+
return zip(self.keys(), self.values())
|
| 66 |
+
|
| 67 |
+
def keys(self, data=None, keys=None):
|
| 68 |
+
if data:
|
| 69 |
+
if keys:
|
| 70 |
+
assert isinstance(keys, list)
|
| 71 |
+
assert len(data) == len(keys)
|
| 72 |
+
return keys
|
| 73 |
+
else:
|
| 74 |
+
assert (
|
| 75 |
+
isinstance(data, dict)
|
| 76 |
+
or isinstance(data, OrderedDict)
|
| 77 |
+
or isinstance(data, list)
|
| 78 |
+
)
|
| 79 |
+
if isinstance(data, dict) or isinstance(data, OrderedDict):
|
| 80 |
+
return data.keys()
|
| 81 |
+
elif isinstance(data, list):
|
| 82 |
+
return [key for (key, value) in data]
|
| 83 |
+
elif "_keys" in self.__dict__:
|
| 84 |
+
return self._keys
|
| 85 |
+
else:
|
| 86 |
+
return []
|
| 87 |
+
|
| 88 |
+
def popitem(self):
|
| 89 |
+
if not self._keys:
|
| 90 |
+
raise KeyError()
|
| 91 |
+
|
| 92 |
+
key = self._keys.pop()
|
| 93 |
+
value = self[key]
|
| 94 |
+
del self[key]
|
| 95 |
+
return (key, value)
|
| 96 |
+
|
| 97 |
+
def setdefault(self, key, failobj=None):
|
| 98 |
+
dict.setdefault(self, key, failobj)
|
| 99 |
+
if key not in self._keys:
|
| 100 |
+
self._keys.append(key)
|
| 101 |
+
|
| 102 |
+
def update(self, data):
|
| 103 |
+
dict.update(self, data)
|
| 104 |
+
for key in self.keys(data):
|
| 105 |
+
if key not in self._keys:
|
| 106 |
+
self._keys.append(key)
|
| 107 |
+
|
| 108 |
+
def values(self):
|
| 109 |
+
# returns iterator under python 3
|
| 110 |
+
return map(self.get, self._keys)
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
######################################################################
|
| 114 |
+
# Lazy Sequences
|
| 115 |
+
######################################################################
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
@total_ordering
class AbstractLazySequence:
    """
    An abstract base class for read-only sequences whose values are
    computed as needed. Lazy sequences act like tuples -- they can be
    indexed, sliced, and iterated over; but they may not be modified.

    The most common application of lazy sequences in NLTK is for
    corpus view objects, which provide access to the contents of a
    corpus without loading the entire corpus into memory, by loading
    pieces of the corpus from disk as needed.

    The result of modifying a mutable element of a lazy sequence is
    undefined. In particular, the modifications made to the element
    may or may not persist, depending on whether and when the lazy
    sequence caches that element's value or reconstructs it from
    scratch.

    Subclasses are required to define two methods: ``__len__()``
    and ``iterate_from()``.
    """

    def __len__(self):
        """
        Return the number of tokens in the corpus file underlying this
        corpus view.
        """
        raise NotImplementedError("should be implemented by subclass")

    def iterate_from(self, start):
        """
        Return an iterator that generates the tokens in the corpus
        file underlying this corpus view, starting at the token number
        ``start``. If ``start>=len(self)``, then this iterator will
        generate no tokens.
        """
        raise NotImplementedError("should be implemented by subclass")

    def __getitem__(self, i):
        """
        Return the *i* th token in the corpus file underlying this
        corpus view. Negative indices and spans are both supported.

        :raises IndexError: if ``i`` is out of range.
        """
        if isinstance(i, slice):
            # Slices produce another lazy view rather than a copy.
            start, stop = slice_bounds(self, i)
            return LazySubsequence(self, start, stop)
        else:
            # Handle negative indices
            if i < 0:
                i += len(self)
            if i < 0:
                raise IndexError("index out of range")
            # Use iterate_from to extract it.
            try:
                return next(self.iterate_from(i))
            except StopIteration as e:
                # iterate_from yields nothing for i >= len(self).
                raise IndexError("index out of range") from e

    def __iter__(self):
        """Return an iterator that generates the tokens in the corpus
        file underlying this corpus view."""
        return self.iterate_from(0)

    def count(self, value):
        """Return the number of times this list contains ``value``."""
        # NOTE: this iterates the whole sequence, forcing lazy evaluation.
        return sum(1 for elt in self if elt == value)

    def index(self, value, start=None, stop=None):
        """Return the index of the first occurrence of ``value`` in this
        list that is greater than or equal to ``start`` and less than
        ``stop``. Negative start and stop values are treated like negative
        slice bounds -- i.e., they count from the end of the list.

        :raises ValueError: if ``value`` does not occur in the range.
        """
        start, stop = slice_bounds(self, slice(start, stop))
        for i, elt in enumerate(islice(self, start, stop)):
            if elt == value:
                # enumerate counts from 0, so re-offset by ``start``.
                return i + start
        raise ValueError("index(x): x not in list")

    def __contains__(self, value):
        """Return true if this list contains ``value``."""
        return bool(self.count(value))

    def __add__(self, other):
        """Return a list concatenating self with other."""
        return LazyConcatenation([self, other])

    def __radd__(self, other):
        """Return a list concatenating other with self."""
        return LazyConcatenation([other, self])

    def __mul__(self, count):
        """Return a list concatenating self with itself ``count`` times."""
        return LazyConcatenation([self] * count)

    def __rmul__(self, count):
        """Return a list concatenating self with itself ``count`` times."""
        return LazyConcatenation([self] * count)

    # Maximum number of characters __repr__ will emit before truncating.
    _MAX_REPR_SIZE = 60

    def __repr__(self):
        """
        Return a string representation for this corpus view that is
        similar to a list's representation; but if it would be more
        than 60 characters long, it is truncated.
        """
        pieces = []
        # Start the length budget at 5 -- presumably headroom for the
        # surrounding brackets/ellipsis; TODO confirm the exact intent.
        length = 5
        for elt in self:
            pieces.append(repr(elt))
            # +2 accounts for the ", " separator between elements.
            length += len(pieces[-1]) + 2
            if length > self._MAX_REPR_SIZE and len(pieces) > 2:
                # Drop the element that overflowed and mark truncation.
                return "[%s, ...]" % ", ".join(pieces[:-1])
        return "[%s]" % ", ".join(pieces)

    def __eq__(self, other):
        # Equality requires identical concrete type AND identical
        # contents; note this fully materializes both sequences.
        return type(self) == type(other) and list(self) == list(other)

    def __ne__(self, other):
        return not self == other

    def __lt__(self, other):
        # Ordering is only defined between objects of the same type;
        # @total_ordering derives the remaining comparison operators.
        if type(other) != type(self):
            raise_unorderable_types("<", self, other)
        return list(self) < list(other)

    def __hash__(self):
        """
        :raise ValueError: Corpus view objects are unhashable.

        NOTE(review): raising ValueError (rather than the conventional
        TypeError / ``__hash__ = None``) is long-standing API behavior
        here; callers may catch ValueError, so it is preserved.
        """
        raise ValueError("%s objects are unhashable" % self.__class__.__name__)
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
class LazySubsequence(AbstractLazySequence):
    """
    A read-only view onto a contiguous region of another lazy
    sequence.  The slice keeps a reference to its source sequence and
    fetches values from it on demand instead of copying them up front.
    """

    MIN_SIZE = 100
    # Slices shorter than MIN_SIZE are not worth a lazy wrapper:
    # __new__ materializes them eagerly and returns a plain list.

    def __new__(cls, source, start, stop):
        """
        Create a view of ``source[start:stop]``.  ``start`` and
        ``stop`` must be absolute indices -- non-negative and no
        greater than ``len(source)``.

        For spans shorter than ``MIN_SIZE`` the values are pulled
        eagerly and a plain list is returned; since that result is not
        an instance of ``cls``, Python skips ``__init__`` in that case.
        """
        if stop - start >= cls.MIN_SIZE:
            return object.__new__(cls)
        # Small slice: materialize it directly from the source.
        return list(islice(source.iterate_from(start), stop - start))

    def __init__(self, source, start, stop):
        self._source = source
        self._start = start
        self._stop = stop

    def __len__(self):
        return self._stop - self._start

    def iterate_from(self, start):
        # Translate the view-relative offset into a source offset, and
        # cap the iteration at this view's remaining length.
        remaining = max(0, len(self) - start)
        return islice(self._source.iterate_from(start + self._start), remaining)
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
class LazyConcatenation(AbstractLazySequence):
    """
    A lazy sequence formed by concatenating a list of lists. This
    underlying list of lists may itself be lazy. ``LazyConcatenation``
    maintains an index that it uses to keep track of the relationship
    between offsets in the concatenated lists and offsets in the
    sublists.
    """

    def __init__(self, list_of_lists):
        self._list = list_of_lists
        # _offsets[i] is the flattened-sequence offset of the first
        # element of sublist i; it is extended lazily as sublists are
        # visited, so _offsets[-1] is the end of the last known sublist.
        self._offsets = [0]

    def __len__(self):
        # If some sublists have not been measured yet, iterate to the
        # end so that _offsets[-1] becomes the total flattened length.
        if len(self._offsets) <= len(self._list):
            for _ in self.iterate_from(self._offsets[-1]):
                pass
        return self._offsets[-1]

    def iterate_from(self, start_index):
        # Locate the sublist containing start_index: binary-search the
        # offsets already recorded, or resume from the last known one.
        if start_index < self._offsets[-1]:
            sublist_index = bisect.bisect_right(self._offsets, start_index) - 1
        else:
            sublist_index = len(self._offsets) - 1

        # Flattened offset of the first element of the current sublist.
        index = self._offsets[sublist_index]

        # Construct an iterator over the sublists.
        if isinstance(self._list, AbstractLazySequence):
            sublist_iter = self._list.iterate_from(sublist_index)
        else:
            sublist_iter = islice(self._list, sublist_index, None)

        for sublist in sublist_iter:
            if sublist_index == (len(self._offsets) - 1):
                # First visit to this sublist: record its end offset.
                assert (
                    index + len(sublist) >= self._offsets[-1]
                ), "offsets not monotonic increasing!"
                self._offsets.append(index + len(sublist))
            else:
                # Re-visit: verify the sublist's length has not changed.
                assert self._offsets[sublist_index + 1] == index + len(
                    sublist
                ), "inconsistent list value (num elts)"

            # Within the first sublist, skip elements before start_index;
            # subsequent sublists are yielded in full (the max() is 0).
            yield from sublist[max(0, start_index - index) :]

            index += len(sublist)
            sublist_index += 1
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
class LazyMap(AbstractLazySequence):
    """
    A lazy sequence whose elements are formed by applying a given
    function to each element in one or more underlying lists. The
    function is applied lazily -- i.e., when you read a value from the
    list, ``LazyMap`` will calculate that value by applying its
    function to the underlying lists' value(s). ``LazyMap`` is
    essentially a lazy version of the Python primitive function
    ``map``. In particular, the following two expressions are
    equivalent:

        >>> from nltk.collections import LazyMap
        >>> function = str
        >>> sequence = [1,2,3]
        >>> map(function, sequence) # doctest: +SKIP
        ['1', '2', '3']
        >>> list(LazyMap(function, sequence))
        ['1', '2', '3']

    Like the Python ``map`` primitive, if the source lists do not have
    equal size, then the value None will be supplied for the
    'missing' elements.

    Lazy maps can be useful for conserving memory, in cases where
    individual values take up a lot of space. This is especially true
    if the underlying list's values are constructed lazily, as is the
    case with many corpus readers.

    A typical example of a use case for this class is performing
    feature detection on the tokens in a corpus. Since featuresets
    are encoded as dictionaries, which can take up a lot of memory,
    using a ``LazyMap`` can significantly reduce memory usage when
    training and running classifiers.
    """

    def __init__(self, function, *lists, **config):
        """
        :param function: The function that should be applied to
            elements of ``lists``. It should take as many arguments
            as there are ``lists``.
        :param lists: The underlying lists.
        :param cache_size: Determines the size of the cache used
            by this lazy map. (default=5)
        :raises TypeError: if no underlying lists are given.
        """
        if not lists:
            raise TypeError("LazyMap requires at least two args")

        self._lists = lists
        self._func = function
        self._cache_size = config.get("cache_size", 5)
        # A cache_size <= 0 disables caching entirely.
        self._cache = {} if self._cache_size > 0 else None

        # True only when *every* underlying list is lazy; a mix of lazy
        # and non-lazy lists falls through to the general indexing case
        # in iterate_from().
        self._all_lazy = all(isinstance(lst, AbstractLazySequence) for lst in lists)

    def iterate_from(self, index):
        """
        Generate mapped values starting at position ``index``, stopping
        once every underlying list is exhausted.
        """
        # Special case: one lazy sublist
        if len(self._lists) == 1 and self._all_lazy:
            for value in self._lists[0].iterate_from(index):
                yield self._func(value)
            return

        # Special case: one non-lazy sublist
        elif len(self._lists) == 1:
            while True:
                try:
                    yield self._func(self._lists[0][index])
                except IndexError:
                    return
                index += 1

        # Special case: n lazy sublists
        elif self._all_lazy:
            iterators = [lst.iterate_from(index) for lst in self._lists]
            while True:
                elements = []
                for iterator in iterators:
                    try:
                        elements.append(next(iterator))
                    except StopIteration:
                        # An exhausted list contributes None, mirroring
                        # the documented behavior for unequal lengths.
                        # (Was a bare ``except:``, which also swallowed
                        # KeyboardInterrupt and genuine errors.)
                        elements.append(None)
                if elements == [None] * len(self._lists):
                    # All lists exhausted.
                    return
                yield self._func(*elements)
                index += 1

        # general case
        else:
            while True:
                try:
                    elements = [lst[index] for lst in self._lists]
                except IndexError:
                    # At least one list is exhausted: retry per-list so
                    # the still-live lists contribute real values and
                    # the exhausted ones contribute None.
                    elements = [None] * len(self._lists)
                    for i, lst in enumerate(self._lists):
                        try:
                            elements[i] = lst[index]
                        except IndexError:
                            pass
                if elements == [None] * len(self._lists):
                    return
                yield self._func(*elements)
                index += 1

    def __getitem__(self, index):
        """
        Return the mapped value (or a lazily mapped slice) at ``index``.

        :raises IndexError: if ``index`` is out of range.
        """
        if isinstance(index, slice):
            # Slice each underlying list and wrap the results in a new
            # LazyMap over the same function.
            sliced_lists = [lst[index] for lst in self._lists]
            return LazyMap(self._func, *sliced_lists)
        else:
            # Handle negative indices
            if index < 0:
                index += len(self)
            if index < 0:
                raise IndexError("index out of range")
            # Check the cache
            if self._cache is not None and index in self._cache:
                return self._cache[index]
            # Calculate the value
            try:
                val = next(self.iterate_from(index))
            except StopIteration as e:
                raise IndexError("index out of range") from e
            # Update the cache
            if self._cache is not None:
                if len(self._cache) > self._cache_size:
                    # Evict an arbitrary entry to bound the cache size
                    # (dict.popitem removes the most recently inserted
                    # entry in CPython 3.7+).
                    self._cache.popitem()
                self._cache[index] = val
            # Return the value
            return val

    def __len__(self):
        # The map is as long as the *longest* underlying list; shorter
        # lists are padded with None by iterate_from().
        return max(len(lst) for lst in self._lists)
|
| 477 |
+
|
| 478 |
+
|
| 479 |
+
class LazyZip(LazyMap):
    """
    A lazy sequence whose elements are tuples, each containing the i-th
    element from each of the argument sequences. The returned list is
    truncated in length to the length of the shortest argument sequence. The
    tuples are constructed lazily -- i.e., when you read a value from the
    list, ``LazyZip`` will calculate that value by forming a tuple from
    the i-th element of each of the argument sequences.

    ``LazyZip`` is essentially a lazy version of the Python primitive function
    ``zip``. In particular, an evaluated LazyZip is equivalent to a zip:

        >>> from nltk.collections import LazyZip
        >>> sequence1, sequence2 = [1, 2, 3], ['a', 'b', 'c']
        >>> zip(sequence1, sequence2) # doctest: +SKIP
        [(1, 'a'), (2, 'b'), (3, 'c')]
        >>> list(LazyZip(sequence1, sequence2))
        [(1, 'a'), (2, 'b'), (3, 'c')]
        >>> sequences = [sequence1, sequence2, [6,7,8,9]]
        >>> list(zip(*sequences)) == list(LazyZip(*sequences))
        True

    Lazy zips can be useful for conserving memory in cases where the argument
    sequences are particularly long.

    A typical example of a use case for this class is combining long sequences
    of gold standard and predicted values in a classification or tagging task
    in order to calculate accuracy. By constructing tuples lazily and
    avoiding the creation of an additional long sequence, memory usage can be
    significantly reduced.
    """

    def __init__(self, *lists):
        """
        :param lists: the underlying lists
        :type lists: list(list)
        """
        # Delegate to LazyMap with a tuple-forming function; __len__
        # below overrides LazyMap's max-length semantics with zip's
        # shortest-length semantics.
        LazyMap.__init__(self, lambda *elts: elts, *lists)

    def iterate_from(self, index):
        # LazyMap pads past the shortest list with None; cut the
        # iteration off at len(self) to get zip's truncation behavior.
        inner = LazyMap.iterate_from(self, index)
        while index < len(self):
            yield next(inner)
            index += 1

    def __len__(self):
        # zip semantics: length of the shortest underlying list.
        return min(map(len, self._lists))
|
| 527 |
+
|
| 528 |
+
|
| 529 |
+
class LazyEnumerate(LazyZip):
    """
    A lazy sequence whose elements are tuples, each containing a count (from
    zero) and a value yielded by underlying sequence. ``LazyEnumerate`` is
    useful for obtaining an indexed list. The tuples are constructed lazily
    -- i.e., when you read a value from the list, ``LazyEnumerate`` will
    calculate that value by forming a tuple from the count of the i-th
    element and the i-th element of the underlying sequence.

    ``LazyEnumerate`` is essentially a lazy version of the Python primitive
    function ``enumerate``. In particular, the following two expressions are
    equivalent:

        >>> from nltk.collections import LazyEnumerate
        >>> sequence = ['first', 'second', 'third']
        >>> list(enumerate(sequence))
        [(0, 'first'), (1, 'second'), (2, 'third')]
        >>> list(LazyEnumerate(sequence))
        [(0, 'first'), (1, 'second'), (2, 'third')]

    Lazy enumerations can be useful for conserving memory in cases where the
    argument sequences are particularly long.

    A typical example of a use case for this class is obtaining an indexed
    list for a long sequence of values. By constructing tuples lazily and
    avoiding the creation of an additional long sequence, memory usage can be
    significantly reduced.
    """

    def __init__(self, lst):
        """
        :param lst: the underlying list
        :type lst: list
        """
        # Enumeration is just a zip of the counter 0..len-1 with lst.
        super().__init__(range(len(lst)), lst)
|
| 564 |
+
|
| 565 |
+
|
| 566 |
+
class LazyIteratorList(AbstractLazySequence):
    """
    Wraps an iterator, loading its elements on demand
    and making them subscriptable.
    __repr__ displays only the first few elements.
    """

    def __init__(self, it, known_len=None):
        """
        :param it: the iterator to wrap
        :param known_len: the iterator's length, if known in advance;
            supplying it lets ``len()`` answer without exhausting ``it``.
        """
        self._it = it
        self._len = known_len
        # Elements already consumed from _it, in order.
        self._cache = []

    def __len__(self):
        # Compare against None explicitly so a declared length of zero
        # is honored rather than triggering a full re-count (the old
        # truthiness test treated known_len=0 as "unknown").
        if self._len is not None:
            return self._len
        # Exhaust the iterator (caching every element) to measure it.
        for _ in self.iterate_from(len(self._cache)):
            pass
        self._len = len(self._cache)
        return self._len

    def iterate_from(self, start):
        """Create a new iterator over this list starting at the given offset."""
        # Fill the cache up to the requested offset. Catch exhaustion
        # explicitly: letting StopIteration escape a generator raises
        # RuntimeError under PEP 479, whereas the documented contract is
        # to generate no tokens when start >= len(self).
        while len(self._cache) < start:
            try:
                v = next(self._it)
            except StopIteration:
                break
            self._cache.append(v)
        # Serve already-cached elements first.
        i = start
        while i < len(self._cache):
            yield self._cache[i]
            i += 1
        # Then keep pulling from the underlying iterator, caching each
        # new element as it is produced.
        try:
            while True:
                v = next(self._it)
                self._cache.append(v)
                yield v
        except StopIteration:
            pass

    def __add__(self, other):
        """Return a list concatenating self with other."""
        return type(self)(chain(self, other))

    def __radd__(self, other):
        """Return a list concatenating other with self."""
        return type(self)(chain(other, self))
|
| 610 |
+
|
| 611 |
+
|
| 612 |
+
######################################################################
|
| 613 |
+
# Trie Implementation
|
| 614 |
+
######################################################################
|
| 615 |
+
class Trie(dict):
    """A Trie implementation for strings"""

    # Sentinel key marking that the path from the root to this node
    # spells out a complete inserted string.
    LEAF = True

    def __init__(self, strings=None):
        """Builds a Trie object, which is built around a ``dict``

        If ``strings`` is provided, it will add the ``strings``, which
        consist of a ``list`` of ``strings``, to the Trie.
        Otherwise, it'll construct an empty Trie.

        :param strings: List of strings to insert into the trie
            (Default is ``None``)
        :type strings: list(str)

        """
        super().__init__()
        for string in strings or ():
            self.insert(string)

    def insert(self, string):
        """Inserts ``string`` into the Trie

        :param string: String to insert into the trie
        :type string: str

        :Example:

        >>> from nltk.collections import Trie
        >>> trie = Trie(["abc", "def"])
        >>> expected = {'a': {'b': {'c': {True: None}}}, \
            'd': {'e': {'f': {True: None}}}}
        >>> trie == expected
        True

        """
        # Walk (and create, via __missing__) one child node per
        # character, then mark the final node as a complete string.
        node = self
        for char in string:
            node = node[char]
        node[Trie.LEAF] = None

    def __missing__(self, key):
        # Accessing an absent child creates it, which is what lets
        # insert() walk the trie without explicit existence checks.
        child = Trie()
        self[key] = child
        return child
|
lib/python3.10/site-packages/nltk/collocations.py
ADDED
|
@@ -0,0 +1,412 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Natural Language Toolkit: Collocations and Association Measures
|
| 2 |
+
#
|
| 3 |
+
# Copyright (C) 2001-2023 NLTK Project
|
| 4 |
+
# Author: Joel Nothman <jnothman@student.usyd.edu.au>
|
| 5 |
+
# URL: <https://www.nltk.org/>
|
| 6 |
+
# For license information, see LICENSE.TXT
|
| 7 |
+
#
|
| 8 |
+
"""
|
| 9 |
+
Tools to identify collocations --- words that often appear consecutively
|
| 10 |
+
--- within corpora. They may also be used to find other associations between
|
| 11 |
+
word occurrences.
|
| 12 |
+
See Manning and Schutze ch. 5 at https://nlp.stanford.edu/fsnlp/promo/colloc.pdf
|
| 13 |
+
and the Text::NSP Perl package at http://ngram.sourceforge.net
|
| 14 |
+
|
| 15 |
+
Finding collocations requires first calculating the frequencies of words and
|
| 16 |
+
their appearance in the context of other words. Often the collection of words
|
| 17 |
+
will then require filtering to retain only useful content terms. Each ngram
|
| 18 |
+
of words may then be scored according to some association measure, in order
|
| 19 |
+
to determine the relative likelihood of each ngram being a collocation.
|
| 20 |
+
|
| 21 |
+
The ``BigramCollocationFinder`` and ``TrigramCollocationFinder`` classes provide
|
| 22 |
+
these functionalities, dependent on being provided a function which scores a
|
| 23 |
+
ngram given appropriate frequency counts. A number of standard association
|
| 24 |
+
measures are provided in bigram_measures and trigram_measures.
|
| 25 |
+
"""
|
| 26 |
+
|
| 27 |
+
# Possible TODOs:
|
| 28 |
+
# - consider the distinction between f(x,_) and f(x) and whether our
|
| 29 |
+
# approximation is good enough for fragmented data, and mention it
|
| 30 |
+
# - add a n-gram collocation finder with measures which only utilise n-gram
|
| 31 |
+
# and unigram counts (raw_freq, pmi, student_t)
|
| 32 |
+
|
| 33 |
+
import itertools as _itertools
|
| 34 |
+
|
| 35 |
+
# these two unused imports are referenced in collocations.doctest
|
| 36 |
+
from nltk.metrics import (
|
| 37 |
+
BigramAssocMeasures,
|
| 38 |
+
ContingencyMeasures,
|
| 39 |
+
QuadgramAssocMeasures,
|
| 40 |
+
TrigramAssocMeasures,
|
| 41 |
+
)
|
| 42 |
+
from nltk.metrics.spearman import ranks_from_scores, spearman_correlation
|
| 43 |
+
from nltk.probability import FreqDist
|
| 44 |
+
from nltk.util import ngrams
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
class AbstractCollocationFinder:
    """
    An abstract base class for collocation finders whose purpose is to
    collect collocation candidate frequencies, filter and rank them.

    As a minimum, collocation finders require the frequencies of each
    word in a corpus, and the joint frequency of word tuples. This data
    should be provided through nltk.probability.FreqDist objects or an
    identical interface.
    """

    def __init__(self, word_fd, ngram_fd):
        """
        :param word_fd: frequency distribution of individual words
        :param ngram_fd: frequency distribution of candidate ngrams
        """
        self.word_fd = word_fd
        # Total number of word tokens observed.
        self.N = word_fd.N()
        self.ngram_fd = ngram_fd

    @classmethod
    def _build_new_documents(
        cls, documents, window_size, pad_left=False, pad_right=False, pad_symbol=None
    ):
        """
        Pad the document with the place holder according to the window_size.

        :param documents: iterable of token iterables
        :param window_size: ngram window; window_size - 1 pad symbols are
            inserted at each document boundary
        :return: a flat iterator over all tokens with padding inserted
        """
        padding = (pad_symbol,) * (window_size - 1)
        if pad_right:
            return _itertools.chain.from_iterable(
                _itertools.chain(doc, padding) for doc in documents
            )
        if pad_left:
            return _itertools.chain.from_iterable(
                _itertools.chain(padding, doc) for doc in documents
            )
        # Neither flag set: previously this fell off the end and
        # returned None; return the unpadded concatenation instead.
        return _itertools.chain.from_iterable(documents)

    @classmethod
    def from_documents(cls, documents):
        """Constructs a collocation finder given a collection of documents,
        each of which is a list (or iterable) of tokens.
        """
        # Pad between documents so ngrams never span a document boundary.
        return cls.from_words(
            cls._build_new_documents(documents, cls.default_ws, pad_right=True)
        )

    @staticmethod
    def _ngram_freqdist(words, n):
        """Return a FreqDist of the complete n-grams in ``words``."""
        # Bound fixed to len(words) - n + 1: the old len(words) - 1
        # also produced truncated tail tuples for any n > 2.
        return FreqDist(tuple(words[i : i + n]) for i in range(len(words) - n + 1))

    def _apply_filter(self, fn=lambda ngram, freq: False):
        """Generic filter removes ngrams from the frequency distribution
        if the function returns True when passed an ngram tuple.
        """
        tmp_ngram = FreqDist()
        for ngram, freq in self.ngram_fd.items():
            if not fn(ngram, freq):
                tmp_ngram[ngram] = freq
        self.ngram_fd = tmp_ngram

    def apply_freq_filter(self, min_freq):
        """Removes candidate ngrams which have frequency less than min_freq."""
        self._apply_filter(lambda ng, freq: freq < min_freq)

    def apply_ngram_filter(self, fn):
        """Removes candidate ngrams (w1, w2, ...) where fn(w1, w2, ...)
        evaluates to True.
        """
        self._apply_filter(lambda ng, f: fn(*ng))

    def apply_word_filter(self, fn):
        """Removes candidate ngrams (w1, w2, ...) where any of (fn(w1), fn(w2),
        ...) evaluates to True.
        """
        self._apply_filter(lambda ng, f: any(fn(w) for w in ng))

    def _score_ngrams(self, score_fn):
        """Generates of (ngram, score) pairs as determined by the scoring
        function provided.  Ngrams whose score is None are skipped.
        """
        for tup in self.ngram_fd:
            score = self.score_ngram(score_fn, *tup)
            if score is not None:
                yield tup, score

    def score_ngrams(self, score_fn):
        """Returns a sequence of (ngram, score) pairs ordered from highest to
        lowest score, as determined by the scoring function provided.
        Ties are broken by the ngram itself for deterministic output.
        """
        return sorted(self._score_ngrams(score_fn), key=lambda t: (-t[1], t[0]))

    def nbest(self, score_fn, n):
        """Returns the top n ngrams when scored by the given function."""
        return [p for p, s in self.score_ngrams(score_fn)[:n]]

    def above_score(self, score_fn, min_score):
        """Returns a sequence of ngrams, ordered by decreasing score, whose
        scores each exceed the given minimum score.
        """
        for ngram, score in self.score_ngrams(score_fn):
            if score > min_score:
                yield ngram
            else:
                # Scores are sorted descending, so we can stop early.
                break
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
class BigramCollocationFinder(AbstractCollocationFinder):
    """A tool for the finding and ranking of bigram collocations or other
    association measures. It is often useful to use from_words() rather than
    constructing an instance directly.
    """

    # Default window width used by AbstractCollocationFinder.from_documents.
    default_ws = 2

    def __init__(self, word_fd, bigram_fd, window_size=2):
        """Construct a BigramCollocationFinder, given FreqDists for
        appearances of words and (possibly non-contiguous) bigrams.
        """
        AbstractCollocationFinder.__init__(self, word_fd, bigram_fd)
        self.window_size = window_size

    @classmethod
    def from_words(cls, words, window_size=2):
        """Construct a BigramCollocationFinder for all bigrams in the given
        sequence. When window_size > 2, count non-contiguous bigrams, in the
        style of Church and Hanks's (1990) association ratio.
        """
        if window_size < 2:
            raise ValueError("Specify window_size at least 2")

        word_freq = FreqDist()
        bigram_freq = FreqDist()

        for window in ngrams(words, window_size, pad_right=True):
            head, *rest = window
            if head is None:
                # Right-padding reached: no real token starts this window.
                continue
            word_freq[head] += 1
            # Pair the head with every later (real) token in the window.
            for other in rest:
                if other is None:
                    continue
                bigram_freq[(head, other)] += 1
        return cls(word_freq, bigram_freq, window_size=window_size)

    def score_ngram(self, score_fn, w1, w2):
        """Returns the score for a given bigram using the given scoring
        function. Following Church and Hanks (1990), counts are scaled by
        a factor of 1/(window_size - 1). Returns None for unseen bigrams.
        """
        scaled_count = self.ngram_fd[(w1, w2)] / (self.window_size - 1.0)
        if not scaled_count:
            # Bigram never observed -- no score.
            return None
        marginals = (self.word_fd[w1], self.word_fd[w2])
        return score_fn(scaled_count, marginals, self.N)
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
class TrigramCollocationFinder(AbstractCollocationFinder):
    """A tool for finding and ranking trigram collocations or other
    association measures.  It is usually preferable to use from_words()
    rather than constructing an instance directly.
    """

    default_ws = 3

    def __init__(self, word_fd, bigram_fd, wildcard_fd, trigram_fd):
        """Construct a TrigramCollocationFinder, given FreqDists for
        appearances of words, of contiguous bigrams, of two words with
        any single word between them, and of trigrams.
        """
        AbstractCollocationFinder.__init__(self, word_fd, trigram_fd)
        self.wildcard_fd = wildcard_fd
        self.bigram_fd = bigram_fd

    @classmethod
    def from_words(cls, words, window_size=3):
        """Build a TrigramCollocationFinder for all trigrams in *words*."""
        if window_size < 3:
            raise ValueError("Specify window_size at least 3")

        unigrams = FreqDist()
        gapped_pairs = FreqDist()
        pairs = FreqDist()
        triples = FreqDist()
        # pad_right=True yields trailing windows whose tail slots are None.
        for window in ngrams(words, window_size, pad_right=True):
            first = window[0]
            if first is None:
                continue
            for second, third in _itertools.combinations(window[1:], 2):
                unigrams[first] += 1
                if second is None:
                    continue
                pairs[(first, second)] += 1
                if third is None:
                    continue
                gapped_pairs[(first, third)] += 1
                triples[(first, second, third)] += 1
        return cls(unigrams, pairs, gapped_pairs, triples)

    def bigram_finder(self):
        """Return a BigramCollocationFinder built from this finder's
        unigram and bigram counts.  Note that any filtering applied to
        this finder does not carry over.
        """
        return BigramCollocationFinder(self.word_fd, self.bigram_fd)

    def score_ngram(self, score_fn, w1, w2, w3):
        """Score the trigram (w1, w2, w3) with *score_fn*; None if unseen."""
        n_all = self.N
        n_iii = self.ngram_fd[(w1, w2, w3)]
        if not n_iii:
            return None
        # Pairwise counts: (w1 w2 _), (w1 _ w3), (_ w2 w3).
        pair_counts = (
            self.bigram_fd[(w1, w2)],
            self.wildcard_fd[(w1, w3)],
            self.bigram_fd[(w2, w3)],
        )
        unigram_counts = (self.word_fd[w1], self.word_fd[w2], self.word_fd[w3])
        return score_fn(n_iii, pair_counts, unigram_counts, n_all)
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
class QuadgramCollocationFinder(AbstractCollocationFinder):
    """A tool for finding and ranking quadgram collocations or other
    association measures.  It is usually preferable to use from_words()
    rather than constructing an instance directly.
    """

    default_ws = 4

    def __init__(self, word_fd, quadgram_fd, ii, iii, ixi, ixxi, iixi, ixii):
        """Construct a QuadgramCollocationFinder given FreqDists for words,
        quadgrams, and the intermediate patterns.  In the pattern names,
        ``i`` marks a counted position and ``x`` a wildcard: bigrams (ii),
        trigrams (iii), word pairs with one (ixi) or two (ixxi) words
        between them, and word triples with a single gap (iixi, ixii).
        """
        AbstractCollocationFinder.__init__(self, word_fd, quadgram_fd)
        self.iii = iii
        self.ii = ii
        self.ixi = ixi
        self.ixxi = ixxi
        self.iixi = iixi
        self.ixii = ixii

    @classmethod
    def from_words(cls, words, window_size=4):
        """Build a QuadgramCollocationFinder for all quadgrams in *words*."""
        if window_size < 4:
            raise ValueError("Specify window_size at least 4")
        ixxx = FreqDist()
        iiii = FreqDist()
        ii = FreqDist()
        iii = FreqDist()
        ixi = FreqDist()
        ixxi = FreqDist()
        iixi = FreqDist()
        ixii = FreqDist()

        # pad_right=True yields trailing windows whose tail slots are None.
        for window in ngrams(words, window_size, pad_right=True):
            first = window[0]
            if first is None:
                continue
            for second, third, fourth in _itertools.combinations(window[1:], 3):
                ixxx[first] += 1
                if second is None:
                    continue
                ii[(first, second)] += 1
                if third is None:
                    continue
                iii[(first, second, third)] += 1
                ixi[(first, third)] += 1
                if fourth is None:
                    continue
                iiii[(first, second, third, fourth)] += 1
                ixxi[(first, fourth)] += 1
                ixii[(first, third, fourth)] += 1
                iixi[(first, second, fourth)] += 1

        return cls(ixxx, iiii, ii, iii, ixi, ixxi, iixi, ixii)

    def score_ngram(self, score_fn, w1, w2, w3, w4):
        """Score the quadgram (w1, w2, w3, w4) with *score_fn*; None if unseen."""
        n_all = self.N
        n_iiii = self.ngram_fd[(w1, w2, w3, w4)]
        if not n_iiii:
            return None

        # Triple counts with one wildcard position.
        n_iiix = self.iii[(w1, w2, w3)]
        n_xiii = self.iii[(w2, w3, w4)]
        n_iixi = self.iixi[(w1, w2, w4)]
        n_ixii = self.ixii[(w1, w3, w4)]

        # Pair counts with two wildcard positions.
        n_iixx = self.ii[(w1, w2)]
        n_xxii = self.ii[(w3, w4)]
        n_xiix = self.ii[(w2, w3)]
        n_ixix = self.ixi[(w1, w3)]
        n_ixxi = self.ixxi[(w1, w4)]
        n_xixi = self.ixi[(w2, w4)]

        unigram_counts = (
            self.word_fd[w1],
            self.word_fd[w2],
            self.word_fd[w3],
            self.word_fd[w4],
        )
        return score_fn(
            n_iiii,
            (n_iiix, n_iixi, n_ixii, n_xiii),
            (n_iixx, n_ixix, n_ixxi, n_xixi, n_xxii, n_xiix),
            unigram_counts,
            n_all,
        )
|
| 352 |
+
|
| 353 |
+
|
| 354 |
+
def demo(scorer=None, compare_scorer=None):
    """Find bigram collocations in the files of the WebText corpus.

    For each file, print the 15 best bigrams under *scorer* and the
    Spearman correlation between the rankings produced by *scorer*
    and *compare_scorer*.
    """
    from nltk.metrics import (
        BigramAssocMeasures,
        ranks_from_scores,
        spearman_correlation,
    )

    if scorer is None:
        scorer = BigramAssocMeasures.likelihood_ratio
    if compare_scorer is None:
        compare_scorer = BigramAssocMeasures.raw_freq

    from nltk.corpus import stopwords, webtext

    ignored_words = stopwords.words("english")

    def word_filter(w):
        # Drop very short words and stopwords before ranking.
        return len(w) < 3 or w.lower() in ignored_words

    for fileid in webtext.fileids():
        tokens = [token.lower() for token in webtext.words(fileid)]

        finder = BigramCollocationFinder.from_words(tokens)
        finder.apply_freq_filter(3)
        finder.apply_word_filter(word_filter)

        corr = spearman_correlation(
            ranks_from_scores(finder.score_ngrams(scorer)),
            ranks_from_scores(finder.score_ngrams(compare_scorer)),
        )
        print(fileid)
        print("\t", [" ".join(tup) for tup in finder.nbest(scorer, 15)])
        print(f"\t Correlation to {compare_scorer.__name__}: {corr:0.4f}")
|
| 386 |
+
|
| 387 |
+
|
| 388 |
+
# Slows down loading too much
|
| 389 |
+
# bigram_measures = BigramAssocMeasures()
|
| 390 |
+
# trigram_measures = TrigramAssocMeasures()
|
| 391 |
+
|
| 392 |
+
if __name__ == "__main__":
    import sys

    from nltk.metrics import BigramAssocMeasures

    # Resolve the measure names with getattr() instead of eval():
    # sys.argv is untrusted input, and getattr produces the same result
    # for any valid attribute name without executing arbitrary code.
    try:
        scorer = getattr(BigramAssocMeasures, sys.argv[1])
    except IndexError:
        scorer = None
    try:
        compare_scorer = getattr(BigramAssocMeasures, sys.argv[2])
    except IndexError:
        compare_scorer = None

    demo(scorer, compare_scorer)

__all__ = [
    "BigramCollocationFinder",
    "TrigramCollocationFinder",
    "QuadgramCollocationFinder",
]
|
lib/python3.10/site-packages/nltk/compat.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Natural Language Toolkit: Compatibility
|
| 2 |
+
#
|
| 3 |
+
# Copyright (C) 2001-2023 NLTK Project
|
| 4 |
+
#
|
| 5 |
+
# URL: <https://www.nltk.org/>
|
| 6 |
+
# For license information, see LICENSE.TXT
|
| 7 |
+
|
| 8 |
+
import os
|
| 9 |
+
from functools import wraps
|
| 10 |
+
|
| 11 |
+
# ======= Compatibility for datasets that care about Python versions ========
|
| 12 |
+
|
| 13 |
+
# The following datasets have a /PY3 subdirectory containing
|
| 14 |
+
# a full copy of the data which has been re-encoded or repickled.
|
| 15 |
+
# The following datasets ship a /PY3 subdirectory containing a full copy
# of the data which has been re-encoded or repickled for Python 3.
DATA_UPDATES = [
    ("chunkers", "maxent_ne_chunker"),
    ("help", "tagsets"),
    ("taggers", "maxent_treebank_pos_tagger"),
    ("tokenizers", "punkt"),
]

_PY3_DATA_UPDATES = [os.path.join(*parts) for parts in DATA_UPDATES]


def add_py3_data(path):
    """Return *path* with ``/PY3`` inserted after the first matching
    data-package fragment; unchanged if no fragment matches or ``/PY3``
    is already present.
    """
    for fragment in _PY3_DATA_UPDATES:
        if fragment not in str(path) or "/PY3" in str(path):
            continue
        insert_at = path.index(fragment) + len(fragment)
        # For zipped packages, /PY3 lives inside the archive, after ".zip".
        if path[insert_at : insert_at + 4] == ".zip":
            insert_at += 4
        return path[:insert_at] + "/PY3" + path[insert_at:]
    return path


# Decorator that adds /PY3 to the second (filename) positional argument
# of the file pointers in data.py.
def py3_data(init_func):
    def _decorator(*args, **kwargs):
        patched = (args[0], add_py3_data(args[1])) + args[2:]
        return init_func(*patched, **kwargs)

    return wraps(init_func)(_decorator)
|
lib/python3.10/site-packages/nltk/data.py
ADDED
|
@@ -0,0 +1,1441 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Natural Language Toolkit: Utility functions
|
| 2 |
+
#
|
| 3 |
+
# Copyright (C) 2001-2023 NLTK Project
|
| 4 |
+
# Author: Edward Loper <edloper@gmail.com>
|
| 5 |
+
# URL: <https://www.nltk.org/>
|
| 6 |
+
# For license information, see LICENSE.TXT
|
| 7 |
+
|
| 8 |
+
"""
|
| 9 |
+
Functions to find and load NLTK resource files, such as corpora,
|
| 10 |
+
grammars, and saved processing objects. Resource files are identified
|
| 11 |
+
using URLs, such as ``nltk:corpora/abc/rural.txt`` or
|
| 12 |
+
``https://raw.githubusercontent.com/nltk/nltk/develop/nltk/test/toy.cfg``.
|
| 13 |
+
The following URL protocols are supported:
|
| 14 |
+
|
| 15 |
+
- ``file:path``: Specifies the file whose path is *path*.
|
| 16 |
+
Both relative and absolute paths may be used.
|
| 17 |
+
|
| 18 |
+
- ``https://host/path``: Specifies the file stored on the web
|
| 19 |
+
server *host* at path *path*.
|
| 20 |
+
|
| 21 |
+
- ``nltk:path``: Specifies the file stored in the NLTK data
|
| 22 |
+
package at *path*. NLTK will search for these files in the
|
| 23 |
+
directories specified by ``nltk.data.path``.
|
| 24 |
+
|
| 25 |
+
If no protocol is specified, then the default protocol ``nltk:`` will
|
| 26 |
+
be used.
|
| 27 |
+
|
| 28 |
+
This module provides two functions that can be used to access a
|
| 29 |
+
resource file, given its URL: ``load()`` loads a given resource, and
|
| 30 |
+
adds it to a resource cache; and ``retrieve()`` copies a given resource
|
| 31 |
+
to a local file.
|
| 32 |
+
"""
|
| 33 |
+
|
| 34 |
+
import codecs
|
| 35 |
+
import functools
|
| 36 |
+
import os
|
| 37 |
+
import pickle
|
| 38 |
+
import re
|
| 39 |
+
import sys
|
| 40 |
+
import textwrap
|
| 41 |
+
import zipfile
|
| 42 |
+
from abc import ABCMeta, abstractmethod
|
| 43 |
+
from gzip import WRITE as GZ_WRITE
|
| 44 |
+
from gzip import GzipFile
|
| 45 |
+
from io import BytesIO, TextIOWrapper
|
| 46 |
+
from urllib.request import url2pathname, urlopen
|
| 47 |
+
|
| 48 |
+
try:
|
| 49 |
+
from zlib import Z_SYNC_FLUSH as FLUSH
|
| 50 |
+
except ImportError:
|
| 51 |
+
from zlib import Z_FINISH as FLUSH
|
| 52 |
+
|
| 53 |
+
from nltk import grammar, sem
|
| 54 |
+
from nltk.compat import add_py3_data, py3_data
|
| 55 |
+
from nltk.internals import deprecated
|
| 56 |
+
|
| 57 |
+
textwrap_indent = functools.partial(textwrap.indent, prefix=" ")
|
| 58 |
+
|
| 59 |
+
######################################################################
|
| 60 |
+
# Search Path
|
| 61 |
+
######################################################################
|
| 62 |
+
|
| 63 |
+
path = []
"""A list of directories where the NLTK data package might reside.
These directories will be checked in order when looking for a
resource in the data package.  Note that this allows users to
substitute in their own versions of resources, if they have them
(e.g., in their home directory under ~/nltk_data)."""

# User-specified locations come first so they win the search:
_paths_from_env = os.environ.get("NLTK_DATA", "").split(os.pathsep)
path += [entry for entry in _paths_from_env if entry]
if "APPENGINE_RUNTIME" not in os.environ and os.path.expanduser("~/") != "~/":
    path.append(os.path.expanduser("~/nltk_data"))

# Installation-relative locations, common to every platform:
path += [
    os.path.join(sys.prefix, "nltk_data"),
    os.path.join(sys.prefix, "share", "nltk_data"),
    os.path.join(sys.prefix, "lib", "nltk_data"),
]
if sys.platform.startswith("win"):
    # Common locations on Windows:
    path += [
        os.path.join(os.environ.get("APPDATA", "C:\\"), "nltk_data"),
        r"C:\nltk_data",
        r"D:\nltk_data",
        r"E:\nltk_data",
    ]
else:
    # Common locations on UNIX & OS X:
    path += [
        "/usr/share/nltk_data",
        "/usr/local/share/nltk_data",
        "/usr/lib/nltk_data",
        "/usr/local/lib/nltk_data",
    ]
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
######################################################################
|
| 101 |
+
# Util Functions
|
| 102 |
+
######################################################################
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def gzip_open_unicode(
    filename,
    mode="rb",
    compresslevel=9,
    encoding="utf-8",
    fileobj=None,
    errors=None,
    newline=None,
):
    """Open a gzip-compressed file and wrap it for unicode text access.

    When *fileobj* is given it is used directly; otherwise a GzipFile
    is opened at *filename*.  Returns a ``TextIOWrapper`` decoding with
    *encoding*.
    """
    raw = fileobj if fileobj is not None else GzipFile(filename, mode, compresslevel, fileobj)
    return TextIOWrapper(raw, encoding, errors, newline)
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
def split_resource_url(resource_url):
    """
    Split a resource url of the form "<protocol>:<path>" into its parts.

    ``nltk`` paths are left untouched; ``file`` paths keep a single
    leading slash when absolute; any other protocol has up to two
    leading slashes stripped (the ``//`` of ``protocol://host``).

    >>> split_resource_url('nltk:home/nltk')
    ('nltk', 'home/nltk')
    >>> split_resource_url('nltk:/home/nltk')
    ('nltk', '/home/nltk')
    >>> split_resource_url('file:/home/nltk')
    ('file', '/home/nltk')
    >>> split_resource_url('file:///home/nltk')
    ('file', '/home/nltk')
    >>> split_resource_url('file:///C:/home/nltk')
    ('file', '/C:/home/nltk')
    """
    protocol, path_ = resource_url.split(":", 1)
    if protocol == "file":
        if path_.startswith("/"):
            # Collapse any run of leading slashes down to exactly one.
            path_ = "/" + path_.lstrip("/")
    elif protocol != "nltk":
        path_ = re.sub(r"^/{0,2}", "", path_)
    return protocol, path_
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
def normalize_resource_url(resource_url):
    r"""
    Normalize a resource url into one of the forms
    ``nltk:<relative-path>``, ``file://<absolute-path>`` or
    ``<protocol>://<path>``.

    >>> normalize_resource_url('nltk:home/nltk')
    'nltk:home/nltk'
    >>> normalize_resource_url('https://example.com/dir/file')
    'https://example.com/dir/file'
    >>> normalize_resource_url('dir/file')
    'nltk:dir/file'
    """
    try:
        protocol, name = split_resource_url(resource_url)
    except ValueError:
        # No protocol given: use the nltk protocol by default.
        protocol, name = "nltk", resource_url

    if protocol == "nltk":
        if os.path.isabs(name):
            # Absolute paths are served through the file protocol.
            protocol = "file://"
            name = normalize_resource_name(name, False, None)
        else:
            protocol = "nltk:"
            name = normalize_resource_name(name, True)
    elif protocol == "file":
        protocol = "file://"
        # name is absolute
        name = normalize_resource_name(name, False, None)
    else:
        # Anything else is handled by urllib.
        protocol += "://"
    return protocol + name
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
def normalize_resource_name(resource_name, allow_relative=True, relative_path=None):
    """
    Normalize *resource_name* into a posix-style path.

    :type resource_name: str or unicode
    :param resource_name: The name of the resource to search for.
        Resource names are posix-style relative path names, such as
        ``corpora/brown``.  Directory names will automatically
        be converted to a platform-appropriate path separator.
        Directory trailing slashes are preserved.

    >>> normalize_resource_name('.', True)
    './'
    >>> normalize_resource_name('./', True)
    './'
    """
    # Remember whether the name denotes a directory so the trailing
    # slash can be restored after normalization.
    ends_like_dir = bool(re.search(r"[\\/.]$", resource_name)) or resource_name.endswith(
        os.path.sep
    )

    if sys.platform.startswith("win"):
        name = resource_name.lstrip("/")
    else:
        name = re.sub(r"^/+", "/", resource_name)

    if allow_relative:
        name = os.path.normpath(name)
    else:
        base = os.curdir if relative_path is None else relative_path
        name = os.path.abspath(os.path.join(base, name))

    # Force posix-style separators regardless of platform.
    name = name.replace("\\", "/").replace(os.path.sep, "/")
    if sys.platform.startswith("win") and os.path.isabs(name):
        name = "/" + name
    if ends_like_dir and not name.endswith("/"):
        name += "/"
    return name
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
######################################################################
|
| 251 |
+
# Path Pointers
|
| 252 |
+
######################################################################
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
class PathPointer(metaclass=ABCMeta):
    """
    An abstract base class for 'path pointers,' used by NLTK's data
    package to identify specific paths.  Two subclasses exist:
    ``FileSystemPathPointer`` identifies a file that can be accessed
    directly via a given absolute path.  ``ZipFilePathPointer``
    identifies a file contained within a zipfile, that can be accessed
    by reading that zipfile.
    """

    @abstractmethod
    def open(self, encoding=None):
        """
        Return a seekable read-only stream over the contents of the
        file identified by this path pointer.

        :raise IOError: If the path specified by this pointer does
            not contain a readable file.
        """

    @abstractmethod
    def file_size(self):
        """
        Return the size, in bytes, of the file pointed to by this
        path pointer.

        :raise IOError: If the path specified by this pointer does
            not contain a readable file.
        """

    @abstractmethod
    def join(self, fileid):
        """
        Return a new path pointer for the relative path *fileid*,
        resolved against this pointer.  The path components of
        ``fileid`` should be separated by forward slashes, regardless
        of the underlying file system's path separator character.
        """
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
class FileSystemPathPointer(PathPointer, str):
    """
    A path pointer that identifies a file which can be accessed
    directly via a given absolute path.
    """

    @py3_data
    def __init__(self, _path):
        """
        Create a new path pointer for the given absolute path.

        :raise IOError: If the given path does not exist.
        """
        _path = os.path.abspath(_path)
        if not os.path.exists(_path):
            raise OSError("No such file or directory: %r" % _path)
        self._path = _path
        # str.__init__() need not be called; str sets itself up in __new__.

    @property
    def path(self):
        """The absolute path identified by this path pointer."""
        return self._path

    def open(self, encoding=None):
        # Open in binary mode; wrap with a unicode reader only when an
        # encoding is requested.
        stream = open(self._path, "rb")
        if encoding is None:
            return stream
        return SeekableUnicodeStreamReader(stream, encoding)

    def file_size(self):
        return os.stat(self._path).st_size

    def join(self, fileid):
        return FileSystemPathPointer(os.path.join(self._path, fileid))

    def __repr__(self):
        return "FileSystemPathPointer(%r)" % self._path

    def __str__(self):
        return self._path
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
@deprecated("Use gzip.GzipFile instead as it also uses a buffer.")
class BufferedGzipFile(GzipFile):
    """A ``GzipFile`` subclass for compatibility with older nltk releases.

    Use ``GzipFile`` directly as it also buffers in all supported
    Python versions.
    """

    @py3_data
    def __init__(
        self, filename=None, mode=None, compresslevel=9, fileobj=None, **kwargs
    ):
        """Return a buffered gzip file object."""
        # Extra **kwargs accepted (and ignored) for backward compatibility
        # with the old BufferedGzipFile signature.
        GzipFile.__init__(self, filename, mode, compresslevel, fileobj)

    def write(self, data):
        # Identical to GzipFile.write, except that the number of bytes
        # written is deliberately discarded so the historical ``None``
        # return value is preserved for old callers.
        super().write(data)
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
class GzipFileSystemPathPointer(FileSystemPathPointer):
    """
    A subclass of ``FileSystemPathPointer`` that identifies a gzip-compressed
    file located at a given absolute path. ``GzipFileSystemPathPointer`` is
    appropriate for loading large gzip-compressed pickle objects efficiently.
    """

    def open(self, encoding=None):
        # Decompress transparently; optionally wrap with a unicode reader.
        gz_stream = GzipFile(self._path, "rb")
        if not encoding:
            return gz_stream
        return SeekableUnicodeStreamReader(gz_stream, encoding)
|
| 376 |
+
|
| 377 |
+
|
| 378 |
+
class ZipFilePathPointer(PathPointer):
    """
    A path pointer that identifies a file contained within a zipfile,
    which can be accessed by reading that zipfile.
    """

    @py3_data
    def __init__(self, zipfile, entry=""):
        """
        Create a new path pointer pointing at the specified entry
        in the given zipfile.

        :raise IOError: If the given zipfile does not exist, or if it
            does not contain the specified entry.
        """
        if isinstance(zipfile, str):
            zipfile = OpenOnDemandZipFile(os.path.abspath(zipfile))

        # Verify that the entry exists before committing to it.
        if entry:

            # Normalize the entry string, it should be relative:
            entry = normalize_resource_name(entry, True, "/").lstrip("/")

            try:
                zipfile.getinfo(entry)
            except Exception as e:
                # Directories are sometimes not explicitly listed in the
                # zip index.  Accept a directory-style entry whenever the
                # zipfile contains at least one member beneath it.
                is_implicit_dir = entry.endswith("/") and any(
                    member.startswith(entry) for member in zipfile.namelist()
                )
                if not is_implicit_dir:
                    raise OSError(
                        f"Zipfile {zipfile.filename!r} does not contain {entry!r}"
                    ) from e
        self._zipfile = zipfile
        self._entry = entry

    @property
    def zipfile(self):
        """
        The zipfile.ZipFile object used to access the zip file
        containing the entry identified by this path pointer.
        """
        return self._zipfile

    @property
    def entry(self):
        """
        The name of the file within zipfile that this path
        pointer points to.
        """
        return self._entry

    def open(self, encoding=None):
        stream = BytesIO(self._zipfile.read(self._entry))
        if self._entry.endswith(".gz"):
            # Transparently decompress gzipped entries.
            return GzipFile(self._entry, fileobj=stream)
        if encoding is not None:
            return SeekableUnicodeStreamReader(stream, encoding)
        return stream

    def file_size(self):
        return self._zipfile.getinfo(self._entry).file_size

    def join(self, fileid):
        return ZipFilePathPointer(self._zipfile, f"{self._entry}/{fileid}")

    def __repr__(self):
        return f"ZipFilePathPointer({self._zipfile.filename!r}, {self._entry!r})"

    def __str__(self):
        return os.path.normpath(os.path.join(self._zipfile.filename, self._entry))
|
| 458 |
+
|
| 459 |
+
|
| 460 |
+
######################################################################
|
| 461 |
+
# Access Functions
|
| 462 |
+
######################################################################
|
| 463 |
+
|
| 464 |
+
# Don't use a weak dictionary, because in the common case this
# causes a lot more reloading than necessary.
_resource_cache = {}
"""A dictionary used to cache resources so that they won't
need to be loaded more than once."""
|
| 469 |
+
|
| 470 |
+
|
| 471 |
+
def find(resource_name, paths=None):
    """
    Find the given resource by searching through the directories and
    zip files in paths, where a None or empty string specifies an absolute path.
    Returns a corresponding path name.  If the given resource is not
    found, raise a ``LookupError``, whose message gives a pointer to
    the installation instructions for the NLTK downloader.

    Zip File Handling:

    - If ``resource_name`` contains a component with a ``.zip``
      extension, then it is assumed to be a zipfile; and the
      remaining path components are used to look inside the zipfile.

    - If any element of ``nltk.data.path`` has a ``.zip`` extension,
      then it is assumed to be a zipfile.

    - If a given resource name that does not contain any zipfile
      component is not found initially, then ``find()`` will make a
      second attempt to find that resource, by replacing each
      component *p* in the path with *p.zip/p*.  For example, this
      allows ``find()`` to map the resource name
      ``corpora/chat80/cities.pl`` to a zip file path pointer to
      ``corpora/chat80.zip/chat80/cities.pl``.

    - When using ``find()`` to locate a directory contained in a
      zipfile, the resource name must end with the forward slash
      character.  Otherwise, ``find()`` will not locate the
      directory.

    :type resource_name: str or unicode
    :param resource_name: The name of the resource to search for.
        Resource names are posix-style relative path names, such as
        ``corpora/brown``.  Directory names will be
        automatically converted to a platform-appropriate path separator.
    :rtype: str
    """
    resource_name = normalize_resource_name(resource_name, True)

    # Resolve default paths at runtime in-case the user overrides
    # nltk.data.path
    if paths is None:
        paths = path

    # Check if the resource name includes a zipfile name.  The trailing
    # "|" alternative makes the pattern match (as empty) even when there
    # is no ".zip" component, yielding (None, None).
    m = re.match(r"(.*\.zip)/?(.*)$|", resource_name)
    zipfile, zipentry = m.groups()

    # Check each item in our path
    for path_ in paths:
        # Is the path item a zipfile?
        if path_ and (os.path.isfile(path_) and path_.endswith(".zip")):
            try:
                return ZipFilePathPointer(path_, resource_name)
            except OSError:
                # resource not in zipfile
                continue

        # Is the path item a directory or is resource_name an absolute path?
        elif not path_ or os.path.isdir(path_):
            if zipfile is None:
                p = os.path.join(path_, url2pathname(resource_name))
                if os.path.exists(p):
                    if p.endswith(".gz"):
                        return GzipFileSystemPathPointer(p)
                    else:
                        return FileSystemPathPointer(p)
            else:
                p = os.path.join(path_, url2pathname(zipfile))
                if os.path.exists(p):
                    try:
                        return ZipFilePathPointer(p, zipentry)
                    except OSError:
                        # resource not in zipfile
                        continue

    # Fallback: if the path doesn't include a zip file, then try
    # again, assuming that one of the path components is inside a
    # zipfile of the same name.
    if zipfile is None:
        pieces = resource_name.split("/")
        for i in range(len(pieces)):
            modified_name = "/".join(pieces[:i] + [pieces[i] + ".zip"] + pieces[i:])
            try:
                return find(modified_name, paths)
            except LookupError:
                pass

    # Identify the package (i.e. the .zip file) to download.  Guard
    # against single-component resource names, which previously raised
    # an IndexError here instead of the intended LookupError below.
    name_pieces = resource_name.split("/")
    resource_zipname = name_pieces[1] if len(name_pieces) > 1 else name_pieces[0]
    if resource_zipname.endswith(".zip"):
        resource_zipname = resource_zipname.rpartition(".")[0]
    # Display a friendly error message if the resource wasn't found:
    # (the redundant str() wrapper around the literal was dropped)
    msg = (
        "Resource \33[93m{resource}\033[0m not found.\n"
        "Please use the NLTK Downloader to obtain the resource:\n\n"
        "\33[31m"  # To display red text in terminal.
        ">>> import nltk\n"
        ">>> nltk.download('{resource}')\n"
        "\033[0m"
    ).format(resource=resource_zipname)
    msg = textwrap_indent(msg)

    msg += "\n For more information see: https://www.nltk.org/data.html\n"

    msg += "\n Attempted to load \33[93m{resource_name}\033[0m\n".format(
        resource_name=resource_name
    )

    msg += "\n Searched in:" + "".join("\n - %r" % d for d in paths)
    sep = "*" * 70
    resource_not_found = f"\n{sep}\n{msg}\n{sep}\n"
    raise LookupError(resource_not_found)
|
| 584 |
+
|
| 585 |
+
|
| 586 |
+
def retrieve(resource_url, filename=None, verbose=True):
    """
    Copy the given resource to a local file.  If no filename is
    specified, then use the URL's filename.  If there is already a
    file named ``filename``, then raise a ``ValueError``.

    :type resource_url: str
    :param resource_url: A URL specifying where the resource should be
        loaded from.  The default protocol is "nltk:", which searches
        for the file in the NLTK data package.
    :raise ValueError: If ``filename`` already exists.
    """
    resource_url = normalize_resource_url(resource_url)
    if filename is None:
        if resource_url.startswith("file:"):
            filename = os.path.split(resource_url)[-1]
        else:
            # Strip the protocol prefix and any directory components.
            filename = re.sub(r"(^\w+:)?.*/", "", resource_url)
    if os.path.exists(filename):
        filename = os.path.abspath(filename)
        raise ValueError("File %r already exists!" % filename)

    if verbose:
        print(f"Retrieving {resource_url!r}, saving to {filename!r}")

    # Open the input & output streams.
    infile = _open(resource_url)

    # Copy infile -> outfile, using 64k blocks.  The input stream is
    # closed even if the copy fails part-way (the original leaked it).
    try:
        with open(filename, "wb") as outfile:
            while True:
                s = infile.read(1024 * 64)  # 64k blocks.
                if not s:
                    break
                outfile.write(s)
    finally:
        infile.close()
|
| 622 |
+
|
| 623 |
+
|
| 624 |
+
#: A dictionary describing the formats that are supported by NLTK's
#: load() method.  Keys are format names, and values are format
#: descriptions.
FORMATS = {
    "pickle": "A serialized python object, stored using the pickle module.",
    "json": "A serialized python object, stored using the json module.",
    "yaml": "A serialized python object, stored using the yaml module.",
    "cfg": "A context free grammar.",
    "pcfg": "A probabilistic CFG.",
    "fcfg": "A feature CFG.",
    "fol": "A list of first order logic expressions, parsed with "
    "nltk.sem.logic.Expression.fromstring.",
    "logic": "A list of first order logic expressions, parsed with "
    "nltk.sem.logic.LogicParser. Requires an additional logic_parser "
    "parameter",
    "val": "A semantic valuation, parsed by nltk.sem.Valuation.fromstring.",
    "raw": "The raw (byte string) contents of a file.",
    "text": "The raw (unicode string) contents of a file. ",
}
|
| 643 |
+
|
| 644 |
+
#: A dictionary mapping from file extensions to format names, used
#: by load() when format="auto" to decide the format for a
#: given resource url.
AUTO_FORMATS = {
    "pickle": "pickle",
    "json": "json",
    "yaml": "yaml",
    "cfg": "cfg",
    "pcfg": "pcfg",
    "fcfg": "fcfg",
    "fol": "fol",
    "logic": "logic",
    "val": "val",
    "txt": "text",
    "text": "text",
}
|
| 660 |
+
|
| 661 |
+
|
| 662 |
+
def load(
    resource_url,
    format="auto",
    cache=True,
    verbose=False,
    logic_parser=None,
    fstruct_reader=None,
    encoding=None,
):
    """
    Load a given resource from the NLTK data package.  The following
    resource formats are currently supported:

      - ``pickle``
      - ``json``
      - ``yaml``
      - ``cfg`` (context free grammars)
      - ``pcfg`` (probabilistic CFGs)
      - ``fcfg`` (feature-based CFGs)
      - ``fol`` (formulas of First Order Logic)
      - ``logic`` (Logical formulas to be parsed by the given logic_parser)
      - ``val`` (valuation of First Order Logic model)
      - ``text`` (the file contents as a unicode string)
      - ``raw`` (the raw file contents as a byte string)

    If no format is specified, ``load()`` will attempt to determine a
    format based on the resource name's file extension.  If that
    fails, ``load()`` will raise a ``ValueError`` exception.

    For all text formats (everything except ``pickle``, ``json``, ``yaml`` and ``raw``),
    it tries to decode the raw contents using UTF-8, and if that doesn't
    work, it tries with ISO-8859-1 (Latin-1), unless the ``encoding``
    is specified.

    :type resource_url: str
    :param resource_url: A URL specifying where the resource should be
        loaded from.  The default protocol is "nltk:", which searches
        for the file in the NLTK data package.
    :type cache: bool
    :param cache: If true, add this resource to a cache.  If load()
        finds a resource in its cache, then it will return it from the
        cache rather than loading it.
    :type verbose: bool
    :param verbose: If true, print a message when loading a resource.
        Messages are not displayed when a resource is retrieved from
        the cache.
    :type logic_parser: LogicParser
    :param logic_parser: The parser that will be used to parse logical
        expressions.
    :type fstruct_reader: FeatStructReader
    :param fstruct_reader: The parser that will be used to parse the
        feature structure of an fcfg.
    :type encoding: str
    :param encoding: the encoding of the input; only used for text formats.
    """
    resource_url = normalize_resource_url(resource_url)
    resource_url = add_py3_data(resource_url)

    # Determine the format of the resource.
    if format == "auto":
        resource_url_parts = resource_url.split(".")
        ext = resource_url_parts[-1]
        if ext == "gz":
            # Look past the compression suffix for the real extension.
            ext = resource_url_parts[-2]
        format = AUTO_FORMATS.get(ext)
        if format is None:
            raise ValueError(
                "Could not determine format for %s based "
                'on its file\nextension; use the "format" '
                "argument to specify the format explicitly." % resource_url
            )

    if format not in FORMATS:
        raise ValueError(f"Unknown format type: {format}!")

    # If we've cached the resource, then just return it.
    if cache:
        resource_val = _resource_cache.get((resource_url, format))
        if resource_val is not None:
            if verbose:
                print(f"<<Using cached copy of {resource_url}>>")
            return resource_val

    # Let the user know what's going on.
    if verbose:
        print(f"<<Loading {resource_url}>>")

    # Load the resource.  The stream is closed in a finally clause so
    # it is not leaked when parsing raises (the original only closed it
    # on success).
    opened_resource = _open(resource_url)
    try:
        if format == "raw":
            resource_val = opened_resource.read()
        elif format == "pickle":
            resource_val = pickle.load(opened_resource)
        elif format == "json":
            import json

            from nltk.jsontags import json_tags

            resource_val = json.load(opened_resource)
            tag = None
            # A tagged resource is a single-key mapping whose key names
            # the tag.  BUGFIX: the original tested ``len != 1`` and
            # called ``next()`` directly on a dict view (which is not an
            # iterator), so this branch always raised.  Untagged
            # multi-key JSON is still rejected below — TODO confirm that
            # matches the intended jsontags contract.
            if len(resource_val) == 1:
                tag = next(iter(resource_val.keys()))
            if tag not in json_tags:
                raise ValueError("Unknown json tag.")
        elif format == "yaml":
            import yaml

            resource_val = yaml.safe_load(opened_resource)
        else:
            # The resource is a text format.
            binary_data = opened_resource.read()
            if encoding is not None:
                string_data = binary_data.decode(encoding)
            else:
                try:
                    string_data = binary_data.decode("utf-8")
                except UnicodeDecodeError:
                    string_data = binary_data.decode("latin-1")
            if format == "text":
                resource_val = string_data
            elif format == "cfg":
                resource_val = grammar.CFG.fromstring(string_data, encoding=encoding)
            elif format == "pcfg":
                resource_val = grammar.PCFG.fromstring(string_data, encoding=encoding)
            elif format == "fcfg":
                resource_val = grammar.FeatureGrammar.fromstring(
                    string_data,
                    logic_parser=logic_parser,
                    fstruct_reader=fstruct_reader,
                    encoding=encoding,
                )
            elif format == "fol":
                resource_val = sem.read_logic(
                    string_data,
                    logic_parser=sem.logic.LogicParser(),
                    encoding=encoding,
                )
            elif format == "logic":
                resource_val = sem.read_logic(
                    string_data, logic_parser=logic_parser, encoding=encoding
                )
            elif format == "val":
                resource_val = sem.read_valuation(string_data, encoding=encoding)
            else:
                raise AssertionError(
                    "Internal NLTK error: Format %s isn't "
                    "handled by nltk.data.load()" % (format,)
                )
    finally:
        opened_resource.close()

    # If requested, add it to the cache.
    if cache:
        try:
            _resource_cache[(resource_url, format)] = resource_val
            # TODO: add this line
            # print('<<Caching a copy of %s>>' % (resource_url,))
        except TypeError:
            # NOTE(review): a plain dict assignment never raises
            # TypeError; this guard dates from a weak-dictionary cache.
            # Kept for safety in case the cache type changes again.
            pass

    return resource_val
|
| 826 |
+
|
| 827 |
+
|
| 828 |
+
def show_cfg(resource_url, escape="##"):
    """
    Write out a grammar file, ignoring escaped and empty lines.

    :type resource_url: str
    :param resource_url: A URL specifying where the resource should be
        loaded from.  The default protocol is "nltk:", which searches
        for the file in the NLTK data package.
    :type escape: str
    :param escape: Prepended string that signals lines to be ignored
    """
    resource_url = normalize_resource_url(resource_url)
    grammar_text = load(resource_url, format="text", cache=False)
    for line in grammar_text.splitlines():
        # Print every line that is neither escaped nor empty.
        if line and not line.startswith(escape):
            print(line)
|
| 848 |
+
|
| 849 |
+
|
| 850 |
+
def clear_cache():
    """
    Remove all objects from the resource cache.

    :see: load()
    """
    _resource_cache.clear()
|
| 856 |
+
|
| 857 |
+
|
| 858 |
+
def _open(resource_url):
    """
    Helper function that returns an open file object for a resource,
    given its resource URL.  URLs using the "nltk:" protocol (or no
    protocol at all) are resolved with ``nltk.data.find``; "file:"
    URLs are opened directly; anything else is delegated to
    ``urllib.request.urlopen``.

    :type resource_url: str
    :param resource_url: A URL specifying where the resource should be
        loaded from.  The default protocol is "nltk:", which searches
        for the file in the NLTK data package.
    """
    resource_url = normalize_resource_url(resource_url)
    protocol, path_ = split_resource_url(resource_url)

    if protocol is None or protocol.lower() == "nltk":
        return find(path_, path + [""]).open()
    if protocol.lower() == "file":
        # urllib might not use mode='rb', so handle this one ourselves:
        return find(path_, [""]).open()
    return urlopen(resource_url)
|
| 882 |
+
|
| 883 |
+
|
| 884 |
+
######################################################################
|
| 885 |
+
# Lazy Resource Loader
|
| 886 |
+
######################################################################
|
| 887 |
+
|
| 888 |
+
|
| 889 |
+
class LazyLoader:
    """
    A proxy object that defers loading an NLTK resource (via ``load()``)
    until the first time it is actually used.  On first attribute access
    or repr(), the proxy replaces its own ``__dict__`` and ``__class__``
    with those of the loaded resource, becoming that resource in place.
    """

    @py3_data
    def __init__(self, _path):
        # The resource URL/path to pass to load() on first use.
        self._path = _path

    def __load(self):
        resource = load(self._path)
        # This is where the magic happens!  Transform ourselves into
        # the object by modifying our own __dict__ and __class__ to
        # match that of `resource`.
        self.__dict__ = resource.__dict__
        self.__class__ = resource.__class__

    def __getattr__(self, attr):
        self.__load()
        # This looks circular, but its not, since __load() changes our
        # __class__ to something new:
        return getattr(self, attr)

    def __repr__(self):
        self.__load()
        # This looks circular, but its not, since __load() changes our
        # __class__ to something new:
        return repr(self)
|
| 913 |
+
|
| 914 |
+
|
| 915 |
+
######################################################################
|
| 916 |
+
# Open-On-Demand ZipFile
|
| 917 |
+
######################################################################
|
| 918 |
+
|
| 919 |
+
|
| 920 |
+
class OpenOnDemandZipFile(zipfile.ZipFile):
    """
    A subclass of ``zipfile.ZipFile`` that closes its file pointer
    whenever it is not using it; and re-opens it when it needs to read
    data from the zipfile.  This is useful for reducing the number of
    open file handles when many zip files are being accessed at once.
    ``OpenOnDemandZipFile`` must be constructed from a filename, not a
    file-like object (to allow re-opening).  ``OpenOnDemandZipFile`` is
    read-only (i.e. ``write()`` and ``writestr()`` are disabled.
    """

    @py3_data
    def __init__(self, filename):
        if not isinstance(filename, str):
            raise TypeError("ReopenableZipFile filename must be a string")
        # Read the index eagerly, then release the file handle.
        zipfile.ZipFile.__init__(self, filename)
        assert self.filename == filename
        self.close()
        # After closing a ZipFile object, the _fileRefCnt needs to be cleared
        # for Python2and3 compatible code.
        self._fileRefCnt = 0

    def read(self, name):
        # Re-open the underlying file just long enough to read `name`.
        assert self.fp is None
        self.fp = open(self.filename, "rb")
        value = zipfile.ZipFile.read(self, name)
        # Ensure that _fileRefCnt needs to be set for Python2and3 compatible code.
        # Since we only opened one file here, we add 1.
        self._fileRefCnt += 1
        self.close()
        return value

    def write(self, *args, **kwargs):
        """:raise NotImplementedError: OpenOnDemandZipfile is read-only"""
        raise NotImplementedError("OpenOnDemandZipfile is read-only")

    def writestr(self, *args, **kwargs):
        """:raise NotImplementedError: OpenOnDemandZipfile is read-only"""
        raise NotImplementedError("OpenOnDemandZipfile is read-only")

    def __repr__(self):
        # NOTE(review): returns repr() of a string, so the result is
        # quoted — preserved for backward compatibility.
        return repr("OpenOnDemandZipFile(%r)" % self.filename)
|
| 962 |
+
|
| 963 |
+
|
| 964 |
+
######################################################################
|
| 965 |
+
# Seekable Unicode Stream Reader
|
| 966 |
+
######################################################################
|
| 967 |
+
|
| 968 |
+
|
| 969 |
+
class SeekableUnicodeStreamReader:
    """
    A stream reader that automatically encodes the source byte stream
    into unicode (like ``codecs.StreamReader``); but still supports the
    ``seek()`` and ``tell()`` operations correctly.  This is in contrast
    to ``codecs.StreamReader``, which provides *broken* ``seek()`` and
    ``tell()`` methods.

    This class was motivated by ``StreamBackedCorpusView``, which
    makes extensive use of ``seek()`` and ``tell()``, and needs to be
    able to handle unicode-encoded files.

    Note: this class requires stateless decoders.  To my knowledge,
    this shouldn't cause a problem with any of python's builtin
    unicode encodings.
    """

    DEBUG = True  # : If true, then perform extra sanity checks.
|
| 987 |
+
|
| 988 |
+
@py3_data
def __init__(self, stream, encoding, errors="strict"):
    """
    Create a new reader that decodes bytes read from ``stream``
    using ``encoding``.

    :param stream: The underlying (seekable) byte stream.
    :param encoding: Name of the encoding used to decode the stream.
    :param errors: Decoder error mode ('strict', 'ignore', or 'replace').
    """
    # Rewind the stream to its beginning.
    stream.seek(0)

    self.stream = stream
    """The underlying stream."""

    self.encoding = encoding
    """The name of the encoding that should be used to encode the
    underlying stream."""

    self.errors = errors
    """The error mode that should be used when decoding data from
    the underlying stream. Can be 'strict', 'ignore', or
    'replace'."""

    self.decode = codecs.getdecoder(encoding)
    """The function that is used to decode byte strings into
    unicode strings."""

    self.bytebuffer = b""
    """A buffer to use bytes that have been read but have not yet
    been decoded. This is only used when the final bytes from
    a read do not form a complete encoding for a character."""

    self.linebuffer = None
    """A buffer used by ``readline()`` to hold characters that have
    been read, but have not yet been returned by ``read()`` or
    ``readline()``. This buffer consists of a list of unicode
    strings, where each string corresponds to a single line.
    The final element of the list may or may not be a complete
    line. Note that the existence of a linebuffer makes the
    ``tell()`` operation more complex, because it must backtrack
    to the beginning of the buffer to determine the correct
    file position in the underlying byte stream."""

    self._rewind_checkpoint = 0
    """The file position at which the most recent read on the
    underlying stream began. This is used, together with
    ``_rewind_numchars``, to backtrack to the beginning of
    ``linebuffer`` (which is required by ``tell()``)."""

    self._rewind_numchars = None
    """The number of characters that have been returned since the
    read that started at ``_rewind_checkpoint``. This is used,
    together with ``_rewind_checkpoint``, to backtrack to the
    beginning of ``linebuffer`` (which is required by ``tell()``)."""

    # Must run last: _check_bom() reads from the stream and relies on
    # bytebuffer/decode being initialized above.
    self._bom = self._check_bom()
    """The length of the byte order marker at the beginning of
    the stream (or None for no byte order marker)."""
|
| 1040 |
+
|
| 1041 |
+
# /////////////////////////////////////////////////////////////////
|
| 1042 |
+
# Read methods
|
| 1043 |
+
# /////////////////////////////////////////////////////////////////
|
| 1044 |
+
|
| 1045 |
+
def read(self, size=None):
    """
    Read up to ``size`` bytes, decode them using this reader's
    encoding, and return the resulting unicode string.

    :param size: The maximum number of bytes to read.  If not
        specified, then read as many bytes as possible.
    :type size: int
    :rtype: unicode
    """
    decoded = self._read(size)

    # Characters sitting in the line buffer were consumed from the
    # stream earlier but never returned; prepend them and reset the
    # buffer-related state.
    if self.linebuffer:
        decoded = "".join(self.linebuffer) + decoded
        self.linebuffer = None
        self._rewind_numchars = None

    return decoded
|
| 1064 |
+
|
| 1065 |
+
def discard_line(self):
    # When the line buffer holds at least one complete line (everything
    # but its last element is complete), drop the first one and account
    # for its characters; otherwise skip a raw line on the stream.
    if self.linebuffer and len(self.linebuffer) > 1:
        dropped = self.linebuffer.pop(0)
        self._rewind_numchars += len(dropped)
    else:
        self.stream.readline()
|
| 1071 |
+
|
| 1072 |
+
def readline(self, size=None):
    """
    Read a line of text, decode it using this reader's encoding,
    and return the resulting unicode string.

    :param size: The maximum number of bytes to read.  If no
        newline is encountered before ``size`` bytes have been read,
        then the returned value may not be a complete line of text.
    :type size: int
    :rtype: unicode
    """
    # If we have a non-empty linebuffer, then return the first
    # line from it.  (Note that the last element of linebuffer may
    # not be a complete line; so let _read() deal with it.)
    if self.linebuffer and len(self.linebuffer) > 1:
        line = self.linebuffer.pop(0)
        self._rewind_numchars += len(line)
        return line

    # 72 is just a guess at a typical line length; the read size is
    # doubled on each pass below if no newline has been found yet.
    readsize = size or 72
    chars = ""

    # If there's a remaining incomplete line in the buffer, add it.
    if self.linebuffer:
        chars += self.linebuffer.pop()
        self.linebuffer = None

    while True:
        # File position of the text we are about to decode, excluding
        # any raw bytes still sitting undecoded in ``bytebuffer``.
        startpos = self.stream.tell() - len(self.bytebuffer)
        new_chars = self._read(readsize)

        # If we're at a '\r', then read one extra character, since
        # it might be a '\n', to get the proper line ending.
        if new_chars and new_chars.endswith("\r"):
            new_chars += self._read(1)

        chars += new_chars
        lines = chars.splitlines(True)
        if len(lines) > 1:
            # More than one line: return the first and buffer the rest,
            # recording where this read began so tell() can backtrack.
            line = lines[0]
            self.linebuffer = lines[1:]
            self._rewind_numchars = len(new_chars) - (len(chars) - len(line))
            self._rewind_checkpoint = startpos
            break
        elif len(lines) == 1:
            # Exactly one line: it is complete iff it still carries a
            # line ending (splitlines(False) strips it).
            line0withend = lines[0]
            line0withoutend = lines[0].splitlines(False)[0]
            if line0withend != line0withoutend:  # complete line
                line = line0withend
                break

        # End of file, or the caller capped the read: return whatever
        # we have, even if it is not a complete line.
        if not new_chars or size is not None:
            line = chars
            break

        # Read successively larger blocks of text.
        if readsize < 8000:
            readsize *= 2

    return line
|
| 1131 |
+
|
| 1132 |
+
def readlines(self, sizehint=None, keepends=True):
    """
    Read this file's contents, decode them using this reader's
    encoding, and return it as a list of unicode lines.

    :rtype: list(unicode)
    :param sizehint: Ignored.
    :param keepends: If false, then strip newlines.
    """
    text = self.read()
    return text.splitlines(keepends)
|
| 1142 |
+
|
| 1143 |
+
def next(self):
    """Return the next decoded line from the underlying stream."""
    nextline = self.readline()
    if not nextline:
        # readline() returns "" at end-of-file.
        raise StopIteration
    return nextline
|
| 1150 |
+
|
| 1151 |
+
def __next__(self):
    """Python 3 iterator protocol: delegate to the legacy next() method."""
    return self.next()
|
| 1153 |
+
|
| 1154 |
+
def __iter__(self):
    """Return self"""
    return self
|
| 1157 |
+
|
| 1158 |
+
def __del__(self):
    """Close the underlying stream when this reader is garbage-collected."""
    # let garbage collector deal with still opened streams
    if not self.closed:
        self.close()
|
| 1162 |
+
|
| 1163 |
+
def __enter__(self):
    """Context-manager entry: return the reader itself."""
    return self
|
| 1165 |
+
|
| 1166 |
+
def __exit__(self, type, value, traceback):
    """Context-manager exit: close the underlying stream unconditionally."""
    self.close()
|
| 1168 |
+
|
| 1169 |
+
def xreadlines(self):
    """Return self"""
    # Legacy (Python 2 style) alias: the reader is itself a line iterator.
    return self
|
| 1172 |
+
|
| 1173 |
+
# /////////////////////////////////////////////////////////////////
|
| 1174 |
+
# Pass-through methods & properties
|
| 1175 |
+
# /////////////////////////////////////////////////////////////////
|
| 1176 |
+
|
| 1177 |
+
@property
def closed(self):
    """True if the underlying stream is closed."""
    return self.stream.closed
|
| 1181 |
+
|
| 1182 |
+
@property
def name(self):
    """The name of the underlying stream."""
    return self.stream.name
|
| 1186 |
+
|
| 1187 |
+
@property
def mode(self):
    """The mode of the underlying stream."""
    return self.stream.mode
|
| 1191 |
+
|
| 1192 |
+
def close(self):
    """
    Close the underlying stream.
    """
    self.stream.close()
|
| 1197 |
+
|
| 1198 |
+
# /////////////////////////////////////////////////////////////////
|
| 1199 |
+
# Seek and tell
|
| 1200 |
+
# /////////////////////////////////////////////////////////////////
|
| 1201 |
+
|
| 1202 |
+
def seek(self, offset, whence=0):
    """
    Move the stream to a new file position.  If the reader is
    maintaining any buffers, then they will be cleared.

    :param offset: A byte count offset.
    :param whence: If 0, then the offset is from the start of the file
        (offset should be positive), if 1, then the offset is from the
        current position (offset may be positive or negative); and if 2,
        then the offset is from the end of the file (offset should
        typically be negative).
    :raises ValueError: If ``whence`` is 1 (relative seek), which is
        ambiguous for a decoding reader holding buffered data.
    """
    if whence == 1:
        raise ValueError(
            "Relative seek is not supported for "
            "SeekableUnicodeStreamReader -- consider "
            "using char_seek_forward() instead."
        )
    self.stream.seek(offset, whence)
    # All buffered state described the old position; discard it and
    # re-anchor the rewind checkpoint at the new position.
    self.linebuffer = None
    self.bytebuffer = b""
    self._rewind_numchars = None
    self._rewind_checkpoint = self.stream.tell()
|
| 1225 |
+
|
| 1226 |
+
def char_seek_forward(self, offset):
    """
    Move the read pointer forward by ``offset`` characters.

    :param offset: A non-negative character (not byte) count.
    :raises ValueError: If ``offset`` is negative.
    """
    if offset < 0:
        raise ValueError("Negative offsets are not supported")
    # Clear all buffers.  (seek(tell()) is a self-seek whose only
    # effect is flushing the line/byte buffers.)
    self.seek(self.tell())
    # Perform the seek operation.
    self._char_seek_forward(offset)
|
| 1236 |
+
|
| 1237 |
+
def _char_seek_forward(self, offset, est_bytes=None):
    """
    Move the file position forward by ``offset`` characters,
    ignoring all buffers.

    :param est_bytes: A hint, giving an estimate of the number of
        bytes that will be needed to move forward by ``offset`` chars.
        Defaults to ``offset``.
    """
    if est_bytes is None:
        est_bytes = offset
    bytes = b""

    while True:
        # Read in a block of bytes.
        newbytes = self.stream.read(est_bytes - len(bytes))
        bytes += newbytes

        # Decode the bytes to characters.
        chars, bytes_decoded = self._incr_decode(bytes)

        # If we got the right number of characters, then seek
        # backwards over any truncated characters, and return.
        if len(chars) == offset:
            self.stream.seek(-len(bytes) + bytes_decoded, 1)
            return

        # If we went too far, then we can back-up until we get it
        # right, using the bytes we've already read.
        if len(chars) > offset:
            while len(chars) > offset:
                # Assume at least one byte/char.
                # (Shrink the decode window by the character overshoot;
                # since each char is at least one byte this converges.)
                est_bytes += offset - len(chars)
                chars, bytes_decoded = self._incr_decode(bytes[:est_bytes])
            self.stream.seek(-len(bytes) + bytes_decoded, 1)
            return

        # Otherwise, we haven't read enough bytes yet; loop again.
        # Grow the estimate by the remaining character shortfall.
        est_bytes += offset - len(chars)
|
| 1276 |
+
|
| 1277 |
+
def tell(self):
    """
    Return the current file position on the underlying byte
    stream.  If this reader is maintaining any buffers, then the
    returned file position will be the position of the beginning
    of those buffers.
    """
    # If nothing's buffered, then just return our current filepos:
    if self.linebuffer is None:
        return self.stream.tell() - len(self.bytebuffer)

    # Otherwise, we'll need to backtrack the filepos until we
    # reach the beginning of the buffer.

    # Store our original file position, so we can return here.
    orig_filepos = self.stream.tell()

    # Calculate an estimate of where we think the newline is.
    # Proportional guess: of the bytes read since the checkpoint,
    # the fraction already returned to the caller is
    # _rewind_numchars / (_rewind_numchars + buffered chars).
    bytes_read = (orig_filepos - len(self.bytebuffer)) - self._rewind_checkpoint
    buf_size = sum(len(line) for line in self.linebuffer)
    est_bytes = int(
        bytes_read * self._rewind_numchars / (self._rewind_numchars + buf_size)
    )

    # Re-walk forward from the checkpoint by the number of characters
    # already consumed; the resulting byte position is the logical
    # start of the buffered text.
    self.stream.seek(self._rewind_checkpoint)
    self._char_seek_forward(self._rewind_numchars, est_bytes)
    filepos = self.stream.tell()

    # Sanity check
    if self.DEBUG:
        self.stream.seek(filepos)
        check1 = self._incr_decode(self.stream.read(50))[0]
        check2 = "".join(self.linebuffer)
        assert check1.startswith(check2) or check2.startswith(check1)

    # Return to our original filepos (so we don't have to throw
    # out our buffer.)
    self.stream.seek(orig_filepos)

    # Return the calculated filepos
    return filepos
|
| 1318 |
+
|
| 1319 |
+
# /////////////////////////////////////////////////////////////////
|
| 1320 |
+
# Helper methods
|
| 1321 |
+
# /////////////////////////////////////////////////////////////////
|
| 1322 |
+
|
| 1323 |
+
def _read(self, size=None):
    """
    Read up to ``size`` bytes from the underlying stream, decode
    them using this reader's encoding, and return the resulting
    unicode string.  ``linebuffer`` is not included in the result.

    :param size: The maximum number of bytes to read, or None to
        read to the end of the stream.
    :type size: int
    :rtype: unicode
    """
    if size == 0:
        return ""

    # Skip past the byte order marker, if present.
    if self._bom and self.stream.tell() == 0:
        self.stream.read(self._bom)

    # Read the requested number of bytes.
    if size is None:
        new_bytes = self.stream.read()
    else:
        new_bytes = self.stream.read(size)
    bytes = self.bytebuffer + new_bytes

    # Decode the bytes into unicode characters
    chars, bytes_decoded = self._incr_decode(bytes)

    # If we got bytes but couldn't decode any, then read further.
    # (A small ``size`` may have stopped mid multi-byte character;
    # pull one byte at a time until the character completes or EOF.)
    if (size is not None) and (not chars) and (len(new_bytes) > 0):
        while not chars:
            new_bytes = self.stream.read(1)
            if not new_bytes:
                break  # end of file.
            bytes += new_bytes
            chars, bytes_decoded = self._incr_decode(bytes)

    # Record any bytes we didn't consume.
    self.bytebuffer = bytes[bytes_decoded:]

    # Return the result
    return chars
|
| 1360 |
+
|
| 1361 |
+
def _incr_decode(self, bytes):
    """
    Decode the given byte string into a unicode string, using this
    reader's encoding.  If an exception is encountered that
    appears to be caused by a truncation error, then just decode
    the byte string without the bytes that cause the truncation
    error.

    Return a tuple ``(chars, num_consumed)``, where ``chars`` is
    the decoded unicode string, and ``num_consumed`` is the
    number of bytes that were consumed.
    """
    # NOTE: every path in the body returns or raises, so this loop
    # executes at most one iteration.
    while True:
        try:
            return self.decode(bytes, "strict")
        except UnicodeDecodeError as exc:
            # If the exception occurs at the end of the string,
            # then assume that it's a truncation error.
            if exc.end == len(bytes):
                return self.decode(bytes[: exc.start], self.errors)

            # Otherwise, if we're being strict, then raise it.
            elif self.errors == "strict":
                raise

            # If we're not strict, then re-process it with our
            # errors setting.  This *may* raise an exception.
            else:
                return self.decode(bytes, self.errors)
|
| 1390 |
+
|
| 1391 |
+
# Maps a normalized encoding name to the byte order markers it may
# begin with, each paired with the endian-specific encoding to switch
# to (or None to keep the declared encoding unchanged).
_BOM_TABLE = {
    "utf8": [(codecs.BOM_UTF8, None)],
    "utf16": [(codecs.BOM_UTF16_LE, "utf16-le"), (codecs.BOM_UTF16_BE, "utf16-be")],
    "utf16le": [(codecs.BOM_UTF16_LE, None)],
    "utf16be": [(codecs.BOM_UTF16_BE, None)],
    "utf32": [(codecs.BOM_UTF32_LE, "utf32-le"), (codecs.BOM_UTF32_BE, "utf32-be")],
    "utf32le": [(codecs.BOM_UTF32_LE, None)],
    "utf32be": [(codecs.BOM_UTF32_BE, None)],
}
|
| 1400 |
+
|
| 1401 |
+
def _check_bom(self):
    """
    Check the start of the stream for a byte order marker matching
    ``self.encoding``.  Return the BOM's length in bytes (possibly
    switching ``self.encoding`` to the endian-specific variant the
    BOM implies), or None if no BOM is found.  The stream position
    is left at 0.
    """
    # Normalize our encoding name
    enc = re.sub("[ -]", "", self.encoding.lower())

    # Look up our encoding in the BOM table.
    bom_info = self._BOM_TABLE.get(enc)

    if bom_info:
        # Read a prefix, to check against the BOM(s)
        bytes = self.stream.read(16)
        self.stream.seek(0)

        # Check for each possible BOM.
        for (bom, new_encoding) in bom_info:
            if bytes.startswith(bom):
                if new_encoding:
                    self.encoding = new_encoding
                return len(bom)

    return None
|
| 1421 |
+
|
| 1422 |
+
|
| 1423 |
+
# Public API of this module.  Deduplicated: "GzipFileSystemPathPointer"
# was previously listed three times.
__all__ = [
    "path",
    "PathPointer",
    "FileSystemPathPointer",
    "BufferedGzipFile",
    "GzipFileSystemPathPointer",
    "find",
    "retrieve",
    "FORMATS",
    "AUTO_FORMATS",
    "load",
    "show_cfg",
    "clear_cache",
    "LazyLoader",
    "OpenOnDemandZipFile",
    "SeekableUnicodeStreamReader",
]
|
lib/python3.10/site-packages/nltk/decorators.py
ADDED
|
@@ -0,0 +1,251 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Decorator module by Michele Simionato <michelesimionato@libero.it>
|
| 3 |
+
Copyright Michele Simionato, distributed under the terms of the BSD License (see below).
|
| 4 |
+
http://www.phyast.pitt.edu/~micheles/python/documentation.html
|
| 5 |
+
|
| 6 |
+
Included in NLTK for its support of a nice memoization decorator.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
__docformat__ = "restructuredtext en"
|
| 10 |
+
|
| 11 |
+
## The basic trick is to generate the source code for the decorated function
|
| 12 |
+
## with the right signature and to evaluate it.
|
| 13 |
+
## Uncomment the statement 'print >> sys.stderr, func_src' in _decorator
|
| 14 |
+
## to understand what is going on.
|
| 15 |
+
|
| 16 |
+
__all__ = ["decorator", "new_wrapper", "getinfo"]
|
| 17 |
+
|
| 18 |
+
import sys
|
| 19 |
+
|
| 20 |
+
# Hack to keep NLTK's "tokenize" module from colliding with the "tokenize" in
|
| 21 |
+
# the Python standard library.
|
| 22 |
+
OLD_SYS_PATH = sys.path[:]
|
| 23 |
+
sys.path = [p for p in sys.path if p and "nltk" not in str(p)]
|
| 24 |
+
import inspect
|
| 25 |
+
|
| 26 |
+
sys.path = OLD_SYS_PATH
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def __legacysignature(signature):
    """
    For retrocompatibility reasons, we don't use a standard Signature.
    Instead, we use the string generated by this method.
    Basically, from a Signature we create a string and remove the default values.
    """
    raw_params = str(signature)[1:-1].split(",")
    cleaned = [
        # Drop the "=default" suffix when present, then strip whitespace.
        p[: p.index("=")].strip() if "=" in p else p.strip()
        for p in raw_params
    ]
    return ", ".join(cleaned)


def getinfo(func):
    """
    Returns an info dictionary containing:
    - name (the name of the function : str)
    - argnames (the names of the arguments : list)
    - defaults (the values of the default arguments : tuple)
    - signature (the signature : str)
    - fullsignature (the full signature : Signature)
    - doc (the docstring : str)
    - module (the module name : str)
    - dict (the function __dict__ : str)

    >>> def f(self, x=1, y=2, *args, **kw): pass

    >>> info = getinfo(f)

    >>> info["name"]
    'f'
    >>> info["argnames"]
    ['self', 'x', 'y', 'args', 'kw']

    >>> info["defaults"]
    (1, 2)

    >>> info["signature"]
    'self, x, y, *args, **kw'

    >>> info["fullsignature"]
    <Signature (self, x=1, y=2, *args, **kw)>
    """
    assert inspect.ismethod(func) or inspect.isfunction(func)
    spec = inspect.getfullargspec(func)
    # Positional argument names, followed by *varargs / **varkw if any.
    argnames = list(spec.args)
    if spec.varargs:
        argnames.append(spec.varargs)
    if spec.varkw:
        argnames.append(spec.varkw)
    fullsignature = inspect.signature(func)
    # Convert Signature to the legacy comma-separated string form.
    signature = __legacysignature(fullsignature)

    # pypy compatibility: old interpreters exposed func_closure /
    # func_globals instead of the dunder names.
    if hasattr(func, "__closure__"):
        _closure = func.__closure__
        _globals = func.__globals__
    else:
        _closure = func.func_closure
        _globals = func.func_globals

    return dict(
        name=func.__name__,
        argnames=argnames,
        signature=signature,
        fullsignature=fullsignature,
        defaults=func.__defaults__,
        doc=func.__doc__,
        module=func.__module__,
        dict=func.__dict__,
        globals=_globals,
        closure=_closure,
    )
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def update_wrapper(wrapper, model, infodict=None):
    """Copy metadata from *model* onto *wrapper* (akin to functools.update_wrapper).

    If *infodict* is not given it is computed via getinfo(model).
    The original callable is kept reachable as ``wrapper.undecorated``.
    """
    info = infodict if infodict else getinfo(model)
    wrapper.__name__ = info["name"]
    wrapper.__doc__ = info["doc"]
    wrapper.__module__ = info["module"]
    wrapper.__dict__.update(info["dict"])
    wrapper.__defaults__ = info["defaults"]
    wrapper.undecorated = model
    return wrapper
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def new_wrapper(wrapper, model):
    """
    An improvement over functools.update_wrapper. The wrapper is a generic
    callable object. It works by generating a copy of the wrapper with the
    right signature and by updating the copy, not the original.
    Moreovoer, 'model' can be a dictionary with keys 'name', 'doc', 'module',
    'dict', 'defaults'.
    """
    if isinstance(model, dict):
        infodict = model
    else:  # assume model is a function
        infodict = getinfo(model)
    assert (
        not "_wrapper_" in infodict["argnames"]
    ), '"_wrapper_" is a reserved argument name!'
    # eval() builds a lambda whose parameter list matches the model's
    # signature exactly, so introspection on the copy sees the original
    # signature.  Only the trusted "_wrapper_" name is in its namespace.
    src = "lambda %(signature)s: _wrapper_(%(signature)s)" % infodict
    funcopy = eval(src, dict(_wrapper_=wrapper))
    return update_wrapper(funcopy, model, infodict)
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
# helper used in decorator_factory
|
| 141 |
+
# helper used in decorator_factory
def __call__(self, func):
    # Wrap ``func`` so that invoking it routes through ``self.call``,
    # while new_wrapper preserves ``func``'s signature and metadata.
    return new_wrapper(lambda *a, **k: self.call(func, *a, **k), func)
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def decorator_factory(cls):
    """
    Take a class with a ``.call`` method and return a callable decorator
    object, by attaching a suitable __call__ method to the class.

    :raises TypeError: If the class already defines __call__, or if it
        lacks a ``call`` method.
    """
    members = set(dir(cls))
    if "__call__" in members:
        raise TypeError(
            "You cannot decorate a class with a nontrivial __call__ method"
        )
    if "call" not in members:
        raise TypeError("You cannot decorate a class without a .call method")
    cls.__call__ = __call__
    return cls
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
def decorator(caller):
    """
    General purpose decorator factory: takes a caller function as
    input and returns a decorator with the same attributes.
    A caller function is any function like this::

        def caller(func, *args, **kw):
            # do something
            return func(*args, **kw)

    Here is an example of usage:

        >>> @decorator
        ... def chatty(f, *args, **kw):
        ...     print("Calling %r" % f.__name__)
        ...     return f(*args, **kw)

        >>> chatty.__name__
        'chatty'

        >>> @chatty
        ... def f(): pass
        ...
        >>> f()
        Calling 'f'

    decorator can also take in input a class with a .caller method; in this
    case it converts the class into a factory of callable decorator objects.
    See the documentation for an example.
    """
    if inspect.isclass(caller):
        return decorator_factory(caller)

    def _decorator(func):  # the real meat is here
        infodict = getinfo(func)
        argnames = infodict["argnames"]
        assert not (
            "_call_" in argnames or "_func_" in argnames
        ), "You cannot use _call_ or _func_ as argument names!"
        # eval() builds a wrapper whose parameter list matches func's
        # signature exactly, so introspection sees the original signature.
        src = "lambda %(signature)s: _call_(_func_, %(signature)s)" % infodict
        # import sys; print >> sys.stderr, src  # for debugging purposes
        dec_func = eval(src, dict(_func_=func, _call_=caller))
        return update_wrapper(dec_func, func, infodict)

    return update_wrapper(_decorator, caller)
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
def getattr_(obj, name, default_thunk):
    """Return ``obj.name``, creating it via ``default_thunk()`` if absent.

    Similar to .setdefault in dictionaries: the computed default is
    stored on *obj* before being returned.
    """
    _missing = object()
    value = getattr(obj, name, _missing)
    if value is _missing:
        value = default_thunk()
        setattr(obj, name, value)
    return value
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
@decorator
def memoize(func, *args):
    # Cache results of ``func`` keyed by its positional arguments.
    # The cache lives on the function object itself and is created
    # lazily on the first call.
    # NOTE(review): keyword arguments are not supported, and args must
    # all be hashable.
    dic = getattr_(func, "memoize_dic", dict)
    # memoize_dic is created at the first call
    if args in dic:
        return dic[args]
    result = func(*args)
    dic[args] = result
    return result
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
########################## LEGALESE ###############################
|
| 232 |
+
|
| 233 |
+
## Redistributions of source code must retain the above copyright
|
| 234 |
+
## notice, this list of conditions and the following disclaimer.
|
| 235 |
+
## Redistributions in bytecode form must reproduce the above copyright
|
| 236 |
+
## notice, this list of conditions and the following disclaimer in
|
| 237 |
+
## the documentation and/or other materials provided with the
|
| 238 |
+
## distribution.
|
| 239 |
+
|
| 240 |
+
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
| 241 |
+
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
| 242 |
+
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
| 243 |
+
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
| 244 |
+
## HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
| 245 |
+
## INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
|
| 246 |
+
## BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
|
| 247 |
+
## OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
| 248 |
+
## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
|
| 249 |
+
## TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
| 250 |
+
## USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
|
| 251 |
+
## DAMAGE.
|
lib/python3.10/site-packages/nltk/downloader.py
ADDED
|
@@ -0,0 +1,2559 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Natural Language Toolkit: Corpus & Model Downloader
|
| 2 |
+
#
|
| 3 |
+
# Copyright (C) 2001-2023 NLTK Project
|
| 4 |
+
# Author: Edward Loper <edloper@gmail.com>
|
| 5 |
+
# URL: <https://www.nltk.org/>
|
| 6 |
+
# For license information, see LICENSE.TXT
|
| 7 |
+
|
| 8 |
+
"""
|
| 9 |
+
The NLTK corpus and module downloader. This module defines several
|
| 10 |
+
interfaces which can be used to download corpora, models, and other
|
| 11 |
+
data packages that can be used with NLTK.
|
| 12 |
+
|
| 13 |
+
Downloading Packages
|
| 14 |
+
====================
|
| 15 |
+
If called with no arguments, ``download()`` will display an interactive
|
| 16 |
+
interface which can be used to download and install new packages.
|
| 17 |
+
If Tkinter is available, then a graphical interface will be shown,
|
| 18 |
+
otherwise a simple text interface will be provided.
|
| 19 |
+
|
| 20 |
+
Individual packages can be downloaded by calling the ``download()``
|
| 21 |
+
function with a single argument, giving the package identifier for the
|
| 22 |
+
package that should be downloaded:
|
| 23 |
+
|
| 24 |
+
>>> download('treebank') # doctest: +SKIP
|
| 25 |
+
[nltk_data] Downloading package 'treebank'...
|
| 26 |
+
[nltk_data] Unzipping corpora/treebank.zip.
|
| 27 |
+
|
| 28 |
+
NLTK also provides a number of \"package collections\", consisting of
|
| 29 |
+
a group of related packages. To download all packages in a
|
| 30 |
+
collection, simply call ``download()`` with the collection's
|
| 31 |
+
identifier:
|
| 32 |
+
|
| 33 |
+
>>> download('all-corpora') # doctest: +SKIP
|
| 34 |
+
[nltk_data] Downloading package 'abc'...
|
| 35 |
+
[nltk_data] Unzipping corpora/abc.zip.
|
| 36 |
+
[nltk_data] Downloading package 'alpino'...
|
| 37 |
+
[nltk_data] Unzipping corpora/alpino.zip.
|
| 38 |
+
...
|
| 39 |
+
[nltk_data] Downloading package 'words'...
|
| 40 |
+
[nltk_data] Unzipping corpora/words.zip.
|
| 41 |
+
|
| 42 |
+
Download Directory
|
| 43 |
+
==================
|
| 44 |
+
By default, packages are installed in either a system-wide directory
|
| 45 |
+
(if Python has sufficient access to write to it); or in the current
|
| 46 |
+
user's home directory. However, the ``download_dir`` argument may be
|
| 47 |
+
used to specify a different installation target, if desired.
|
| 48 |
+
|
| 49 |
+
See ``Downloader.default_download_dir()`` for a more detailed
|
| 50 |
+
description of how the default download directory is chosen.
|
| 51 |
+
|
| 52 |
+
NLTK Download Server
|
| 53 |
+
====================
|
| 54 |
+
Before downloading any packages, the corpus and module downloader
|
| 55 |
+
contacts the NLTK download server, to retrieve an index file
|
| 56 |
+
describing the available packages. By default, this index file is
|
| 57 |
+
loaded from ``https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/index.xml``.
|
| 58 |
+
If necessary, it is possible to create a new ``Downloader`` object,
|
| 59 |
+
specifying a different URL for the package index file.
|
| 60 |
+
|
| 61 |
+
Usage::
|
| 62 |
+
|
| 63 |
+
python nltk/downloader.py [-d DATADIR] [-q] [-f] [-k] PACKAGE_IDS
|
| 64 |
+
|
| 65 |
+
or::
|
| 66 |
+
|
| 67 |
+
python -m nltk.downloader [-d DATADIR] [-q] [-f] [-k] PACKAGE_IDS
|
| 68 |
+
"""
|
| 69 |
+
# ----------------------------------------------------------------------
|
| 70 |
+
|
| 71 |
+
"""
|
| 72 |
+
|
| 73 |
+
0 1 2 3
|
| 74 |
+
[label][----][label][----]
|
| 75 |
+
[column ][column ]
|
| 76 |
+
|
| 77 |
+
Notes
|
| 78 |
+
=====
|
| 79 |
+
Handling data files.. Some questions:
|
| 80 |
+
|
| 81 |
+
* Should the data files be kept zipped or unzipped? I say zipped.
|
| 82 |
+
|
| 83 |
+
* Should the data files be kept in svn at all? Advantages: history;
|
| 84 |
+
automatic version numbers; 'svn up' could be used rather than the
|
| 85 |
+
downloader to update the corpora. Disadvantages: they're big,
|
| 86 |
+
which makes working from svn a bit of a pain. And we're planning
|
| 87 |
+
to potentially make them much bigger. I don't think we want
|
| 88 |
+
people to have to download 400MB corpora just to use nltk from svn.
|
| 89 |
+
|
| 90 |
+
* Compromise: keep the data files in trunk/data rather than in
|
| 91 |
+
trunk/nltk. That way you can check them out in svn if you want
|
| 92 |
+
to; but you don't need to, and you can use the downloader instead.
|
| 93 |
+
|
| 94 |
+
* Also: keep models in mind. When we change the code, we'd
|
| 95 |
+
potentially like the models to get updated. This could require a
|
| 96 |
+
little thought.
|
| 97 |
+
|
| 98 |
+
* So.. let's assume we have a trunk/data directory, containing a bunch
|
| 99 |
+
of packages. The packages should be kept as zip files, because we
|
| 100 |
+
really shouldn't be editing them much (well -- we may edit models
|
| 101 |
+
more, but they tend to be binary-ish files anyway, where diffs
|
| 102 |
+
aren't that helpful). So we'll have trunk/data, with a bunch of
|
| 103 |
+
files like abc.zip and treebank.zip and propbank.zip. For each
|
| 104 |
+
package we could also have eg treebank.xml and propbank.xml,
|
| 105 |
+
describing the contents of the package (name, copyright, license,
|
| 106 |
+
etc). Collections would also have .xml files. Finally, we would
|
| 107 |
+
pull all these together to form a single index.xml file. Some
|
| 108 |
+
directory structure wouldn't hurt. So how about::
|
| 109 |
+
|
| 110 |
+
/trunk/data/ ....................... root of data svn
|
| 111 |
+
index.xml ........................ main index file
|
| 112 |
+
src/ ............................. python scripts
|
| 113 |
+
packages/ ........................ dir for packages
|
| 114 |
+
corpora/ ....................... zip & xml files for corpora
|
| 115 |
+
grammars/ ...................... zip & xml files for grammars
|
| 116 |
+
taggers/ ....................... zip & xml files for taggers
|
| 117 |
+
tokenizers/ .................... zip & xml files for tokenizers
|
| 118 |
+
etc.
|
| 119 |
+
collections/ ..................... xml files for collections
|
| 120 |
+
|
| 121 |
+
Where the root (/trunk/data) would contain a makefile; and src/
|
| 122 |
+
would contain a script to update the info.xml file. It could also
|
| 123 |
+
contain scripts to rebuild some of the various model files. The
|
| 124 |
+
script that builds index.xml should probably check that each zip
|
| 125 |
+
file expands entirely into a single subdir, whose name matches the
|
| 126 |
+
package's uid.
|
| 127 |
+
|
| 128 |
+
Changes I need to make:
|
| 129 |
+
- in index: change "size" to "filesize" or "compressed-size"
|
| 130 |
+
- in index: add "unzipped-size"
|
| 131 |
+
- when checking status: check both compressed & uncompressed size.
|
| 132 |
+
uncompressed size is important to make sure we detect a problem
|
| 133 |
+
if something got partially unzipped. define new status values
|
| 134 |
+
to differentiate stale vs corrupt vs corruptly-uncompressed??
|
| 135 |
+
(we shouldn't need to re-download the file if the zip file is ok
|
| 136 |
+
but it didn't get uncompressed fully.)
|
| 137 |
+
- add other fields to the index: author, license, copyright, contact,
|
| 138 |
+
etc.
|
| 139 |
+
|
| 140 |
+
the current grammars/ package would become a single new package (eg
|
| 141 |
+
toy-grammars or book-grammars).
|
| 142 |
+
|
| 143 |
+
xml file should have:
|
| 144 |
+
- authorship info
|
| 145 |
+
- license info
|
| 146 |
+
- copyright info
|
| 147 |
+
- contact info
|
| 148 |
+
- info about what type of data/annotation it contains?
|
| 149 |
+
- recommended corpus reader?
|
| 150 |
+
|
| 151 |
+
collections can contain other collections. they can also contain
|
| 152 |
+
multiple package types (corpora & models). Have a single 'basics'
|
| 153 |
+
package that includes everything we talk about in the book?
|
| 154 |
+
|
| 155 |
+
n.b.: there will have to be a fallback to the punkt tokenizer, in case
|
| 156 |
+
they didn't download that model.
|
| 157 |
+
|
| 158 |
+
default: unzip or not?
|
| 159 |
+
|
| 160 |
+
"""
|
| 161 |
+
import functools
|
| 162 |
+
import itertools
|
| 163 |
+
import os
|
| 164 |
+
import shutil
|
| 165 |
+
import subprocess
|
| 166 |
+
import sys
|
| 167 |
+
import textwrap
|
| 168 |
+
import threading
|
| 169 |
+
import time
|
| 170 |
+
import warnings
|
| 171 |
+
import zipfile
|
| 172 |
+
from hashlib import md5
|
| 173 |
+
from xml.etree import ElementTree
|
| 174 |
+
|
| 175 |
+
try:
|
| 176 |
+
TKINTER = True
|
| 177 |
+
from tkinter import Button, Canvas, Entry, Frame, IntVar, Label, Menu, TclError, Tk
|
| 178 |
+
from tkinter.messagebox import showerror
|
| 179 |
+
|
| 180 |
+
from nltk.draw.table import Table
|
| 181 |
+
from nltk.draw.util import ShowText
|
| 182 |
+
except ImportError:
|
| 183 |
+
TKINTER = False
|
| 184 |
+
TclError = ValueError
|
| 185 |
+
|
| 186 |
+
from urllib.error import HTTPError, URLError
|
| 187 |
+
from urllib.request import urlopen
|
| 188 |
+
|
| 189 |
+
import nltk
|
| 190 |
+
|
| 191 |
+
# urllib2 = nltk.internals.import_from_stdlib('urllib2')
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
######################################################################
|
| 195 |
+
# Directory entry objects (from the data server's index file)
|
| 196 |
+
######################################################################
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
class Package:
    """
    A directory entry for a downloadable package.  These entries are
    extracted from the XML index file that is downloaded by
    ``Downloader``.  Each package consists of a single file; but if
    that file is a zip file, then it can be automatically decompressed
    when the package is installed.
    """

    def __init__(
        self,
        id,
        url,
        name=None,
        subdir="",
        size=None,
        unzipped_size=None,
        checksum=None,
        svn_revision=None,
        copyright="Unknown",
        contact="Unknown",
        license="Unknown",
        author="Unknown",
        unzip=True,
        **kw,
    ):
        """
        Create a new package directory entry.

        :param id: A unique identifier for this package.
        :param url: A URL that can be used to download this package's file.
        :param name: A human-readable name (defaults to ``id``).
        :param subdir: The subdirectory where this package should be
            installed, e.g. ``'corpora'`` or ``'taggers'``.
        :param size: The filesize (in bytes) of the package file, or
            ``None`` if unknown.
        :param unzipped_size: The total filesize of the files contained
            in the package's zipfile, or ``None`` if unknown.
        :param checksum: The MD-5 checksum of the package file.
        :param svn_revision: A subversion revision number for this package.
        :param copyright: Copyright holder for this package.
        :param contact: Name & email of the person who should be
            contacted with questions about this package.
        :param license: License information for this package.
        :param author: Author of this package.
        :param unzip: Whether this package should be unzipped by default.
            Index files encode this as the string ``'0'`` or ``'1'``.
        :param kw: Any other attributes provided by the XML index file;
            they are stored directly on the instance.
        """
        # A unique identifier for this package.
        self.id = id

        # A string name for this package.
        self.name = name or id

        # The subdirectory where this package should be installed.
        self.subdir = subdir

        # A URL that can be used to download this package's file.
        self.url = url

        # The filesize (in bytes) of the package file.  The index
        # supplies sizes as strings; tolerate a missing value rather
        # than raising TypeError from int(None).
        self.size = int(size) if size is not None else None

        # The total filesize of the files contained in the package's
        # zipfile (None if not specified by the index).
        self.unzipped_size = int(unzipped_size) if unzipped_size is not None else None

        # The MD-5 checksum of the package file.
        self.checksum = checksum

        # A subversion revision number for this package.
        self.svn_revision = svn_revision

        # Copyright holder for this package.
        self.copyright = copyright

        # Name & email of the person who should be contacted with
        # questions about this package.
        self.contact = contact

        # License information for this package.
        self.license = license

        # Author of this package.
        self.author = author

        # The filename that should be used for this package's file.  It
        # is formed by joining ``self.subdir`` with ``self.id``, and
        # using the same extension as ``url``.
        ext = os.path.splitext(url.split("/")[-1])[1]
        self.filename = os.path.join(subdir, id + ext)

        # A flag indicating whether this corpus should be unzipped by
        # default.  The index encodes it as '0' or '1'.
        self.unzip = bool(int(unzip))

        # Include any other attributes provided by the XML file.
        self.__dict__.update(kw)

    @staticmethod
    def fromxml(xml):
        """
        Build a ``Package`` from an XML ``<package>`` element, or from
        the path of an XML file containing one.
        """
        if isinstance(xml, str):
            # ElementTree.parse returns an ElementTree, not an Element;
            # we need the root element to read its attributes.
            xml = ElementTree.parse(xml).getroot()
        # Copy the attributes instead of mutating the caller's element.
        attrib = {key: str(value) for key, value in xml.attrib.items()}
        return Package(**attrib)

    def __lt__(self, other):
        # Packages sort by their identifier.
        return self.id < other.id

    def __repr__(self):
        return "<Package %s>" % self.id
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
class Collection:
    """
    A directory entry for a collection of downloadable packages.
    These entries are extracted from the XML index file that is
    downloaded by ``Downloader``.
    """

    def __init__(self, id, children, name=None, **kw):
        """
        Create a new collection directory entry.

        :param id: A unique identifier for this collection.
        :param children: The ``Collections`` or ``Packages`` directly
            contained by this collection (initially identifier strings
            when built via ``fromxml``; resolved to objects later).
        :param name: A human-readable name (defaults to ``id``).
        :param kw: Any other attributes provided by the XML index file;
            they are stored directly on the instance.
        """
        # A unique identifier for this collection.
        self.id = id

        # A string name for this collection.
        self.name = name or id

        # A list of the ``Collections`` or ``Packages`` directly
        # contained by this collection.
        self.children = children

        # A list of ``Packages`` contained by this collection or any
        # collections it recursively contains.  Populated later (e.g.
        # when the index is resolved); None until then.
        self.packages = None

        # Include any other attributes provided by the XML file.
        self.__dict__.update(kw)

    @staticmethod
    def fromxml(xml):
        """
        Build a ``Collection`` from an XML ``<collection>`` element, or
        from the path of an XML file containing one.  The children are
        returned as identifier strings (the ``ref`` of each ``<item>``).
        """
        if isinstance(xml, str):
            # ElementTree.parse returns an ElementTree, not an Element;
            # we need the root element to read its attributes.
            xml = ElementTree.parse(xml).getroot()
        # Copy the attributes instead of mutating the caller's element.
        attrib = {key: str(value) for key, value in xml.attrib.items()}
        children = [child.get("ref") for child in xml.findall("item")]
        return Collection(children=children, **attrib)

    def __lt__(self, other):
        # Collections sort by their identifier.
        return self.id < other.id

    def __repr__(self):
        return "<Collection %s>" % self.id
|
| 331 |
+
|
| 332 |
+
|
| 333 |
+
######################################################################
|
| 334 |
+
# Message Passing Objects
|
| 335 |
+
######################################################################
|
| 336 |
+
|
| 337 |
+
|
| 338 |
+
class DownloaderMessage:
    """Base class for the status messages that ``incr_download`` yields
    to report its progress to callers."""
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
class StartCollectionMessage(DownloaderMessage):
    """Sent when the data server begins processing a collection of packages."""

    def __init__(self, collection):
        # The collection being processed.
        self.collection = collection
|
| 348 |
+
|
| 349 |
+
|
| 350 |
+
class FinishCollectionMessage(DownloaderMessage):
    """Sent when the data server completes a collection of packages."""

    def __init__(self, collection):
        # The collection that was processed.
        self.collection = collection
|
| 355 |
+
|
| 356 |
+
|
| 357 |
+
class StartPackageMessage(DownloaderMessage):
    """Sent when the data server begins processing a package."""

    def __init__(self, package):
        # The package being processed.
        self.package = package
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
class FinishPackageMessage(DownloaderMessage):
    """Sent when the data server completes a package."""

    def __init__(self, package):
        # The package that was processed.
        self.package = package
|
| 369 |
+
|
| 370 |
+
|
| 371 |
+
class StartDownloadMessage(DownloaderMessage):
    """Sent when the data server begins downloading a package file."""

    def __init__(self, package):
        # The package whose download has started.
        self.package = package
|
| 376 |
+
|
| 377 |
+
|
| 378 |
+
class FinishDownloadMessage(DownloaderMessage):
    """Sent when the data server finishes downloading a package file."""

    def __init__(self, package):
        # The package whose download has completed.
        self.package = package
|
| 383 |
+
|
| 384 |
+
|
| 385 |
+
class StartUnzipMessage(DownloaderMessage):
    """Sent when the data server begins unzipping a package."""

    def __init__(self, package):
        # The package being unzipped.
        self.package = package
|
| 390 |
+
|
| 391 |
+
|
| 392 |
+
class FinishUnzipMessage(DownloaderMessage):
    """Sent when the data server finishes unzipping a package."""

    def __init__(self, package):
        # The package that was unzipped.
        self.package = package
|
| 397 |
+
|
| 398 |
+
|
| 399 |
+
class UpToDateMessage(DownloaderMessage):
    """Sent when a package's downloaded file is already up-to-date."""

    def __init__(self, package):
        # The package that needs no re-download.
        self.package = package
|
| 404 |
+
|
| 405 |
+
|
| 406 |
+
class StaleMessage(DownloaderMessage):
    """Sent when a package's downloaded file is out-of-date or corrupt."""

    def __init__(self, package):
        # The package whose local copy is stale.
        self.package = package
|
| 411 |
+
|
| 412 |
+
|
| 413 |
+
class ErrorMessage(DownloaderMessage):
    """Sent when the data server encounters an error."""

    def __init__(self, package, message):
        # The package (or collection/identifier) the error relates to.
        self.package = package
        # Exceptions are flattened to their string form so the message
        # is always plain text.
        self.message = str(message) if isinstance(message, Exception) else message
|
| 422 |
+
|
| 423 |
+
|
| 424 |
+
class ProgressMessage(DownloaderMessage):
    """Reports how much progress the data server has made."""

    def __init__(self, progress):
        # Progress amount (as produced by the data server).
        self.progress = progress
|
| 429 |
+
|
| 430 |
+
|
| 431 |
+
class SelectDownloadDirMessage(DownloaderMessage):
    """Reports which download directory the data server is using."""

    def __init__(self, download_dir):
        # The directory packages will be written to.
        self.download_dir = download_dir
|
| 436 |
+
|
| 437 |
+
|
| 438 |
+
######################################################################
|
| 439 |
+
# NLTK Data Server
|
| 440 |
+
######################################################################
|
| 441 |
+
|
| 442 |
+
|
| 443 |
+
class Downloader:
|
| 444 |
+
"""
|
| 445 |
+
A class used to access the NLTK data server, which can be used to
|
| 446 |
+
download corpora and other data packages.
|
| 447 |
+
"""
|
| 448 |
+
|
| 449 |
+
# /////////////////////////////////////////////////////////////////
|
| 450 |
+
# Configuration
|
| 451 |
+
# /////////////////////////////////////////////////////////////////
|
| 452 |
+
|
| 453 |
+
INDEX_TIMEOUT = 60 * 60 # 1 hour
|
| 454 |
+
"""The amount of time after which the cached copy of the data
|
| 455 |
+
server index will be considered 'stale,' and will be
|
| 456 |
+
re-downloaded."""
|
| 457 |
+
|
| 458 |
+
DEFAULT_URL = "https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/index.xml"
|
| 459 |
+
"""The default URL for the NLTK data server's index. An
|
| 460 |
+
alternative URL can be specified when creating a new
|
| 461 |
+
``Downloader`` object."""
|
| 462 |
+
|
| 463 |
+
# /////////////////////////////////////////////////////////////////
|
| 464 |
+
# Status Constants
|
| 465 |
+
# /////////////////////////////////////////////////////////////////
|
| 466 |
+
|
| 467 |
+
INSTALLED = "installed"
|
| 468 |
+
"""A status string indicating that a package or collection is
|
| 469 |
+
installed and up-to-date."""
|
| 470 |
+
NOT_INSTALLED = "not installed"
|
| 471 |
+
"""A status string indicating that a package or collection is
|
| 472 |
+
not installed."""
|
| 473 |
+
STALE = "out of date"
|
| 474 |
+
"""A status string indicating that a package or collection is
|
| 475 |
+
corrupt or out-of-date."""
|
| 476 |
+
PARTIAL = "partial"
|
| 477 |
+
"""A status string indicating that a collection is partially
|
| 478 |
+
installed (i.e., only some of its packages are installed.)"""
|
| 479 |
+
|
| 480 |
+
# /////////////////////////////////////////////////////////////////
|
| 481 |
+
# Constructor
|
| 482 |
+
# /////////////////////////////////////////////////////////////////
|
| 483 |
+
|
| 484 |
+
def __init__(self, server_index_url=None, download_dir=None):
|
| 485 |
+
self._url = server_index_url or self.DEFAULT_URL
|
| 486 |
+
"""The URL for the data server's index file."""
|
| 487 |
+
|
| 488 |
+
self._collections = {}
|
| 489 |
+
"""Dictionary from collection identifier to ``Collection``"""
|
| 490 |
+
|
| 491 |
+
self._packages = {}
|
| 492 |
+
"""Dictionary from package identifier to ``Package``"""
|
| 493 |
+
|
| 494 |
+
self._download_dir = download_dir
|
| 495 |
+
"""The default directory to which packages will be downloaded."""
|
| 496 |
+
|
| 497 |
+
self._index = None
|
| 498 |
+
"""The XML index file downloaded from the data server"""
|
| 499 |
+
|
| 500 |
+
self._index_timestamp = None
|
| 501 |
+
"""Time at which ``self._index`` was downloaded. If it is more
|
| 502 |
+
than ``INDEX_TIMEOUT`` seconds old, it will be re-downloaded."""
|
| 503 |
+
|
| 504 |
+
self._status_cache = {}
|
| 505 |
+
"""Dictionary from package/collection identifier to status
|
| 506 |
+
string (``INSTALLED``, ``NOT_INSTALLED``, ``STALE``, or
|
| 507 |
+
``PARTIAL``). Cache is used for packages only, not
|
| 508 |
+
collections."""
|
| 509 |
+
|
| 510 |
+
self._errors = None
|
| 511 |
+
"""Flag for telling if all packages got successfully downloaded or not."""
|
| 512 |
+
|
| 513 |
+
# decide where we're going to save things to.
|
| 514 |
+
if self._download_dir is None:
|
| 515 |
+
self._download_dir = self.default_download_dir()
|
| 516 |
+
|
| 517 |
+
# /////////////////////////////////////////////////////////////////
|
| 518 |
+
# Information
|
| 519 |
+
# /////////////////////////////////////////////////////////////////
|
| 520 |
+
|
| 521 |
+
def list(
    self,
    download_dir=None,
    show_packages=True,
    show_collections=True,
    header=True,
    more_prompt=False,
    skip_installed=False,
):
    """Print a formatted listing of the server's packages and/or
    collections to stdout, marking each entry's install status.

    :param download_dir: directory used for status checks; defaults to
        ``self._download_dir``.
    :param more_prompt: pause every ~20 printed lines and let the user
        continue (Enter) or abort (``x``/``q``).
    :param skip_installed: omit entries whose status is INSTALLED.
    """
    lines = 0  # for more_prompt
    if download_dir is None:
        download_dir = self._download_dir
        print("Using default data directory (%s)" % download_dir)
    if header:
        print("=" * (26 + len(self._url)))
        print(" Data server index for <%s>" % self._url)
        print("=" * (26 + len(self._url)))
        lines += 3  # for more_prompt
    # Track whether any stale/partial entries were seen, to decide
    # which legend lines to print at the end.
    stale = partial = False

    categories = []
    if show_packages:
        categories.append("packages")
    if show_collections:
        categories.append("collections")
    for category in categories:
        print("%s:" % category.capitalize())
        lines += 1  # for more_prompt
        # getattr dispatches to self.packages() or self.collections().
        for info in sorted(getattr(self, category)(), key=str):
            status = self.status(info, download_dir)
            if status == self.INSTALLED and skip_installed:
                continue
            if status == self.STALE:
                stale = True
            if status == self.PARTIAL:
                partial = True
            # One-character status marker shown in front of the entry.
            prefix = {
                self.INSTALLED: "*",
                self.STALE: "-",
                self.PARTIAL: "P",
                self.NOT_INSTALLED: " ",
            }[status]
            # Wrap long names with a 27-column hanging indent; the
            # leading dashes pad the first line, then get sliced off.
            name = textwrap.fill(
                "-" * 27 + (info.name or info.id), 75, subsequent_indent=27 * " "
            )[27:]
            print(" [{}] {} {}".format(prefix, info.id.ljust(20, "."), name))
            lines += len(name.split("\n"))  # for more_prompt
            if more_prompt and lines > 20:
                user_input = input("Hit Enter to continue: ")
                if user_input.lower() in ("x", "q"):
                    return
                lines = 0
                print()
    # Legend: only mention markers that actually appeared.
    msg = "([*] marks installed packages"
    if stale:
        msg += "; [-] marks out-of-date or corrupt packages"
    if partial:
        msg += "; [P] marks partially installed collections"
    print(textwrap.fill(msg + ")", subsequent_indent=" ", width=76))
|
| 580 |
+
|
| 581 |
+
def packages(self):
    """Return the ``Package`` records for every package the data
    server offers, refreshing the index first if it is stale."""
    self._update_index()
    return self._packages.values()
|
| 584 |
+
|
| 585 |
+
def corpora(self):
    """Return the ``Package`` records for the corpus packages offered
    by the data server, i.e. those whose ``subdir`` is ``"corpora"``.

    Refreshes the index first via ``_update_index()``, which may hit
    the network.
    """
    self._update_index()
    # Iterate values() directly: the previous items() loop discarded
    # the key and shadowed the builtin ``id``.
    return [pkg for pkg in self._packages.values() if pkg.subdir == "corpora"]
|
| 588 |
+
|
| 589 |
+
def models(self):
    """Return the ``Package`` records for the non-corpus (model)
    packages offered by the data server, i.e. those whose ``subdir``
    is anything other than ``"corpora"``.

    Refreshes the index first via ``_update_index()``, which may hit
    the network.
    """
    self._update_index()
    # Iterate values() directly: the previous items() loop discarded
    # the key and shadowed the builtin ``id``.
    return [pkg for pkg in self._packages.values() if pkg.subdir != "corpora"]
|
| 592 |
+
|
| 593 |
+
def collections(self):
    """Return the ``Collection`` records for every collection the data
    server offers, refreshing the index first if it is stale."""
    self._update_index()
    return self._collections.values()
|
| 596 |
+
|
| 597 |
+
# /////////////////////////////////////////////////////////////////
|
| 598 |
+
# Downloading
|
| 599 |
+
# /////////////////////////////////////////////////////////////////
|
| 600 |
+
|
| 601 |
+
def _info_or_id(self, info_or_id):
|
| 602 |
+
if isinstance(info_or_id, str):
|
| 603 |
+
return self.info(info_or_id)
|
| 604 |
+
else:
|
| 605 |
+
return info_or_id
|
| 606 |
+
|
| 607 |
+
# [xx] When during downloading is it 'safe' to abort? Only unsafe
|
| 608 |
+
# time is *during* an unzip -- we don't want to leave a
|
| 609 |
+
# partially-unzipped corpus in place because we wouldn't notice
|
| 610 |
+
# it. But if we had the exact total size of the unzipped corpus,
|
| 611 |
+
# then that would be fine. Then we could abort anytime we want!
|
| 612 |
+
# So this is really what we should do. That way the threaded
|
| 613 |
+
# downloader in the gui can just kill the download thread anytime
|
| 614 |
+
# it wants.
|
| 615 |
+
|
| 616 |
+
def incr_download(self, info_or_id, download_dir=None, force=False):
    """Incrementally download *info_or_id*, yielding status and
    progress messages as the work proceeds.

    *info_or_id* may be a ``Package``/``Collection`` record, an
    identifier string, or a list/tuple of either.
    """
    # If they didn't specify a download_dir, then use the default one.
    if download_dir is None:
        download_dir = self._download_dir
        # Tell consumers which directory was chosen.
        yield SelectDownloadDirMessage(download_dir)

    # If they gave us a list of ids, then download each one.
    if isinstance(info_or_id, (list, tuple)):
        yield from self._download_list(info_or_id, download_dir, force)
        return

    # Look up the requested collection or package.
    try:
        info = self._info_or_id(info_or_id)
    except (OSError, ValueError) as e:
        yield ErrorMessage(None, f"Error loading {info_or_id}: {e}")
        return

    # Handle collections.
    if isinstance(info, Collection):
        yield StartCollectionMessage(info)
        # Recurse on the children; they are a list, so the list/tuple
        # branch above dispatches them to _download_list.
        yield from self.incr_download(info.children, download_dir, force)
        yield FinishCollectionMessage(info)

    # Handle Packages (delegate to a helper function).
    else:
        yield from self._download_package(info, download_dir, force)
|
| 643 |
+
|
| 644 |
+
def _num_packages(self, item):
    """Return how many packages *item* represents: 1 for a ``Package``,
    otherwise the size of the item's ``packages`` attribute."""
    return 1 if isinstance(item, Package) else len(item.packages)
|
| 649 |
+
|
| 650 |
+
def _download_list(self, items, download_dir, force):
    """Download every entry of *items*, yielding messages with progress
    rescaled to a single 0..100 range spanning the whole list.

    NOTE(review): identifier strings in *items* are resolved in place,
    mutating the caller's list.
    """
    # Look up the requested items.
    for i in range(len(items)):
        try:
            items[i] = self._info_or_id(items[i])
        except (OSError, ValueError) as e:
            yield ErrorMessage(items[i], e)
            return

    # Download each item, re-scaling their progress.
    num_packages = sum(self._num_packages(item) for item in items)
    progress = 0
    for i, item in enumerate(items):
        # Each item's share of the overall progress is proportional
        # to how many packages it contains.
        if isinstance(item, Package):
            delta = 1.0 / num_packages
        else:
            delta = len(item.packages) / num_packages
        for msg in self.incr_download(item, download_dir, force):
            if isinstance(msg, ProgressMessage):
                # Rescale the child's 0..100 progress into this
                # item's slice of the overall range.
                yield ProgressMessage(progress + msg.progress * delta)
            else:
                yield msg

        progress += 100 * delta
|
| 674 |
+
|
| 675 |
+
def _download_package(self, info, download_dir, force):
    """Download (and, for zip packages, unzip) a single ``Package``,
    yielding status and progress messages.

    Skips the download entirely when the package is already INSTALLED
    and *force* is false.
    """
    yield StartPackageMessage(info)
    yield ProgressMessage(0)

    # Do we already have the current version?
    status = self.status(info, download_dir)
    if not force and status == self.INSTALLED:
        yield UpToDateMessage(info)
        yield ProgressMessage(100)
        yield FinishPackageMessage(info)
        return

    # Remove the package from our status cache
    self._status_cache.pop(info.id, None)

    # Check for (and remove) any old/stale version.
    filepath = os.path.join(download_dir, info.filename)
    if os.path.exists(filepath):
        if status == self.STALE:
            yield StaleMessage(info)
        os.remove(filepath)

    # Ensure the download_dir exists
    if not os.path.exists(download_dir):
        os.makedirs(download_dir)
    if not os.path.exists(os.path.join(download_dir, info.subdir)):
        os.makedirs(os.path.join(download_dir, info.subdir))

    # Download the file.  This will raise an IOError if the url
    # is not found.
    yield StartDownloadMessage(info)
    yield ProgressMessage(5)
    try:
        infile = urlopen(info.url)
        with open(filepath, "wb") as outfile:
            # Expected block count, used to scale progress 5..80.
            num_blocks = max(1, info.size / (1024 * 16))
            for block in itertools.count():
                s = infile.read(1024 * 16)  # 16k blocks.
                outfile.write(s)
                if not s:
                    break
                if block % 2 == 0:  # how often?
                    yield ProgressMessage(min(80, 5 + 75 * (block / num_blocks)))
        infile.close()
    except OSError as e:
        # Report the failure and bail out; the caller decides whether
        # to retry or halt.
        yield ErrorMessage(
            info,
            "Error downloading %r from <%s>:" "\n %s" % (info.id, info.url, e),
        )
        return
    yield FinishDownloadMessage(info)
    yield ProgressMessage(80)

    # If it's a zipfile, uncompress it.
    if info.filename.endswith(".zip"):
        zipdir = os.path.join(download_dir, info.subdir)
        # Unzip if we're unzipping by default; *or* if it's already
        # been unzipped (presumably a previous version).
        if info.unzip or os.path.exists(os.path.join(zipdir, info.id)):
            yield StartUnzipMessage(info)
            for msg in _unzip_iter(filepath, zipdir, verbose=False):
                # Somewhat of a hack, but we need a proper package reference
                msg.package = info
                yield msg
            yield FinishUnzipMessage(info)

    yield FinishPackageMessage(info)
|
| 742 |
+
|
| 743 |
+
def download(
    self,
    info_or_id=None,
    download_dir=None,
    quiet=False,
    force=False,
    prefix="[nltk_data] ",
    halt_on_error=True,
    raise_on_error=False,
    print_error_to=sys.stderr,
):
    """Download *info_or_id* (a record, identifier string, or list of
    either), printing status messages prefixed with *prefix* to
    *print_error_to*.

    With no *info_or_id*, launches the interactive downloader instead.
    Returns False when downloading stopped due to an error, True
    otherwise.
    """

    print_to = functools.partial(print, file=print_error_to)
    # If no info or id is given, then use the interactive shell.
    if info_or_id is None:
        # [xx] hmm -- changing self._download_dir here seems like
        # the wrong thing to do.  Maybe the _interactive_download
        # function should make a new copy of self to use?
        if download_dir is not None:
            self._download_dir = download_dir
        self._interactive_download()
        return True

    else:
        # Define a helper function for displaying output:
        def show(s, prefix2=""):
            print_to(
                textwrap.fill(
                    s,
                    initial_indent=prefix + prefix2,
                    subsequent_indent=prefix + prefix2 + " " * 4,
                )
            )

        # Consume the incremental-download generator, rendering each
        # message as console output.
        for msg in self.incr_download(info_or_id, download_dir, force):
            # Error messages
            if isinstance(msg, ErrorMessage):
                show(msg.message)
                if raise_on_error:
                    raise ValueError(msg.message)
                if halt_on_error:
                    return False
                self._errors = True
                if not quiet:
                    # Offer an interactive retry of the failed package.
                    print_to("Error installing package. Retry? [n/y/e]")
                    choice = input().strip()
                    if choice in ["y", "Y"]:
                        if not self.download(
                            msg.package.id,
                            download_dir,
                            quiet,
                            force,
                            prefix,
                            halt_on_error,
                            raise_on_error,
                        ):
                            return False
                    elif choice in ["e", "E"]:
                        return False

            # All other messages
            if not quiet:
                # Collection downloading messages:
                if isinstance(msg, StartCollectionMessage):
                    show("Downloading collection %r" % msg.collection.id)
                    # Deepen the prefix while inside a collection.
                    prefix += " | "
                    print_to(prefix)
                elif isinstance(msg, FinishCollectionMessage):
                    print_to(prefix)
                    prefix = prefix[:-4]
                    if self._errors:
                        show(
                            "Downloaded collection %r with errors"
                            % msg.collection.id
                        )
                    else:
                        show("Done downloading collection %s" % msg.collection.id)

                # Package downloading messages:
                elif isinstance(msg, StartPackageMessage):
                    show(
                        "Downloading package %s to %s..."
                        % (msg.package.id, download_dir)
                    )
                elif isinstance(msg, UpToDateMessage):
                    show("Package %s is already up-to-date!" % msg.package.id, " ")
                # elif isinstance(msg, StaleMessage):
                #     show('Package %s is out-of-date or corrupt' %
                #          msg.package.id, ' ')
                elif isinstance(msg, StartUnzipMessage):
                    show("Unzipping %s." % msg.package.filename, " ")

                # Data directory message:
                elif isinstance(msg, SelectDownloadDirMessage):
                    download_dir = msg.download_dir
        return True
|
| 839 |
+
|
| 840 |
+
def is_stale(self, info_or_id, download_dir=None):
    """True iff the given package/collection is installed but
    out-of-date or corrupt."""
    return self.STALE == self.status(info_or_id, download_dir)
|
| 842 |
+
|
| 843 |
+
def is_installed(self, info_or_id, download_dir=None):
    """True iff the given package/collection is fully installed and
    up-to-date."""
    return self.INSTALLED == self.status(info_or_id, download_dir)
|
| 845 |
+
|
| 846 |
+
def clear_status_cache(self, id=None):
|
| 847 |
+
if id is None:
|
| 848 |
+
self._status_cache.clear()
|
| 849 |
+
else:
|
| 850 |
+
self._status_cache.pop(id, None)
|
| 851 |
+
|
| 852 |
+
def status(self, info_or_id, download_dir=None):
    """
    Return a constant describing the status of the given package
    or collection.  Status can be one of ``INSTALLED``,
    ``NOT_INSTALLED``, ``STALE``, or ``PARTIAL``.
    """
    if download_dir is None:
        download_dir = self._download_dir
    info = self._info_or_id(info_or_id)

    # Handle collections:
    if isinstance(info, Collection):
        # A collection's status is an aggregate over its packages:
        # any STALE member makes it STALE; a mix of installed and
        # missing members makes it PARTIAL.
        pkg_status = [self.status(pkg.id) for pkg in info.packages]
        if self.STALE in pkg_status:
            return self.STALE
        elif self.PARTIAL in pkg_status:
            return self.PARTIAL
        elif self.INSTALLED in pkg_status and self.NOT_INSTALLED in pkg_status:
            return self.PARTIAL
        elif self.NOT_INSTALLED in pkg_status:
            return self.NOT_INSTALLED
        else:
            return self.INSTALLED

    # Handle packages:
    else:
        filepath = os.path.join(download_dir, info.filename)
        if download_dir != self._download_dir:
            # Non-default directory: don't consult or fill the cache,
            # which is only valid for self._download_dir.
            return self._pkg_status(info, filepath)
        else:
            if info.id not in self._status_cache:
                self._status_cache[info.id] = self._pkg_status(info, filepath)
            return self._status_cache[info.id]
|
| 885 |
+
|
| 886 |
+
def _pkg_status(self, info, filepath):
    """Compute the status of package *info* whose downloaded archive
    should live at *filepath* (no caching; see ``status()``)."""
    if not os.path.exists(filepath):
        return self.NOT_INSTALLED

    # Check if the file has the correct size.
    try:
        filestat = os.stat(filepath)
    except OSError:
        # File vanished between the exists() check and stat().
        return self.NOT_INSTALLED
    if filestat.st_size != int(info.size):
        return self.STALE

    # Check if the file's checksum matches
    if md5_hexdigest(filepath) != info.checksum:
        return self.STALE

    # If it's a zipfile, and it's been at least partially
    # unzipped, then check if it's been fully unzipped.
    if filepath.endswith(".zip"):
        unzipdir = filepath[:-4]
        if not os.path.exists(unzipdir):
            return self.INSTALLED  # but not unzipped -- ok!
        if not os.path.isdir(unzipdir):
            return self.STALE

        # Total size of all extracted files, compared against the
        # expected unzipped size from the index.
        unzipped_size = sum(
            os.stat(os.path.join(d, f)).st_size
            for d, _, files in os.walk(unzipdir)
            for f in files
        )
        if unzipped_size != info.unzipped_size:
            return self.STALE

    # Otherwise, everything looks good.
    return self.INSTALLED
|
| 921 |
+
|
| 922 |
+
def update(self, quiet=False, prefix="[nltk_data] "):
    """Re-download every package whose status is STALE."""
    # Drop cached statuses so each package is re-checked on disk.
    self.clear_status_cache()
    for pkg in self.packages():
        if self.status(pkg) != self.STALE:
            continue
        self.download(pkg, quiet=quiet, prefix=prefix)
|
| 930 |
+
|
| 931 |
+
# /////////////////////////////////////////////////////////////////
|
| 932 |
+
# Index
|
| 933 |
+
# /////////////////////////////////////////////////////////////////
|
| 934 |
+
|
| 935 |
+
def _update_index(self, url=None):
    """A helper function that ensures that self._index is
    up-to-date.  If the index is older than self.INDEX_TIMEOUT,
    then download it again.

    :param url: when given, switch the server URL and force a
        re-download of the index.
    """
    # Check if the index is already up-to-date.  If so, do nothing.
    if not (
        self._index is None
        or url is not None
        or time.time() - self._index_timestamp > self.INDEX_TIMEOUT
    ):
        return

    # If a URL was specified, then update our URL.
    self._url = url or self._url

    # Download the index file.
    self._index = nltk.internals.ElementWrapper(
        ElementTree.parse(urlopen(self._url)).getroot()
    )
    self._index_timestamp = time.time()

    # Build a dictionary of packages.
    packages = [Package.fromxml(p) for p in self._index.findall("packages/package")]
    self._packages = {p.id: p for p in packages}

    # Build a dictionary of collections.
    collections = [
        Collection.fromxml(c) for c in self._index.findall("collections/collection")
    ]
    self._collections = {c.id: c for c in collections}

    # Replace identifiers with actual children in collection.children.
    # BUG FIX: the previous implementation deleted entries from
    # ``collection.children`` while enumerating it, which skips the
    # element following each deletion (and could mis-resolve later
    # indices).  Build the resolved list instead.
    for collection in self._collections.values():
        resolved = []
        for child_id in collection.children:
            if child_id in self._packages:
                resolved.append(self._packages[child_id])
            elif child_id in self._collections:
                resolved.append(self._collections[child_id])
            else:
                print(
                    "removing collection member with no package: {}".format(
                        child_id
                    )
                )
        collection.children = resolved

    # Fill in collection.packages for each collection, by walking the
    # (possibly nested) collection tree breadth-first.
    for collection in self._collections.values():
        packages = {}
        queue = [collection]
        for child in queue:
            if isinstance(child, Collection):
                queue.extend(child.children)
            elif isinstance(child, Package):
                packages[child.id] = child
            else:
                pass
        collection.packages = packages.values()

    # Flush the status cache
    self._status_cache.clear()
|
| 996 |
+
|
| 997 |
+
def index(self):
    """Return the XML index describing the server's packages,
    downloading it first if necessary (see ``_update_index``)."""
    self._update_index()
    return self._index
|
| 1005 |
+
|
| 1006 |
+
def info(self, id):
    """Look up the ``Package`` or ``Collection`` record for *id*.

    Packages take precedence over collections with the same id.
    Raises ``ValueError`` when the identifier is unknown.
    """
    self._update_index()
    for registry in (self._packages, self._collections):
        if id in registry:
            return registry[id]
    raise ValueError("Package %r not found in index" % id)
|
| 1015 |
+
|
| 1016 |
+
def xmlinfo(self, id):
    """Return the raw XML element for the package or collection named
    *id*; raises ``ValueError`` when it is unknown."""
    self._update_index()
    for path in ("packages/package", "collections/collection"):
        for node in self._index.findall(path):
            if node.get("id") == id:
                return node
    raise ValueError("Package %r not found in index" % id)
|
| 1026 |
+
|
| 1027 |
+
# /////////////////////////////////////////////////////////////////
|
| 1028 |
+
# URL & Data Directory
|
| 1029 |
+
# /////////////////////////////////////////////////////////////////
|
| 1030 |
+
|
| 1031 |
+
def _get_url(self):
|
| 1032 |
+
"""The URL for the data server's index file."""
|
| 1033 |
+
return self._url
|
| 1034 |
+
|
| 1035 |
+
def _set_url(self, url):
|
| 1036 |
+
"""
|
| 1037 |
+
Set a new URL for the data server. If we're unable to contact
|
| 1038 |
+
the given url, then the original url is kept.
|
| 1039 |
+
"""
|
| 1040 |
+
original_url = self._url
|
| 1041 |
+
try:
|
| 1042 |
+
self._update_index(url)
|
| 1043 |
+
except:
|
| 1044 |
+
self._url = original_url
|
| 1045 |
+
raise
|
| 1046 |
+
|
| 1047 |
+
url = property(_get_url, _set_url)
|
| 1048 |
+
|
| 1049 |
+
def default_download_dir(self):
    """
    Return the directory to which packages will be downloaded by
    default.  This value can be overridden using the constructor,
    or on a case-by-case basis using the ``download_dir`` argument when
    calling ``download()``.

    On Windows, the default download directory is
    ``PYTHONHOME/lib/nltk``, where *PYTHONHOME* is the
    directory containing Python, e.g. ``C:\\Python25``.

    On all other platforms, the default directory is the first of
    the following which exists or which can be created with write
    permission: ``/usr/share/nltk_data``, ``/usr/local/share/nltk_data``,
    ``/usr/lib/nltk_data``, ``/usr/local/lib/nltk_data``, ``~/nltk_data``.
    """
    # Check if we are on GAE where we cannot write into filesystem.
    if "APPENGINE_RUNTIME" in os.environ:
        # NOTE(review): returns None implicitly here — callers must
        # tolerate a missing download directory on GAE.
        return

    # Check if we have sufficient permissions to install in a
    # variety of system-wide locations.
    for nltkdir in nltk.data.path:
        if os.path.exists(nltkdir) and nltk.internals.is_writable(nltkdir):
            return nltkdir

    # On Windows, use %APPDATA%
    if sys.platform == "win32" and "APPDATA" in os.environ:
        homedir = os.environ["APPDATA"]

    # Otherwise, install in the user's home directory.
    else:
        homedir = os.path.expanduser("~/")
        # expanduser returns its argument unchanged when no home
        # directory can be determined.
        if homedir == "~/":
            raise ValueError("Could not find a default download directory")

    # append "nltk_data" to the home directory
    return os.path.join(homedir, "nltk_data")
|
| 1087 |
+
|
| 1088 |
+
def _get_download_dir(self):
|
| 1089 |
+
"""
|
| 1090 |
+
The default directory to which packages will be downloaded.
|
| 1091 |
+
This defaults to the value returned by ``default_download_dir()``.
|
| 1092 |
+
To override this default on a case-by-case basis, use the
|
| 1093 |
+
``download_dir`` argument when calling ``download()``.
|
| 1094 |
+
"""
|
| 1095 |
+
return self._download_dir
|
| 1096 |
+
|
| 1097 |
+
def _set_download_dir(self, download_dir):
|
| 1098 |
+
self._download_dir = download_dir
|
| 1099 |
+
# Clear the status cache.
|
| 1100 |
+
self._status_cache.clear()
|
| 1101 |
+
|
| 1102 |
+
download_dir = property(_get_download_dir, _set_download_dir)
|
| 1103 |
+
|
| 1104 |
+
# /////////////////////////////////////////////////////////////////
|
| 1105 |
+
# Interactive Shell
|
| 1106 |
+
# /////////////////////////////////////////////////////////////////
|
| 1107 |
+
|
| 1108 |
+
def _interactive_download(self):
    """Run the interactive downloader: prefer the Tkinter GUI, falling
    back to the plain text shell when Tkinter is unavailable or fails
    to start."""
    if not TKINTER:
        DownloaderShell(self).run()
        return
    try:
        DownloaderGUI(self).mainloop()
    except TclError:
        DownloaderShell(self).run()
|
| 1118 |
+
|
| 1119 |
+
|
| 1120 |
+
class DownloaderShell:
    """Plain-text interactive front-end for a ``Downloader``: a simple
    command loop offering download, list, update, and configuration
    commands on stdin/stdout."""

    def __init__(self, dataserver):
        # The Downloader instance this shell drives.
        self._ds = dataserver

    def _simple_interactive_menu(self, *options):
        """Print *options* on one line, evenly spaced between rules."""
        print("-" * 75)
        # Distribute the leftover width evenly between the options.
        spc = (68 - sum(len(o) for o in options)) // (len(options) - 1) * " "
        print(" " + spc.join(options))
        print("-" * 75)

    def run(self):
        """Main command loop; returns when the user quits."""
        print("NLTK Downloader")
        while True:
            self._simple_interactive_menu(
                "d) Download",
                "l) List",
                " u) Update",
                "c) Config",
                "h) Help",
                "q) Quit",
            )
            user_input = input("Downloader> ").strip()
            if not user_input:
                print()
                continue
            # First whitespace-separated token selects the command;
            # the rest are passed through as arguments.
            command = user_input.lower().split()[0]
            args = user_input.split()[1:]
            try:
                if command == "l":
                    print()
                    self._ds.list(self._ds.download_dir, header=False, more_prompt=True)
                elif command == "h":
                    self._simple_interactive_help()
                elif command == "c":
                    self._simple_interactive_config()
                elif command in ("q", "x"):
                    return
                elif command == "d":
                    self._simple_interactive_download(args)
                elif command == "u":
                    self._simple_interactive_update()
                else:
                    print("Command %r unrecognized" % user_input)
            except HTTPError as e:
                print("Error reading from server: %s" % e)
            except URLError as e:
                print("Error connecting to server: %s" % e.reason)
            # try checking if user_input is a package name, &
            # downloading it?
            print()

    def _simple_interactive_download(self, args):
        """Download the identifiers in *args*; with no args, prompt the
        user for identifiers interactively."""
        if args:
            for arg in args:
                try:
                    self._ds.download(arg, prefix=" ")
                except (OSError, ValueError) as e:
                    print(e)
        else:
            while True:
                print()
                print("Download which package (l=list; x=cancel)?")
                user_input = input(" Identifier> ")
                if user_input.lower() == "l":
                    self._ds.list(
                        self._ds.download_dir,
                        header=False,
                        more_prompt=True,
                        skip_installed=True,
                    )
                    continue
                elif user_input.lower() in ("x", "q", ""):
                    return
                elif user_input:
                    # Accept several whitespace-separated identifiers.
                    for id in user_input.split():
                        try:
                            self._ds.download(id, prefix=" ")
                        except (OSError, ValueError) as e:
                            print(e)
                    break

    def _simple_interactive_update(self):
        """List STALE packages and, on confirmation, re-download them."""
        while True:
            stale_packages = []
            stale = partial = False
            for info in sorted(getattr(self._ds, "packages")(), key=str):
                if self._ds.status(info) == self._ds.STALE:
                    stale_packages.append((info.id, info.name))

            print()
            if stale_packages:
                print("Will update following packages (o=ok; x=cancel)")
                for pid, pname in stale_packages:
                    # Same 27-column hanging-indent trick as
                    # Downloader.list().
                    name = textwrap.fill(
                        "-" * 27 + (pname), 75, subsequent_indent=27 * " "
                    )[27:]
                    print(" [ ] {} {}".format(pid.ljust(20, "."), name))
                print()

                user_input = input(" Identifier> ")
                if user_input.lower() == "o":
                    for pid, pname in stale_packages:
                        try:
                            self._ds.download(pid, prefix=" ")
                        except (OSError, ValueError) as e:
                            print(e)
                    break
                elif user_input.lower() in ("x", "q", ""):
                    return
            else:
                print("Nothing to update.")
                return

    def _simple_interactive_help(self):
        """Print the command summary."""
        print()
        print("Commands:")
        print(
            " d) Download a package or collection u) Update out of date packages"
        )
        print(" l) List packages & collections h) Help")
        print(" c) View & Modify Configuration q) Quit")

    def _show_config(self):
        """Print the current server URL and local data directory."""
        print()
        print("Data Server:")
        print(" - URL: <%s>" % self._ds.url)
        print(" - %d Package Collections Available" % len(self._ds.collections()))
        print(" - %d Individual Packages Available" % len(self._ds.packages()))
        print()
        print("Local Machine:")
        print(" - Data directory: %s" % self._ds.download_dir)

    def _simple_interactive_config(self):
        """Sub-loop for viewing/changing the server URL and data dir."""
        self._show_config()
        while True:
            print()
            self._simple_interactive_menu(
                "s) Show Config", "u) Set Server URL", "d) Set Data Dir", "m) Main Menu"
            )
            user_input = input("Config> ").strip().lower()
            if user_input == "s":
                self._show_config()
            elif user_input == "d":
                new_dl_dir = input(" New Directory> ").strip()
                if new_dl_dir in ("", "x", "q", "X", "Q"):
                    print(" Cancelled!")
                elif os.path.isdir(new_dl_dir):
                    self._ds.download_dir = new_dl_dir
                else:
                    print("Directory %r not found! Create it first." % new_dl_dir)
            elif user_input == "u":
                new_url = input(" New URL> ").strip()
                if new_url in ("", "x", "q", "X", "Q"):
                    print(" Cancelled!")
                else:
                    if not new_url.startswith(("http://", "https://")):
                        new_url = "http://" + new_url
                    try:
                        # Assigning the property validates the URL by
                        # fetching the index; failures are reported.
                        self._ds.url = new_url
                    except Exception as e:
                        print(f"Error reading <{new_url!r}>:\n {e}")
            elif user_input == "m":
                break
|
| 1283 |
+
|
| 1284 |
+
|
| 1285 |
+
class DownloaderGUI:
|
| 1286 |
+
"""
|
| 1287 |
+
Graphical interface for downloading packages from the NLTK data
|
| 1288 |
+
server.
|
| 1289 |
+
"""
|
| 1290 |
+
|
| 1291 |
+
# /////////////////////////////////////////////////////////////////
|
| 1292 |
+
# Column Configuration
|
| 1293 |
+
# /////////////////////////////////////////////////////////////////
|
| 1294 |
+
|
| 1295 |
+
COLUMNS = [
    "",
    "Identifier",
    "Name",
    "Size",
    "Status",
    "Unzipped Size",
    "Copyright",
    "Contact",
    "License",
    "Author",
    "Subdir",
    "Checksum",
]
"""A list of the names of columns.  This controls the order in
which the columns will appear.  If this is edited, then
``_package_to_columns()`` may need to be edited to match."""

COLUMN_WEIGHTS = {"": 0, "Name": 5, "Size": 0, "Status": 0}
"""A dictionary specifying how columns should be resized when the
table is resized.  Columns with weight 0 will not be resized at
all; and columns with high weight will be resized more.
Default weight (for columns not explicitly listed) is 1."""

COLUMN_WIDTHS = {
    "": 1,
    "Identifier": 20,
    "Name": 45,
    "Size": 10,
    "Unzipped Size": 10,
    "Status": 12,
}
"""A dictionary specifying how wide each column should be, in
characters.  The default width (for columns not explicitly
listed) is specified by ``DEFAULT_COLUMN_WIDTH``."""

DEFAULT_COLUMN_WIDTH = 30
"""The default width for columns that are not explicitly listed
in ``COLUMN_WIDTHS``."""

INITIAL_COLUMNS = ["", "Identifier", "Name", "Size", "Status"]
"""The set of columns that should be displayed by default."""

# Perform a few import-time sanity checks to make sure that the
# column configuration variables are defined consistently:
for c in COLUMN_WEIGHTS:
    assert c in COLUMNS
for c in COLUMN_WIDTHS:
    assert c in COLUMNS
for c in INITIAL_COLUMNS:
    assert c in COLUMNS

# /////////////////////////////////////////////////////////////////
# Color Configuration
# /////////////////////////////////////////////////////////////////
# NOTE(review): each color value is a 2-tuple of hex colors;
# presumably (light, dark) or (foreground, background) pairs used by
# the table widgets — confirm against the GUI code that consumes them.

_BACKDROP_COLOR = ("#000", "#ccc")

# Table-row colors keyed by the package's install status.
_ROW_COLOR = {
    Downloader.INSTALLED: ("#afa", "#080"),
    Downloader.PARTIAL: ("#ffa", "#880"),
    Downloader.STALE: ("#faa", "#800"),
    Downloader.NOT_INSTALLED: ("#fff", "#888"),
}

_MARK_COLOR = ("#000", "#ccc")

# _FRONT_TAB_COLOR = ('#ccf', '#008')
# _BACK_TAB_COLOR = ('#88a', '#448')
_FRONT_TAB_COLOR = ("#fff", "#45c")
_BACK_TAB_COLOR = ("#aaa", "#67a")

_PROGRESS_COLOR = ("#f00", "#aaa")

# Tk font specification used for the tab labels.
_TAB_FONT = "helvetica -16 bold"
|
| 1370 |
+
|
| 1371 |
+
# /////////////////////////////////////////////////////////////////
|
| 1372 |
+
# Constructor
|
| 1373 |
+
# /////////////////////////////////////////////////////////////////
|
| 1374 |
+
|
| 1375 |
+
def __init__(self, dataserver, use_threads=True):
    """Build the downloader GUI around *dataserver* (a Downloader).

    :param dataserver: the Downloader used to list & fetch packages.
    :param use_threads: if true, downloads run in a background thread
        (see _download_threaded); otherwise they run via ``after``
        callbacks on the Tk event loop (see _download_cb).
    """
    self._ds = dataserver
    self._use_threads = use_threads

    # For the threaded downloader: the lock guards both queues, which
    # are shared with the _DownloadThread worker.
    self._download_lock = threading.Lock()
    self._download_msg_queue = []
    self._download_abort_queue = []
    self._downloading = False

    # For tkinter after callbacks: ids kept so _destroy can cancel them.
    self._afterid = {}

    # A message log (viewable via File > Show Log).
    self._log_messages = []
    self._log_indent = 0
    self._log("NLTK Downloader Started!")

    # Create the main window.
    top = self.top = Tk()
    top.geometry("+50+50")
    top.title("NLTK Downloader")
    top.configure(background=self._BACKDROP_COLOR[1])

    # Set up some bindings now, in case anything goes wrong.
    top.bind("<Control-q>", self.destroy)
    top.bind("<Control-x>", self.destroy)
    self._destroyed = False

    # View-menu IntVars, keyed by column name (filled by _init_menu).
    self._column_vars = {}

    # Initialize the GUI.
    self._init_widgets()
    self._init_menu()
    try:
        self._fill_table()
    except HTTPError as e:
        showerror("Error reading from server", e)
    except URLError as e:
        showerror("Error connecting to server", e.reason)

    self._show_info()
    self._select_columns()
    self._table.select(0)

    # Make sure we get notified when we're destroyed, so we can
    # cancel any download in progress.
    self._table.bind("<Destroy>", self._destroy)
|
| 1423 |
+
|
| 1424 |
+
def _log(self, msg):
|
| 1425 |
+
self._log_messages.append(
|
| 1426 |
+
"{} {}{}".format(time.ctime(), " | " * self._log_indent, msg)
|
| 1427 |
+
)
|
| 1428 |
+
|
| 1429 |
+
# /////////////////////////////////////////////////////////////////
|
| 1430 |
+
# Internals
|
| 1431 |
+
# /////////////////////////////////////////////////////////////////
|
| 1432 |
+
|
| 1433 |
+
def _init_widgets(self):
    """Build the widget hierarchy: tab bar, package table, info entry
    boxes, Download/Refresh buttons, and the progress bar."""
    # Create the top-level frame structures
    f1 = Frame(self.top, relief="raised", border=2, padx=8, pady=0)
    # FIX: the original passed ``sid="top"`` -- it only worked because
    # Tk silently accepts unique option abbreviations.  Spell it out.
    f1.pack(side="top", expand=True, fill="both")
    f1.grid_rowconfigure(2, weight=1)
    f1.grid_columnconfigure(0, weight=1)
    Frame(f1, height=8).grid(column=0, row=0)  # spacer
    tabframe = Frame(f1)
    tabframe.grid(column=0, row=1, sticky="news")
    tableframe = Frame(f1)
    tableframe.grid(column=0, row=2, sticky="news")
    buttonframe = Frame(f1)
    buttonframe.grid(column=0, row=3, sticky="news")
    Frame(f1, height=8).grid(column=0, row=4)  # spacer
    infoframe = Frame(f1)
    infoframe.grid(column=0, row=5, sticky="news")
    Frame(f1, height=8).grid(column=0, row=6)  # spacer
    progressframe = Frame(
        self.top, padx=3, pady=3, background=self._BACKDROP_COLOR[1]
    )
    progressframe.pack(side="bottom", fill="x")
    self.top["border"] = 0
    self.top["highlightthickness"] = 0

    # Create the tabs
    self._tab_names = ["Collections", "Corpora", "Models", "All Packages"]
    self._tabs = {}
    for i, tab in enumerate(self._tab_names):
        label = Label(tabframe, text=tab, font=self._TAB_FONT)
        label.pack(side="left", padx=((i + 1) % 2) * 10)
        label.bind("<Button-1>", self._select_tab)
        self._tabs[tab.lower()] = label

    # Create the table.
    column_weights = [self.COLUMN_WEIGHTS.get(column, 1) for column in self.COLUMNS]
    self._table = Table(
        tableframe,
        self.COLUMNS,
        column_weights=column_weights,
        highlightthickness=0,
        listbox_height=16,
        reprfunc=self._table_reprfunc,
    )
    self._table.columnconfig(0, foreground=self._MARK_COLOR[0])  # marked
    for i, column in enumerate(self.COLUMNS):
        width = self.COLUMN_WIDTHS.get(column, self.DEFAULT_COLUMN_WIDTH)
        self._table.columnconfig(i, width=width)
    self._table.pack(expand=True, fill="both")
    self._table.focus()
    self._table.bind_to_listboxes("<Double-Button-1>", self._download)
    self._table.bind("<space>", self._table_mark)
    self._table.bind("<Return>", self._download)
    self._table.bind("<Left>", self._prev_tab)
    self._table.bind("<Right>", self._next_tab)
    self._table.bind("<Control-a>", self._mark_all)

    # Create entry boxes for URL & download_dir
    infoframe.grid_columnconfigure(1, weight=1)

    info = [
        ("url", "Server Index:", self._set_url),
        ("download_dir", "Download Directory:", self._set_download_dir),
    ]
    self._info = {}
    for (i, (key, label, callback)) in enumerate(info):
        Label(infoframe, text=label).grid(column=0, row=i, sticky="e")
        entry = Entry(
            infoframe,
            font="courier",
            relief="groove",
            disabledforeground="#007aff",
            foreground="#007aff",
        )
        self._info[key] = (entry, callback)
        entry.bind("<Return>", self._info_save)
        # Bind key as a default arg so each lambda keeps its own key
        # (late-binding closures would all see the last key otherwise).
        entry.bind("<Button-1>", lambda e, key=key: self._info_edit(key))
        entry.grid(column=1, row=i, sticky="ew")

    # If the user edits url or download_dir, and then clicks outside
    # the entry box, then save their results.
    self.top.bind("<Button-1>", self._info_save)

    # Create Download & Refresh buttons.
    self._download_button = Button(
        buttonframe, text="Download", command=self._download, width=8
    )
    self._download_button.pack(side="left")
    self._refresh_button = Button(
        buttonframe, text="Refresh", command=self._refresh, width=8
    )
    self._refresh_button.pack(side="right")

    # Create Progress bar
    self._progresslabel = Label(
        progressframe,
        text="",
        foreground=self._BACKDROP_COLOR[0],
        background=self._BACKDROP_COLOR[1],
    )
    self._progressbar = Canvas(
        progressframe,
        width=200,
        height=16,
        background=self._PROGRESS_COLOR[1],
        relief="sunken",
        border=1,
    )
    self._init_progressbar()
    self._progressbar.pack(side="right")
    self._progresslabel.pack(side="left")
|
| 1543 |
+
|
| 1544 |
+
def _init_menu(self):
    """Build the menubar: File, View (column toggles), Sort, and Help."""
    menubar = Menu(self.top)

    filemenu = Menu(menubar, tearoff=0)
    filemenu.add_command(
        label="Download", underline=0, command=self._download, accelerator="Return"
    )
    filemenu.add_separator()
    filemenu.add_command(
        label="Change Server Index",
        underline=7,
        command=lambda: self._info_edit("url"),
    )
    filemenu.add_command(
        label="Change Download Directory",
        underline=0,
        command=lambda: self._info_edit("download_dir"),
    )
    filemenu.add_separator()
    filemenu.add_command(label="Show Log", underline=5, command=self._show_log)
    filemenu.add_separator()
    filemenu.add_command(
        label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-x"
    )
    menubar.add_cascade(label="File", underline=0, menu=filemenu)

    # Create a menu to control which columns of the table are
    # shown.  n.b.: we never hide the first two columns (mark and
    # identifier).
    viewmenu = Menu(menubar, tearoff=0)
    for column in self._table.column_names[2:]:
        var = IntVar(self.top)
        assert column not in self._column_vars
        self._column_vars[column] = var
        if column in self.INITIAL_COLUMNS:
            var.set(1)
        viewmenu.add_checkbutton(
            label=column, underline=0, variable=var, command=self._select_columns
        )
    menubar.add_cascade(label="View", underline=0, menu=viewmenu)

    # Create a sort menu
    # [xx] this should be selectbuttons; and it should include
    # reversed sorts as options.
    sortmenu = Menu(menubar, tearoff=0)
    for column in self._table.column_names[1:]:
        sortmenu.add_command(
            label="Sort by %s" % column,
            # c=column binds the current column (late-binding closure).
            command=(lambda c=column: self._table.sort_by(c, "ascending")),
        )
    sortmenu.add_separator()
    # sortmenu.add_command(label='Descending Sort:')
    for column in self._table.column_names[1:]:
        sortmenu.add_command(
            label="Reverse sort by %s" % column,
            command=(lambda c=column: self._table.sort_by(c, "descending")),
        )
    menubar.add_cascade(label="Sort", underline=0, menu=sortmenu)

    helpmenu = Menu(menubar, tearoff=0)
    helpmenu.add_command(label="About", underline=0, command=self.about)
    helpmenu.add_command(
        label="Instructions", underline=0, command=self.help, accelerator="F1"
    )
    menubar.add_cascade(label="Help", underline=0, menu=helpmenu)
    self.top.bind("<F1>", self.help)

    self.top.config(menu=menubar)
|
| 1612 |
+
|
| 1613 |
+
def _select_columns(self):
|
| 1614 |
+
for (column, var) in self._column_vars.items():
|
| 1615 |
+
if var.get():
|
| 1616 |
+
self._table.show_column(column)
|
| 1617 |
+
else:
|
| 1618 |
+
self._table.hide_column(column)
|
| 1619 |
+
|
| 1620 |
+
def _refresh(self):
    """Drop the cached package statuses, re-query the server, redraw
    the table, and reset the selection to the first row."""
    self._ds.clear_status_cache()
    try:
        self._fill_table()
    except HTTPError as e:
        showerror("Error reading from server", e)
    except URLError as e:
        showerror("Error connecting to server", e.reason)
    self._table.select(0)
|
| 1629 |
+
|
| 1630 |
+
def _info_edit(self, info_key):
    """Make the info entry for *info_key* ('url' or 'download_dir')
    editable and give it keyboard focus."""
    self._info_save()  # just in case.
    (entry, callback) = self._info[info_key]
    entry["state"] = "normal"
    entry["relief"] = "sunken"
    entry.focus()
|
| 1636 |
+
|
| 1637 |
+
def _info_save(self, e=None):
    """Commit any in-progress edits in the info entry boxes.

    Already-disabled entries are skipped.  If *e* is an event inside
    an entry being edited and its key is not <Return>, focus stays on
    that entry instead of committing; otherwise the entry is disabled
    and its callback is invoked with the entered text.
    """
    focus = self._table
    for entry, callback in self._info.values():
        if entry["state"] == "disabled":
            continue
        if e is not None and e.widget is entry and e.keysym != "Return":
            # Click inside the entry while editing: keep editing.
            focus = entry
        else:
            entry["state"] = "disabled"
            entry["relief"] = "groove"
            callback(entry.get())
    focus.focus()
|
| 1649 |
+
|
| 1650 |
+
def _table_reprfunc(self, row, col, val):
|
| 1651 |
+
if self._table.column_names[col].endswith("Size"):
|
| 1652 |
+
if isinstance(val, str):
|
| 1653 |
+
return " %s" % val
|
| 1654 |
+
elif val < 1024**2:
|
| 1655 |
+
return " %.1f KB" % (val / 1024.0**1)
|
| 1656 |
+
elif val < 1024**3:
|
| 1657 |
+
return " %.1f MB" % (val / 1024.0**2)
|
| 1658 |
+
else:
|
| 1659 |
+
return " %.1f GB" % (val / 1024.0**3)
|
| 1660 |
+
|
| 1661 |
+
if col in (0, ""):
|
| 1662 |
+
return str(val)
|
| 1663 |
+
else:
|
| 1664 |
+
return " %s" % val
|
| 1665 |
+
|
| 1666 |
+
def _set_url(self, url):
    """Point the data server at a new index *url* and redraw the table.

    No-op when the url is unchanged.  The info boxes are refreshed
    either way so the display matches the server's actual state.
    """
    if url == self._ds.url:
        return
    try:
        self._ds.url = url
        self._fill_table()
    except OSError as e:
        showerror("Error Setting Server Index", str(e))
    self._show_info()
|
| 1675 |
+
|
| 1676 |
+
def _set_download_dir(self, download_dir):
    """Change the target download directory and re-check what's installed."""
    if self._ds.download_dir == download_dir:
        return
    # check if the dir exists, and if not, ask if we should create it?

    # Clear our status cache, & re-check what's installed
    self._ds.download_dir = download_dir
    try:
        self._fill_table()
    except HTTPError as e:
        showerror("Error reading from server", e)
    except URLError as e:
        showerror("Error connecting to server", e.reason)
    self._show_info()
|
| 1690 |
+
|
| 1691 |
+
def _show_info(self):
    """Refresh the URL & download-dir entry boxes from the data server.

    Entries are temporarily flipped to "normal" so their contents can
    be replaced, then disabled again (they stay read-only until the
    user explicitly edits them via _info_edit).
    """
    # FIX: removed stray debug ``print("showing info", self._ds.url)``
    # that wrote to stdout on every refresh.
    for entry, cb in self._info.values():
        entry["state"] = "normal"
        entry.delete(0, "end")
    self._info["url"][0].insert(0, self._ds.url)
    self._info["download_dir"][0].insert(0, self._ds.download_dir)
    for entry, cb in self._info.values():
        entry["state"] = "disabled"
|
| 1700 |
+
|
| 1701 |
+
def _prev_tab(self, *e):
    """Switch to the tab left of the current one (bound to <Left>);
    no-op when the leftmost tab is already selected."""
    for i, tab in enumerate(self._tab_names):
        if tab.lower() == self._tab and i > 0:
            self._tab = self._tab_names[i - 1].lower()
            try:
                return self._fill_table()
            except HTTPError as e:
                showerror("Error reading from server", e)
            except URLError as e:
                showerror("Error connecting to server", e.reason)
|
| 1711 |
+
|
| 1712 |
+
def _next_tab(self, *e):
    """Switch to the tab right of the current one (bound to <Right>);
    no-op when the rightmost tab is already selected."""
    for i, tab in enumerate(self._tab_names):
        if tab.lower() == self._tab and i < (len(self._tabs) - 1):
            self._tab = self._tab_names[i + 1].lower()
            try:
                return self._fill_table()
            except HTTPError as e:
                showerror("Error reading from server", e)
            except URLError as e:
                showerror("Error connecting to server", e.reason)
|
| 1722 |
+
|
| 1723 |
+
def _select_tab(self, event):
    """<Button-1> handler on a tab label: make that tab current and
    repopulate the table."""
    self._tab = event.widget["text"].lower()
    try:
        self._fill_table()
    except HTTPError as e:
        showerror("Error reading from server", e)
    except URLError as e:
        showerror("Error connecting to server", e.reason)
|
| 1731 |
+
|
| 1732 |
+
# Name of the currently-selected tab, lowercased (see _select_tab).
_tab = "collections"
# _tab = 'corpora'
# NOTE(review): _rows is never assigned in the visible code -- confirm
# whether it is still used before removing.
_rows = None
|
| 1735 |
+
|
| 1736 |
+
def _fill_table(self):
    """Repopulate the table with the packages for the current tab,
    highlight the active tab label, re-sort by Identifier, re-color
    the rows, and restore the previous selection."""
    selected_row = self._table.selected_row()
    self._table.clear()
    if self._tab == "all packages":
        items = self._ds.packages()
    elif self._tab == "corpora":
        items = self._ds.corpora()
    elif self._tab == "models":
        items = self._ds.models()
    elif self._tab == "collections":
        items = self._ds.collections()
    else:
        assert 0, "bad tab value %r" % self._tab
    rows = [self._package_to_columns(item) for item in items]
    self._table.extend(rows)

    # Highlight the active tab.
    for tab, label in self._tabs.items():
        if tab == self._tab:
            label.configure(
                foreground=self._FRONT_TAB_COLOR[0],
                background=self._FRONT_TAB_COLOR[1],
            )
        else:
            label.configure(
                foreground=self._BACK_TAB_COLOR[0],
                background=self._BACK_TAB_COLOR[1],
            )

    self._table.sort_by("Identifier", order="ascending")
    self._color_table()
    self._table.select(selected_row)

    # This is a hack, because the scrollbar isn't updating its
    # position right -- I'm not sure what the underlying cause is
    # though.  (This is on OS X w/ python 2.5)  The length of
    # delay that's necessary seems to depend on how fast the
    # computer is. :-/
    self.top.after(150, self._table._scrollbar.set, *self._table._mlb.yview())
    self.top.after(300, self._table._scrollbar.set, *self._table._mlb.yview())
|
| 1776 |
+
|
| 1777 |
+
def _update_table_status(self):
    """Re-query the install status of every displayed package and
    re-color the rows to match."""
    for row_num in range(len(self._table)):
        status = self._ds.status(self._table[row_num, "Identifier"])
        self._table[row_num, "Status"] = status
    self._color_table()
|
| 1782 |
+
|
| 1783 |
+
def _download(self, *e):
    """Download the marked packages -- or the selected package when
    none are marked -- reporting progress via _download_cb."""
    # If we're using threads, then delegate to the threaded
    # downloader instead.
    if self._use_threads:
        return self._download_threaded(*e)

    # A row is "marked" when its mark column (index 0) is non-empty.
    marked = [
        self._table[row, "Identifier"]
        for row in range(len(self._table))
        if self._table[row, 0] != ""
    ]
    selection = self._table.selected_row()
    if not marked and selection is not None:
        marked = [self._table[selection, "Identifier"]]

    download_iter = self._ds.incr_download(marked, self._ds.download_dir)
    self._log_indent = 0
    self._download_cb(download_iter, marked)
|
| 1801 |
+
|
| 1802 |
+
_DL_DELAY = 10  # delay (ms) between steps of the non-threaded download loop
|
| 1803 |
+
|
| 1804 |
+
def _download_cb(self, download_iter, ids):
    """Consume one message from *download_iter*, update the UI
    accordingly, and reschedule itself via ``after`` until the
    iterator is exhausted (non-threaded download driver)."""
    try:
        msg = next(download_iter)
    except StopIteration:
        # Download complete: refresh statuses and clear the bar.
        # self._fill_table(sort=False)
        self._update_table_status()
        afterid = self.top.after(10, self._show_progress, 0)
        self._afterid["_download_cb"] = afterid
        return

    def show(s):
        # Mirror each status line to the progress label and the log.
        self._progresslabel["text"] = s
        self._log(s)

    if isinstance(msg, ProgressMessage):
        self._show_progress(msg.progress)
    elif isinstance(msg, ErrorMessage):
        show(msg.message)
        if msg.package is not None:
            self._select(msg.package.id)
        self._show_progress(None)
        return  # halt progress.
    elif isinstance(msg, StartCollectionMessage):
        show("Downloading collection %s" % msg.collection.id)
        self._log_indent += 1
    elif isinstance(msg, StartPackageMessage):
        show("Downloading package %s" % msg.package.id)
    elif isinstance(msg, UpToDateMessage):
        show("Package %s is up-to-date!" % msg.package.id)
    # elif isinstance(msg, StaleMessage):
    #     show('Package %s is out-of-date or corrupt' % msg.package.id)
    elif isinstance(msg, FinishDownloadMessage):
        show("Finished downloading %r." % msg.package.id)
    elif isinstance(msg, StartUnzipMessage):
        show("Unzipping %s" % msg.package.filename)
    elif isinstance(msg, FinishCollectionMessage):
        self._log_indent -= 1
        show("Finished downloading collection %r." % msg.collection.id)
        self._clear_mark(msg.collection.id)
    elif isinstance(msg, FinishPackageMessage):
        self._clear_mark(msg.package.id)
    # Schedule the next step of the download loop.
    afterid = self.top.after(self._DL_DELAY, self._download_cb, download_iter, ids)
    self._afterid["_download_cb"] = afterid
|
| 1847 |
+
|
| 1848 |
+
def _select(self, id):
    """Select the first table row whose Identifier column equals *id*."""
    for row_num in range(len(self._table)):
        if self._table[row_num, "Identifier"] != id:
            continue
        self._table.select(row_num)
        return
|
| 1853 |
+
|
| 1854 |
+
def _color_table(self):
    """Re-apply row colors so each row reflects its package's status
    (see _ROW_COLOR), and keep the mark column in its own colors."""
    # Color rows according to status.
    for row in range(len(self._table)):
        bg, sbg = self._ROW_COLOR[self._table[row, "Status"]]
        fg, sfg = ("black", "white")
        self._table.rowconfig(
            row,
            foreground=fg,
            selectforeground=sfg,
            background=bg,
            selectbackground=sbg,
        )
        # Color the marked column
        self._table.itemconfigure(
            row, 0, foreground=self._MARK_COLOR[0], background=self._MARK_COLOR[1]
        )
|
| 1870 |
+
|
| 1871 |
+
def _clear_mark(self, id):
    """Remove the download mark from every row whose Identifier is *id*."""
    n_rows = len(self._table)
    for row_num in range(n_rows):
        if self._table[row_num, "Identifier"] != id:
            continue
        self._table[row_num, 0] = ""
|
| 1875 |
+
|
| 1876 |
+
def _mark_all(self, *e):
    """Put a download mark ('X') on every row (bound to <Control-a>)."""
    n_rows = len(self._table)
    for row_num in range(n_rows):
        self._table[row_num, 0] = "X"
|
| 1879 |
+
|
| 1880 |
+
def _table_mark(self, *e):
    """Toggle the download mark on the selected row, then advance the
    selection one row (bound to <space>)."""
    selection = self._table.selected_row()
    if selection >= 0:
        if self._table[selection][0] != "":
            self._table[selection, 0] = ""
        else:
            self._table[selection, 0] = "X"
    self._table.select(delta=1)
|
| 1888 |
+
|
| 1889 |
+
def _show_log(self):
    """Pop up a window showing all accumulated log messages."""
    ShowText(self.top, "NLTK Downloader Log", "\n".join(self._log_messages))
|
| 1892 |
+
|
| 1893 |
+
def _package_to_columns(self, pkg):
|
| 1894 |
+
"""
|
| 1895 |
+
Given a package, return a list of values describing that
|
| 1896 |
+
package, one for each column in ``self.COLUMNS``.
|
| 1897 |
+
"""
|
| 1898 |
+
row = []
|
| 1899 |
+
for column_index, column_name in enumerate(self.COLUMNS):
|
| 1900 |
+
if column_index == 0: # Mark:
|
| 1901 |
+
row.append("")
|
| 1902 |
+
elif column_name == "Identifier":
|
| 1903 |
+
row.append(pkg.id)
|
| 1904 |
+
elif column_name == "Status":
|
| 1905 |
+
row.append(self._ds.status(pkg))
|
| 1906 |
+
else:
|
| 1907 |
+
attr = column_name.lower().replace(" ", "_")
|
| 1908 |
+
row.append(getattr(pkg, attr, "n/a"))
|
| 1909 |
+
return row
|
| 1910 |
+
|
| 1911 |
+
# /////////////////////////////////////////////////////////////////
|
| 1912 |
+
# External Interface
|
| 1913 |
+
# /////////////////////////////////////////////////////////////////
|
| 1914 |
+
|
| 1915 |
+
def destroy(self, *e):
    """Tear down the main window.  Idempotent: repeat calls (e.g. from
    both the <Control-q> and <Control-x> bindings) are ignored."""
    if not self._destroyed:
        self.top.destroy()
        self._destroyed = True
|
| 1920 |
+
|
| 1921 |
+
def _destroy(self, *e):
    """<Destroy> handler: cancel pending ``after`` callbacks and abort
    any in-flight download before Tkinter finishes tearing us down."""
    if self.top is not None:
        for afterid in self._afterid.values():
            self.top.after_cancel(afterid)

        # Abort any download in progress.
        if self._downloading and self._use_threads:
            self._abort_download()

        # Make sure the garbage collector destroys these now;
        # otherwise, they may get destroyed when we're not in the main
        # thread, which would make Tkinter unhappy.
        self._column_vars.clear()
|
| 1934 |
+
|
| 1935 |
+
def mainloop(self, *args, **kwargs):
    """Enter the Tk main loop; blocks until the window is closed."""
    self.top.mainloop(*args, **kwargs)
|
| 1937 |
+
|
| 1938 |
+
# /////////////////////////////////////////////////////////////////
|
| 1939 |
+
# HELP
|
| 1940 |
+
# /////////////////////////////////////////////////////////////////
|
| 1941 |
+
|
| 1942 |
+
# User-visible help text, displayed by self.help() (Help menu / F1).
HELP = textwrap.dedent(
    """\
    This tool can be used to download a variety of corpora and models
    that can be used with NLTK.  Each corpus or model is distributed
    in a single zip file, known as a \"package file.\"  You can
    download packages individually, or you can download pre-defined
    collections of packages.

    When you download a package, it will be saved to the \"download
    directory.\"  A default download directory is chosen when you run
    the downloader; but you may also select a different download
    directory.  On Windows, the default download directory is
    \"package.\"

    The NLTK downloader can be used to download a variety of corpora,
    models, and other data packages.

    Keyboard shortcuts::
      [return]\t Download
      [up]\t Select previous package
      [down]\t Select next package
      [left]\t Select previous tab
      [right]\t Select next tab
    """
)
|
| 1970 |
+
|
| 1971 |
+
def help(self, *e):
    """Display the HELP text in a ShowText window (Help menu / F1)."""
    # The default font's not very legible; try using 'fixed' instead.
    try:
        ShowText(
            self.top,
            "Help: NLTK Downloader",
            self.HELP.strip(),
            width=75,
            font="fixed",
        )
    # FIX: was a bare ``except:``, which would also swallow
    # KeyboardInterrupt/SystemExit; catch Exception before falling
    # back to the default font.
    except Exception:
        ShowText(self.top, "Help: NLTK Downloader", self.HELP.strip(), width=75)
|
| 1983 |
+
|
| 1984 |
+
def about(self, *e):
    """Show an 'about' dialog, preferring the native messagebox and
    falling back to a ShowText window."""
    ABOUT = "NLTK Downloader\n" + "Written by Edward Loper"
    TITLE = "About: NLTK Downloader"
    try:
        from tkinter.messagebox import Message

        Message(message=ABOUT, title=TITLE).show()
    except ImportError:
        ShowText(self.top, TITLE, ABOUT)
|
| 1993 |
+
|
| 1994 |
+
# /////////////////////////////////////////////////////////////////
|
| 1995 |
+
# Progress Bar
|
| 1996 |
+
# /////////////////////////////////////////////////////////////////
|
| 1997 |
+
|
| 1998 |
+
_gradient_width = 5  # width (px) of each stripe drawn on the progress canvas
|
| 1999 |
+
|
| 2000 |
+
def _init_progressbar(self):
    """Draw the progress canvas: a hidden diagonal-stripe 'gradient'
    (animated by _progress_alive while downloading) plus a 'redbox'
    rectangle resized by _show_progress."""
    c = self._progressbar
    width, height = int(c["width"]), int(c["height"])
    for i in range(0, (int(c["width"]) * 2) // self._gradient_width):
        c.create_line(
            i * self._gradient_width + 20,
            -20,
            i * self._gradient_width - height - 20,
            height + 20,
            width=self._gradient_width,
            # Vary the red channel to produce the stripe pattern.
            fill="#%02x0000" % (80 + abs(i % 6 - 3) * 12),
        )
    c.addtag_all("gradient")
    c.itemconfig("gradient", state="hidden")

    # This is used to display progress
    c.addtag_withtag(
        "redbox", c.create_rectangle(0, 0, 0, 0, fill=self._PROGRESS_COLOR[0])
    )
|
| 2019 |
+
|
| 2020 |
+
def _show_progress(self, percent):
    """Resize the red progress box to *percent* of the canvas width;
    ``None`` collapses the box and hides the stripe gradient."""
    c = self._progressbar
    if percent is None:
        c.coords("redbox", 0, 0, 0, 0)
        c.itemconfig("gradient", state="hidden")
    else:
        width, height = int(c["width"]), int(c["height"])
        x = percent * int(width) // 100 + 1
        c.coords("redbox", 0, 0, x, height + 1)
|
| 2030 |
+
def _progress_alive(self):
    """Animate the stripe gradient while a download runs; reschedules
    itself every 200 ms via ``after`` (cancelled through _afterid)."""
    c = self._progressbar
    if not self._downloading:
        c.itemconfig("gradient", state="hidden")
    else:
        c.itemconfig("gradient", state="normal")
        x1, y1, x2, y2 = c.bbox("gradient")
        if x1 <= -100:
            # Wrap the stripes back once they've scrolled far enough.
            c.move("gradient", (self._gradient_width * 6) - 4, 0)
        else:
            c.move("gradient", -4, 0)
        afterid = self.top.after(200, self._progress_alive)
        self._afterid["_progress_alive"] = afterid
|
| 2043 |
+
|
| 2044 |
+
# /////////////////////////////////////////////////////////////////
|
| 2045 |
+
# Threaded downloader
|
| 2046 |
+
# /////////////////////////////////////////////////////////////////
|
| 2047 |
+
|
| 2048 |
+
def _download_threaded(self, *e):
    """Start the marked/selected downloads in a background thread, or
    cancel the current download if one is already running."""
    # If the user tries to start a new download while we're already
    # downloading something, then abort the current download instead.
    if self._downloading:
        self._abort_download()
        return

    # Change the 'download' button to an 'abort' button.
    self._download_button["text"] = "Cancel"

    marked = [
        self._table[row, "Identifier"]
        for row in range(len(self._table))
        if self._table[row, 0] != ""
    ]
    selection = self._table.selected_row()
    if not marked and selection is not None:
        marked = [self._table[selection, "Identifier"]]

    # Create a new data server object for the download operation,
    # just in case the user modifies our data server during the
    # download (e.g., clicking 'refresh' or editing the index url).
    ds = Downloader(self._ds.url, self._ds.download_dir)

    # Start downloading in a separate thread.
    assert self._download_msg_queue == []
    assert self._download_abort_queue == []
    self._DownloadThread(
        ds,
        marked,
        self._download_lock,
        self._download_msg_queue,
        self._download_abort_queue,
    ).start()

    # Monitor the download message queue & display its progress.
    self._log_indent = 0
    self._downloading = True
    self._monitor_message_queue()

    # Display an indication that we're still alive and well by
    # cycling the progress bar.
    self._progress_alive()
|
| 2091 |
+
|
| 2092 |
+
def _abort_download(self):
|
| 2093 |
+
if self._downloading:
|
| 2094 |
+
self._download_lock.acquire()
|
| 2095 |
+
self._download_abort_queue.append("abort")
|
| 2096 |
+
self._download_lock.release()
|
| 2097 |
+
|
| 2098 |
+
class _DownloadThread(threading.Thread):
|
| 2099 |
+
def __init__(self, data_server, items, lock, message_queue, abort):
|
| 2100 |
+
self.data_server = data_server
|
| 2101 |
+
self.items = items
|
| 2102 |
+
self.lock = lock
|
| 2103 |
+
self.message_queue = message_queue
|
| 2104 |
+
self.abort = abort
|
| 2105 |
+
threading.Thread.__init__(self)
|
| 2106 |
+
|
| 2107 |
+
def run(self):
|
| 2108 |
+
for msg in self.data_server.incr_download(self.items):
|
| 2109 |
+
self.lock.acquire()
|
| 2110 |
+
self.message_queue.append(msg)
|
| 2111 |
+
# Check if we've been told to kill ourselves:
|
| 2112 |
+
if self.abort:
|
| 2113 |
+
self.message_queue.append("aborted")
|
| 2114 |
+
self.lock.release()
|
| 2115 |
+
return
|
| 2116 |
+
self.lock.release()
|
| 2117 |
+
self.lock.acquire()
|
| 2118 |
+
self.message_queue.append("finished")
|
| 2119 |
+
self.lock.release()
|
| 2120 |
+
|
| 2121 |
+
# NOTE(review): not referenced in the visible code; presumably the ms
# delay used by _monitor_message_queue's reschedule -- confirm.
_MONITOR_QUEUE_DELAY = 100
|
| 2122 |
+
|
| 2123 |
+
def _monitor_message_queue(self):
    """
    Poll the message queue filled by the background ``_DownloadThread``
    and reflect each message in the GUI (progress bar, log, package
    table).  Re-schedules itself via ``Tk.after`` until the download
    finishes, aborts, or fails.
    """

    def show(s):
        # Display ``s`` both in the progress label and in the log.
        self._progresslabel["text"] = s
        self._log(s)

    # Try to acquire the lock; if it's busy, then just try again later.
    if not self._download_lock.acquire():
        return
    for msg in self._download_msg_queue:

        # Done downloading?
        if msg == "finished" or msg == "aborted":
            # self._fill_table(sort=False)
            self._update_table_status()
            self._downloading = False
            self._download_button["text"] = "Download"
            del self._download_msg_queue[:]
            del self._download_abort_queue[:]
            self._download_lock.release()
            if msg == "aborted":
                show("Download aborted!")
                self._show_progress(None)
            else:
                # Clear the progress bar shortly after completion.
                afterid = self.top.after(100, self._show_progress, None)
                self._afterid["_monitor_message_queue"] = afterid
            return

        # All other messages
        elif isinstance(msg, ProgressMessage):
            self._show_progress(msg.progress)
        elif isinstance(msg, ErrorMessage):
            show(msg.message)
            if msg.package is not None:
                self._select(msg.package.id)
            self._show_progress(None)
            self._downloading = False
            # NOTE(review): this early return exits without releasing
            # self._download_lock -- confirm whether that is intentional.
            return  # halt progress.
        elif isinstance(msg, StartCollectionMessage):
            show("Downloading collection %r" % msg.collection.id)
            self._log_indent += 1
        elif isinstance(msg, StartPackageMessage):
            # Drop any cached status so the table reflects the new download.
            self._ds.clear_status_cache(msg.package.id)
            show("Downloading package %r" % msg.package.id)
        elif isinstance(msg, UpToDateMessage):
            show("Package %s is up-to-date!" % msg.package.id)
        # elif isinstance(msg, StaleMessage):
        #     show('Package %s is out-of-date or corrupt; updating it' %
        #          msg.package.id)
        elif isinstance(msg, FinishDownloadMessage):
            show("Finished downloading %r." % msg.package.id)
        elif isinstance(msg, StartUnzipMessage):
            show("Unzipping %s" % msg.package.filename)
        elif isinstance(msg, FinishUnzipMessage):
            show("Finished installing %s" % msg.package.id)
        elif isinstance(msg, FinishCollectionMessage):
            self._log_indent -= 1
            show("Finished downloading collection %r." % msg.collection.id)
            self._clear_mark(msg.collection.id)
        elif isinstance(msg, FinishPackageMessage):
            self._update_table_status()
            self._clear_mark(msg.package.id)

    # Let the user know when we're aborting a download (but
    # waiting for a good point to abort it, so we don't end up
    # with a partially unzipped package or anything like that).
    if self._download_abort_queue:
        self._progresslabel["text"] = "Aborting download..."

    # Clear the message queue and then release the lock
    del self._download_msg_queue[:]
    self._download_lock.release()

    # Check the queue again after MONITOR_QUEUE_DELAY msec.
    afterid = self.top.after(self._MONITOR_QUEUE_DELAY, self._monitor_message_queue)
    self._afterid["_monitor_message_queue"] = afterid
|
| 2198 |
+
|
| 2199 |
+
|
| 2200 |
+
######################################################################
|
| 2201 |
+
# Helper Functions
|
| 2202 |
+
######################################################################
|
| 2203 |
+
# [xx] It may make sense to move these to nltk.internals.
|
| 2204 |
+
|
| 2205 |
+
|
| 2206 |
+
def md5_hexdigest(file):
    """
    Calculate and return the MD5 checksum for a given file.
    ``file`` may either be a filename or an open stream.
    """
    if isinstance(file, str):
        # A string is treated as a path: open it in binary mode and
        # hash the resulting stream.
        with open(file, "rb") as stream:
            return _md5_hexdigest(stream)
    # Otherwise assume an already-open stream.
    return _md5_hexdigest(file)
|
| 2215 |
+
|
| 2216 |
+
|
| 2217 |
+
def _md5_hexdigest(fp):
|
| 2218 |
+
md5_digest = md5()
|
| 2219 |
+
while True:
|
| 2220 |
+
block = fp.read(1024 * 16) # 16k blocks
|
| 2221 |
+
if not block:
|
| 2222 |
+
break
|
| 2223 |
+
md5_digest.update(block)
|
| 2224 |
+
return md5_digest.hexdigest()
|
| 2225 |
+
|
| 2226 |
+
|
| 2227 |
+
# change this to periodically yield progress messages?
|
| 2228 |
+
# [xx] get rid of topdir parameter -- we should be checking
|
| 2229 |
+
# this when we build the index, anyway.
|
| 2230 |
+
def unzip(filename, root, verbose=True):
    """
    Extract the contents of the zip file ``filename`` into the
    directory ``root``.

    :raises Exception: if extraction reports an ``ErrorMessage``.
    """
    for status in _unzip_iter(filename, root, verbose):
        if isinstance(status, ErrorMessage):
            raise Exception(status)
|
| 2238 |
+
|
| 2239 |
+
|
| 2240 |
+
def _unzip_iter(filename, root, verbose=True):
    """
    Generator used by ``unzip()``: extract ``filename`` into ``root``,
    yielding an ``ErrorMessage`` (and stopping) if the archive cannot
    be opened.  When ``verbose``, progress is written to stdout.
    """
    if verbose:
        basename = os.path.split(filename)[1]
        sys.stdout.write("Unzipping %s" % basename)
        sys.stdout.flush()

    # Open the archive first, so a corrupt download surfaces as an
    # ErrorMessage rather than an uncaught exception.
    try:
        archive = zipfile.ZipFile(filename)
    except zipfile.error:
        yield ErrorMessage(filename, "Error with downloaded zip file")
        return
    except Exception as err:
        yield ErrorMessage(filename, err)
        return

    archive.extractall(root)

    if verbose:
        print()
|
| 2258 |
+
|
| 2259 |
+
|
| 2260 |
+
######################################################################
|
| 2261 |
+
# Index Builder
|
| 2262 |
+
######################################################################
|
| 2263 |
+
# This may move to a different file sometime.
|
| 2264 |
+
|
| 2265 |
+
|
| 2266 |
+
def build_index(root, base_url):
    """
    Create a new data.xml index file, by combining the xml description
    files for various packages and collections.  ``root`` should be the
    path to a directory containing the package xml and zip files; and
    the collection xml files.  The ``root`` directory is expected to
    have the following subdirectories::

        root/
          packages/ .................. subdirectory for packages
            corpora/ ................. zip & xml files for corpora
            grammars/ ................ zip & xml files for grammars
            taggers/ ................. zip & xml files for taggers
            tokenizers/ .............. zip & xml files for tokenizers
            etc.
          collections/ ............... xml files for collections

    For each package, there should be two files: ``package.zip``
    (where *package* is the package name)
    which contains the package itself as a compressed zip file; and
    ``package.xml``, which is an xml description of the package.  The
    zipfile ``package.zip`` should expand to a single subdirectory
    named ``package/``.  The base filename ``package`` must match
    the identifier given in the package's xml file.

    For each collection, there should be a single file ``collection.zip``
    describing the collection, where *collection* is the name of the collection.

    All identifiers (for both packages and collections) must be unique.

    :return: the assembled ``<nltk_data>`` ``ElementTree.Element``,
        pretty-printed via ``_indent_xml``.
    :raises ValueError: if two packages/collections share an id.
    """
    # Find all packages.
    packages = []
    for pkg_xml, zf, subdir in _find_packages(os.path.join(root, "packages")):
        zipstat = os.stat(zf.filename)
        url = f"{base_url}/{subdir}/{os.path.split(zf.filename)[1]}"
        unzipped_size = sum(zf_info.file_size for zf_info in zf.infolist())

        # Fill in several fields of the package xml with calculated values.
        pkg_xml.set("unzipped_size", "%s" % unzipped_size)
        pkg_xml.set("size", "%s" % zipstat.st_size)
        pkg_xml.set("checksum", "%s" % md5_hexdigest(zf.filename))
        pkg_xml.set("subdir", subdir)
        # pkg_xml.set('svn_revision', _svn_revision(zf.filename))
        # Only supply a computed url when the xml did not declare one.
        if not pkg_xml.get("url"):
            pkg_xml.set("url", url)

        # Record the package.
        packages.append(pkg_xml)

    # Find all collections
    collections = list(_find_collections(os.path.join(root, "collections")))

    # Check that all UIDs are unique
    uids = set()
    for item in packages + collections:
        if item.get("id") in uids:
            raise ValueError("Duplicate UID: %s" % item.get("id"))
        uids.add(item.get("id"))

    # Put it all together: <nltk_data><packages>...</packages>
    #                      <collections>...</collections></nltk_data>,
    # with both lists sorted by id.
    top_elt = ElementTree.Element("nltk_data")
    top_elt.append(ElementTree.Element("packages"))
    top_elt[0].extend(sorted(packages, key=lambda package: package.get("id")))
    top_elt.append(ElementTree.Element("collections"))
    top_elt[1].extend(sorted(collections, key=lambda collection: collection.get("id")))

    # Pretty-print the tree in place before returning it.
    _indent_xml(top_elt)
    return top_elt
|
| 2334 |
+
|
| 2335 |
+
|
| 2336 |
+
def _indent_xml(xml, prefix=""):
|
| 2337 |
+
"""
|
| 2338 |
+
Helper for ``build_index()``: Given an XML ``ElementTree``, modify it
|
| 2339 |
+
(and its descendents) ``text`` and ``tail`` attributes to generate
|
| 2340 |
+
an indented tree, where each nested element is indented by 2
|
| 2341 |
+
spaces with respect to its parent.
|
| 2342 |
+
"""
|
| 2343 |
+
if len(xml) > 0:
|
| 2344 |
+
xml.text = (xml.text or "").strip() + "\n" + prefix + " "
|
| 2345 |
+
for child in xml:
|
| 2346 |
+
_indent_xml(child, prefix + " ")
|
| 2347 |
+
for child in xml[:-1]:
|
| 2348 |
+
child.tail = (child.tail or "").strip() + "\n" + prefix + " "
|
| 2349 |
+
xml[-1].tail = (xml[-1].tail or "").strip() + "\n" + prefix
|
| 2350 |
+
|
| 2351 |
+
|
| 2352 |
+
def _check_package(pkg_xml, zipfilename, zf):
|
| 2353 |
+
"""
|
| 2354 |
+
Helper for ``build_index()``: Perform some checks to make sure that
|
| 2355 |
+
the given package is consistent.
|
| 2356 |
+
"""
|
| 2357 |
+
# The filename must patch the id given in the XML file.
|
| 2358 |
+
uid = os.path.splitext(os.path.split(zipfilename)[1])[0]
|
| 2359 |
+
if pkg_xml.get("id") != uid:
|
| 2360 |
+
raise ValueError(
|
| 2361 |
+
"package identifier mismatch ({} vs {})".format(pkg_xml.get("id"), uid)
|
| 2362 |
+
)
|
| 2363 |
+
|
| 2364 |
+
# Zip file must expand to a subdir whose name matches uid.
|
| 2365 |
+
if sum((name != uid and not name.startswith(uid + "/")) for name in zf.namelist()):
|
| 2366 |
+
raise ValueError(
|
| 2367 |
+
"Zipfile %s.zip does not expand to a single "
|
| 2368 |
+
"subdirectory %s/" % (uid, uid)
|
| 2369 |
+
)
|
| 2370 |
+
|
| 2371 |
+
|
| 2372 |
+
# update for git?
|
| 2373 |
+
def _svn_revision(filename):
    """
    Helper for ``build_index()``: Calculate the subversion revision
    number for a given file (by using ``subprocess`` to run ``svn``).

    :raises ValueError: if ``svn status`` exits non-zero, writes to
        stderr, or produces no output.
    """
    p = subprocess.Popen(
        ["svn", "status", "-v", filename],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    (stdout, stderr) = p.communicate()
    # NOTE(review): communicate() returns *bytes* here (no text=True),
    # so textwrap.fill(stderr) below and the returned stdout.split()[2]
    # operate on bytes -- confirm callers expect that before relying on
    # this legacy helper.
    if p.returncode != 0 or stderr or not stdout:
        raise ValueError(
            "Error determining svn_revision for %s: %s"
            % (os.path.split(filename)[1], textwrap.fill(stderr))
        )
    return stdout.split()[2]
|
| 2390 |
+
|
| 2391 |
+
|
| 2392 |
+
def _find_collections(root):
|
| 2393 |
+
"""
|
| 2394 |
+
Helper for ``build_index()``: Yield a list of ElementTree.Element
|
| 2395 |
+
objects, each holding the xml for a single package collection.
|
| 2396 |
+
"""
|
| 2397 |
+
for dirname, _subdirs, files in os.walk(root):
|
| 2398 |
+
for filename in files:
|
| 2399 |
+
if filename.endswith(".xml"):
|
| 2400 |
+
xmlfile = os.path.join(dirname, filename)
|
| 2401 |
+
yield ElementTree.parse(xmlfile).getroot()
|
| 2402 |
+
|
| 2403 |
+
|
| 2404 |
+
def _find_packages(root):
    """
    Helper for ``build_index()``: Yield a list of tuples
    ``(pkg_xml, zf, subdir)``, where:
      - ``pkg_xml`` is an ``ElementTree.Element`` holding the xml for a
        package
      - ``zf`` is a ``zipfile.ZipFile`` for the package's contents.
      - ``subdir`` is the subdirectory (relative to ``root``) where
        the package was found (e.g. 'corpora' or 'grammars').

    A ``UserWarning`` is emitted for any ``.zip`` file that has no
    matching ``.xml`` description.

    :raises ValueError: if a zip or xml file cannot be read, if a
        package id does not match its filename, or if a zipfile does
        not expand to a single ``uid/`` subdirectory.
    """
    from nltk.corpus.reader.util import _path_from

    # Find all packages.
    for dirname, subdirs, files in os.walk(root):
        relpath = "/".join(_path_from(root, dirname))
        for filename in files:
            if filename.endswith(".xml"):
                xmlfilename = os.path.join(dirname, filename)
                zipfilename = xmlfilename[:-4] + ".zip"
                try:
                    zf = zipfile.ZipFile(zipfilename)
                except Exception as e:
                    raise ValueError(f"Error reading file {zipfilename!r}!\n{e}") from e
                try:
                    pkg_xml = ElementTree.parse(xmlfilename).getroot()
                except Exception as e:
                    raise ValueError(f"Error reading file {xmlfilename!r}!\n{e}") from e

                # Check that the UID matches the filename
                uid = os.path.split(xmlfilename[:-4])[1]
                if pkg_xml.get("id") != uid:
                    raise ValueError(
                        "package identifier mismatch (%s "
                        "vs %s)" % (pkg_xml.get("id"), uid)
                    )

                # Check that the zipfile expands to a subdir whose
                # name matches the uid.
                if sum(
                    (name != uid and not name.startswith(uid + "/"))
                    for name in zf.namelist()
                ):
                    raise ValueError(
                        "Zipfile %s.zip does not expand to a "
                        "single subdirectory %s/" % (uid, uid)
                    )

                yield pkg_xml, zf, relpath

            elif filename.endswith(".zip"):
                # Warn user in case a .xml does not exist for a .zip
                resourcename = os.path.splitext(filename)[0]
                xmlfilename = os.path.join(dirname, resourcename + ".xml")
                if not os.path.exists(xmlfilename):
                    # BUGFIX: name the offending zip file in the warning;
                    # the message previously contained the literal
                    # placeholder "(unknown)".
                    warnings.warn(
                        f"{filename} exists, but {resourcename + '.xml'} cannot be found! "
                        f"This could mean that {resourcename} can not be downloaded.",
                        stacklevel=2,
                    )

        # Don't recurse into svn subdirectories:
        try:
            subdirs.remove(".svn")
        except ValueError:
            pass
|
| 2470 |
+
|
| 2471 |
+
|
| 2472 |
+
######################################################################
|
| 2473 |
+
# Main:
|
| 2474 |
+
######################################################################
|
| 2475 |
+
|
| 2476 |
+
# There should be a command-line interface
|
| 2477 |
+
|
| 2478 |
+
# Aliases
|
| 2479 |
+
# Module-level singleton downloader used by the convenience functions below.
_downloader = Downloader()
# ``download`` is exposed as a module-level alias for the singleton's
# ``download`` method (i.e. ``nltk.download(...)``).
download = _downloader.download
|
| 2481 |
+
|
| 2482 |
+
|
| 2483 |
+
def download_shell():
    """Run the interactive text-mode downloader (``DownloaderShell``)."""
    DownloaderShell(_downloader).run()
|
| 2485 |
+
|
| 2486 |
+
|
| 2487 |
+
def download_gui():
    """Open the Tkinter downloader GUI (``DownloaderGUI``) and block in its mainloop."""
    DownloaderGUI(_downloader).mainloop()
|
| 2489 |
+
|
| 2490 |
+
|
| 2491 |
+
def update():
    """Call ``update()`` on the module-level singleton downloader."""
    _downloader.update()
|
| 2493 |
+
|
| 2494 |
+
|
| 2495 |
+
if __name__ == "__main__":
    from optparse import OptionParser

    # Minimal command-line interface:
    #   python downloader.py [options] [package_id ...]
    parser = OptionParser()
    parser.add_option(
        "-d",
        "--dir",
        dest="dir",
        help="download package to directory DIR",
        metavar="DIR",
    )
    parser.add_option(
        "-q",
        "--quiet",
        dest="quiet",
        action="store_true",
        default=False,
        help="work quietly",
    )
    parser.add_option(
        "-f",
        "--force",
        dest="force",
        action="store_true",
        default=False,
        help="download even if already installed",
    )
    parser.add_option(
        "-e",
        "--exit-on-error",
        dest="halt_on_error",
        action="store_true",
        default=False,
        help="exit if an error occurs",
    )
    parser.add_option(
        "-u",
        "--url",
        dest="server_index_url",
        # Defaults to the NLTK_DOWNLOAD_URL environment variable (or None).
        default=os.environ.get("NLTK_DOWNLOAD_URL"),
        help="download server index url",
    )

    (options, args) = parser.parse_args()

    downloader = Downloader(server_index_url=options.server_index_url)

    if args:
        # Download each requested package id in turn.
        for pkg_id in args:
            rv = downloader.download(
                info_or_id=pkg_id,
                download_dir=options.dir,
                quiet=options.quiet,
                force=options.force,
                halt_on_error=options.halt_on_error,
            )
            if rv == False and options.halt_on_error:
                break
    else:
        # No package ids given: call download() without an id
        # (presumably this starts the interactive downloader -- confirm).
        downloader.download(
            download_dir=options.dir,
            quiet=options.quiet,
            force=options.force,
            halt_on_error=options.halt_on_error,
        )
|
lib/python3.10/site-packages/nltk/featstruct.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
lib/python3.10/site-packages/nltk/grammar.py
ADDED
|
@@ -0,0 +1,1708 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Natural Language Toolkit: Context Free Grammars
|
| 2 |
+
#
|
| 3 |
+
# Copyright (C) 2001-2023 NLTK Project
|
| 4 |
+
# Author: Steven Bird <stevenbird1@gmail.com>
|
| 5 |
+
# Edward Loper <edloper@gmail.com>
|
| 6 |
+
# Jason Narad <jason.narad@gmail.com>
|
| 7 |
+
# Peter Ljunglöf <peter.ljunglof@heatherleaf.se>
|
| 8 |
+
# Tom Aarsen <>
|
| 9 |
+
# URL: <https://www.nltk.org/>
|
| 10 |
+
# For license information, see LICENSE.TXT
|
| 11 |
+
#
|
| 12 |
+
|
| 13 |
+
"""
|
| 14 |
+
Basic data classes for representing context free grammars. A
|
| 15 |
+
"grammar" specifies which trees can represent the structure of a
|
| 16 |
+
given text. Each of these trees is called a "parse tree" for the
|
| 17 |
+
text (or simply a "parse"). In a "context free" grammar, the set of
|
| 18 |
+
parse trees for any piece of a text can depend only on that piece, and
|
| 19 |
+
not on the rest of the text (i.e., the piece's context). Context free
|
| 20 |
+
grammars are often used to find possible syntactic structures for
|
| 21 |
+
sentences. In this context, the leaves of a parse tree are word
|
| 22 |
+
tokens; and the node values are phrasal categories, such as ``NP``
|
| 23 |
+
and ``VP``.
|
| 24 |
+
|
| 25 |
+
The ``CFG`` class is used to encode context free grammars. Each
|
| 26 |
+
``CFG`` consists of a start symbol and a set of productions.
|
| 27 |
+
The "start symbol" specifies the root node value for parse trees. For example,
|
| 28 |
+
the start symbol for syntactic parsing is usually ``S``. Start
|
| 29 |
+
symbols are encoded using the ``Nonterminal`` class, which is discussed
|
| 30 |
+
below.
|
| 31 |
+
|
| 32 |
+
A Grammar's "productions" specify what parent-child relationships a parse
|
| 33 |
+
tree can contain. Each production specifies that a particular
|
| 34 |
+
node can be the parent of a particular set of children. For example,
|
| 35 |
+
the production ``<S> -> <NP> <VP>`` specifies that an ``S`` node can
|
| 36 |
+
be the parent of an ``NP`` node and a ``VP`` node.
|
| 37 |
+
|
| 38 |
+
Grammar productions are implemented by the ``Production`` class.
|
| 39 |
+
Each ``Production`` consists of a left hand side and a right hand
|
| 40 |
+
side. The "left hand side" is a ``Nonterminal`` that specifies the
|
| 41 |
+
node type for a potential parent; and the "right hand side" is a list
|
| 42 |
+
that specifies allowable children for that parent. This list
|
| 43 |
+
consists of ``Nonterminals`` and text types: each ``Nonterminal``
|
| 44 |
+
indicates that the corresponding child may be a ``TreeToken`` with the
|
| 45 |
+
specified node type; and each text type indicates that the
|
| 46 |
+
corresponding child may be a ``Token`` with that type.
|
| 47 |
+
|
| 48 |
+
The ``Nonterminal`` class is used to distinguish node values from leaf
|
| 49 |
+
values. This prevents the grammar from accidentally using a leaf
|
| 50 |
+
value (such as the English word "A") as the node of a subtree. Within
|
| 51 |
+
a ``CFG``, all node values are wrapped in the ``Nonterminal``
|
| 52 |
+
class. Note, however, that the trees that are specified by the grammar do
|
| 53 |
+
*not* include these ``Nonterminal`` wrappers.
|
| 54 |
+
|
| 55 |
+
Grammars can also be given a more procedural interpretation. According to
|
| 56 |
+
this interpretation, a Grammar specifies any tree structure *tree* that
|
| 57 |
+
can be produced by the following procedure:
|
| 58 |
+
|
| 59 |
+
| Set tree to the start symbol
|
| 60 |
+
| Repeat until tree contains no more nonterminal leaves:
|
| 61 |
+
| Choose a production prod with whose left hand side
|
| 62 |
+
| lhs is a nonterminal leaf of tree.
|
| 63 |
+
| Replace the nonterminal leaf with a subtree, whose node
|
| 64 |
+
| value is the value wrapped by the nonterminal lhs, and
|
| 65 |
+
| whose children are the right hand side of prod.
|
| 66 |
+
|
| 67 |
+
The operation of replacing the left hand side (*lhs*) of a production
|
| 68 |
+
with the right hand side (*rhs*) in a tree (*tree*) is known as
|
| 69 |
+
"expanding" *lhs* to *rhs* in *tree*.
|
| 70 |
+
"""
|
| 71 |
+
import re
|
| 72 |
+
from functools import total_ordering
|
| 73 |
+
|
| 74 |
+
from nltk.featstruct import SLASH, TYPE, FeatDict, FeatStruct, FeatStructReader
|
| 75 |
+
from nltk.internals import raise_unorderable_types
|
| 76 |
+
from nltk.probability import ImmutableProbabilisticMixIn
|
| 77 |
+
from nltk.util import invert_graph, transitive_closure
|
| 78 |
+
|
| 79 |
+
#################################################################
# Nonterminal
#################################################################
@total_ordering
|
| 85 |
+
class Nonterminal:
|
| 86 |
+
"""
|
| 87 |
+
A non-terminal symbol for a context free grammar. ``Nonterminal``
|
| 88 |
+
is a wrapper class for node values; it is used by ``Production``
|
| 89 |
+
objects to distinguish node values from leaf values.
|
| 90 |
+
The node value that is wrapped by a ``Nonterminal`` is known as its
|
| 91 |
+
"symbol". Symbols are typically strings representing phrasal
|
| 92 |
+
categories (such as ``"NP"`` or ``"VP"``). However, more complex
|
| 93 |
+
symbol types are sometimes used (e.g., for lexicalized grammars).
|
| 94 |
+
Since symbols are node values, they must be immutable and
|
| 95 |
+
hashable. Two ``Nonterminals`` are considered equal if their
|
| 96 |
+
symbols are equal.
|
| 97 |
+
|
| 98 |
+
:see: ``CFG``, ``Production``
|
| 99 |
+
:type _symbol: any
|
| 100 |
+
:ivar _symbol: The node value corresponding to this
|
| 101 |
+
``Nonterminal``. This value must be immutable and hashable.
|
| 102 |
+
"""
|
| 103 |
+
|
| 104 |
+
def __init__(self, symbol):
|
| 105 |
+
"""
|
| 106 |
+
Construct a new non-terminal from the given symbol.
|
| 107 |
+
|
| 108 |
+
:type symbol: any
|
| 109 |
+
:param symbol: The node value corresponding to this
|
| 110 |
+
``Nonterminal``. This value must be immutable and
|
| 111 |
+
hashable.
|
| 112 |
+
"""
|
| 113 |
+
self._symbol = symbol
|
| 114 |
+
|
| 115 |
+
def symbol(self):
|
| 116 |
+
"""
|
| 117 |
+
Return the node value corresponding to this ``Nonterminal``.
|
| 118 |
+
|
| 119 |
+
:rtype: (any)
|
| 120 |
+
"""
|
| 121 |
+
return self._symbol
|
| 122 |
+
|
| 123 |
+
def __eq__(self, other):
|
| 124 |
+
"""
|
| 125 |
+
Return True if this non-terminal is equal to ``other``. In
|
| 126 |
+
particular, return True if ``other`` is a ``Nonterminal``
|
| 127 |
+
and this non-terminal's symbol is equal to ``other`` 's symbol.
|
| 128 |
+
|
| 129 |
+
:rtype: bool
|
| 130 |
+
"""
|
| 131 |
+
return type(self) == type(other) and self._symbol == other._symbol
|
| 132 |
+
|
| 133 |
+
def __ne__(self, other):
|
| 134 |
+
return not self == other
|
| 135 |
+
|
| 136 |
+
def __lt__(self, other):
|
| 137 |
+
if not isinstance(other, Nonterminal):
|
| 138 |
+
raise_unorderable_types("<", self, other)
|
| 139 |
+
return self._symbol < other._symbol
|
| 140 |
+
|
| 141 |
+
def __hash__(self):
|
| 142 |
+
return hash(self._symbol)
|
| 143 |
+
|
| 144 |
+
def __repr__(self):
|
| 145 |
+
"""
|
| 146 |
+
Return a string representation for this ``Nonterminal``.
|
| 147 |
+
|
| 148 |
+
:rtype: str
|
| 149 |
+
"""
|
| 150 |
+
if isinstance(self._symbol, str):
|
| 151 |
+
return "%s" % self._symbol
|
| 152 |
+
else:
|
| 153 |
+
return "%s" % repr(self._symbol)
|
| 154 |
+
|
| 155 |
+
def __str__(self):
|
| 156 |
+
"""
|
| 157 |
+
Return a string representation for this ``Nonterminal``.
|
| 158 |
+
|
| 159 |
+
:rtype: str
|
| 160 |
+
"""
|
| 161 |
+
if isinstance(self._symbol, str):
|
| 162 |
+
return "%s" % self._symbol
|
| 163 |
+
else:
|
| 164 |
+
return "%s" % repr(self._symbol)
|
| 165 |
+
|
| 166 |
+
def __div__(self, rhs):
|
| 167 |
+
"""
|
| 168 |
+
Return a new nonterminal whose symbol is ``A/B``, where ``A`` is
|
| 169 |
+
the symbol for this nonterminal, and ``B`` is the symbol for rhs.
|
| 170 |
+
|
| 171 |
+
:param rhs: The nonterminal used to form the right hand side
|
| 172 |
+
of the new nonterminal.
|
| 173 |
+
:type rhs: Nonterminal
|
| 174 |
+
:rtype: Nonterminal
|
| 175 |
+
"""
|
| 176 |
+
return Nonterminal(f"{self._symbol}/{rhs._symbol}")
|
| 177 |
+
|
| 178 |
+
def __truediv__(self, rhs):
|
| 179 |
+
"""
|
| 180 |
+
Return a new nonterminal whose symbol is ``A/B``, where ``A`` is
|
| 181 |
+
the symbol for this nonterminal, and ``B`` is the symbol for rhs.
|
| 182 |
+
This function allows use of the slash ``/`` operator with
|
| 183 |
+
the future import of division.
|
| 184 |
+
|
| 185 |
+
:param rhs: The nonterminal used to form the right hand side
|
| 186 |
+
of the new nonterminal.
|
| 187 |
+
:type rhs: Nonterminal
|
| 188 |
+
:rtype: Nonterminal
|
| 189 |
+
"""
|
| 190 |
+
return self.__div__(rhs)
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
def nonterminals(symbols):
|
| 194 |
+
"""
|
| 195 |
+
Given a string containing a list of symbol names, return a list of
|
| 196 |
+
``Nonterminals`` constructed from those symbols.
|
| 197 |
+
|
| 198 |
+
:param symbols: The symbol name string. This string can be
|
| 199 |
+
delimited by either spaces or commas.
|
| 200 |
+
:type symbols: str
|
| 201 |
+
:return: A list of ``Nonterminals`` constructed from the symbol
|
| 202 |
+
names given in ``symbols``. The ``Nonterminals`` are sorted
|
| 203 |
+
in the same order as the symbols names.
|
| 204 |
+
:rtype: list(Nonterminal)
|
| 205 |
+
"""
|
| 206 |
+
if "," in symbols:
|
| 207 |
+
symbol_list = symbols.split(",")
|
| 208 |
+
else:
|
| 209 |
+
symbol_list = symbols.split()
|
| 210 |
+
return [Nonterminal(s.strip()) for s in symbol_list]
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
class FeatStructNonterminal(FeatDict, Nonterminal):
|
| 214 |
+
"""A feature structure that's also a nonterminal. It acts as its
|
| 215 |
+
own symbol, and automatically freezes itself when hashed."""
|
| 216 |
+
|
| 217 |
+
def __hash__(self):
|
| 218 |
+
self.freeze()
|
| 219 |
+
return FeatStruct.__hash__(self)
|
| 220 |
+
|
| 221 |
+
def symbol(self):
|
| 222 |
+
return self
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
def is_nonterminal(item):
|
| 226 |
+
"""
|
| 227 |
+
:return: True if the item is a ``Nonterminal``.
|
| 228 |
+
:rtype: bool
|
| 229 |
+
"""
|
| 230 |
+
return isinstance(item, Nonterminal)
|
| 231 |
+
|
| 232 |
+
|
| 233 |
+
#################################################################
|
| 234 |
+
# Terminals
|
| 235 |
+
#################################################################
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
def is_terminal(item):
|
| 239 |
+
"""
|
| 240 |
+
Return True if the item is a terminal, which currently is
|
| 241 |
+
if it is hashable and not a ``Nonterminal``.
|
| 242 |
+
|
| 243 |
+
:rtype: bool
|
| 244 |
+
"""
|
| 245 |
+
return hasattr(item, "__hash__") and not isinstance(item, Nonterminal)
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
#################################################################
# Productions
#################################################################
@total_ordering
|
| 254 |
+
class Production:
|
| 255 |
+
"""
|
| 256 |
+
A grammar production. Each production maps a single symbol
|
| 257 |
+
on the "left-hand side" to a sequence of symbols on the
|
| 258 |
+
"right-hand side". (In the case of context-free productions,
|
| 259 |
+
the left-hand side must be a ``Nonterminal``, and the right-hand
|
| 260 |
+
side is a sequence of terminals and ``Nonterminals``.)
|
| 261 |
+
"terminals" can be any immutable hashable object that is
|
| 262 |
+
not a ``Nonterminal``. Typically, terminals are strings
|
| 263 |
+
representing words, such as ``"dog"`` or ``"under"``.
|
| 264 |
+
|
| 265 |
+
:see: ``CFG``
|
| 266 |
+
:see: ``DependencyGrammar``
|
| 267 |
+
:see: ``Nonterminal``
|
| 268 |
+
:type _lhs: Nonterminal
|
| 269 |
+
:ivar _lhs: The left-hand side of the production.
|
| 270 |
+
:type _rhs: tuple(Nonterminal, terminal)
|
| 271 |
+
:ivar _rhs: The right-hand side of the production.
|
| 272 |
+
"""
|
| 273 |
+
|
| 274 |
+
def __init__(self, lhs, rhs):
|
| 275 |
+
"""
|
| 276 |
+
Construct a new ``Production``.
|
| 277 |
+
|
| 278 |
+
:param lhs: The left-hand side of the new ``Production``.
|
| 279 |
+
:type lhs: Nonterminal
|
| 280 |
+
:param rhs: The right-hand side of the new ``Production``.
|
| 281 |
+
:type rhs: sequence(Nonterminal and terminal)
|
| 282 |
+
"""
|
| 283 |
+
if isinstance(rhs, str):
|
| 284 |
+
raise TypeError(
|
| 285 |
+
"production right hand side should be a list, " "not a string"
|
| 286 |
+
)
|
| 287 |
+
self._lhs = lhs
|
| 288 |
+
self._rhs = tuple(rhs)
|
| 289 |
+
|
| 290 |
+
def lhs(self):
|
| 291 |
+
"""
|
| 292 |
+
Return the left-hand side of this ``Production``.
|
| 293 |
+
|
| 294 |
+
:rtype: Nonterminal
|
| 295 |
+
"""
|
| 296 |
+
return self._lhs
|
| 297 |
+
|
| 298 |
+
def rhs(self):
|
| 299 |
+
"""
|
| 300 |
+
Return the right-hand side of this ``Production``.
|
| 301 |
+
|
| 302 |
+
:rtype: sequence(Nonterminal and terminal)
|
| 303 |
+
"""
|
| 304 |
+
return self._rhs
|
| 305 |
+
|
| 306 |
+
def __len__(self):
|
| 307 |
+
"""
|
| 308 |
+
Return the length of the right-hand side.
|
| 309 |
+
|
| 310 |
+
:rtype: int
|
| 311 |
+
"""
|
| 312 |
+
return len(self._rhs)
|
| 313 |
+
|
| 314 |
+
def is_nonlexical(self):
|
| 315 |
+
"""
|
| 316 |
+
Return True if the right-hand side only contains ``Nonterminals``
|
| 317 |
+
|
| 318 |
+
:rtype: bool
|
| 319 |
+
"""
|
| 320 |
+
return all(is_nonterminal(n) for n in self._rhs)
|
| 321 |
+
|
| 322 |
+
def is_lexical(self):
|
| 323 |
+
"""
|
| 324 |
+
Return True if the right-hand contain at least one terminal token.
|
| 325 |
+
|
| 326 |
+
:rtype: bool
|
| 327 |
+
"""
|
| 328 |
+
return not self.is_nonlexical()
|
| 329 |
+
|
| 330 |
+
def __str__(self):
|
| 331 |
+
"""
|
| 332 |
+
Return a verbose string representation of the ``Production``.
|
| 333 |
+
|
| 334 |
+
:rtype: str
|
| 335 |
+
"""
|
| 336 |
+
result = "%s -> " % repr(self._lhs)
|
| 337 |
+
result += " ".join(repr(el) for el in self._rhs)
|
| 338 |
+
return result
|
| 339 |
+
|
| 340 |
+
def __repr__(self):
|
| 341 |
+
"""
|
| 342 |
+
Return a concise string representation of the ``Production``.
|
| 343 |
+
|
| 344 |
+
:rtype: str
|
| 345 |
+
"""
|
| 346 |
+
return "%s" % self
|
| 347 |
+
|
| 348 |
+
def __eq__(self, other):
|
| 349 |
+
"""
|
| 350 |
+
Return True if this ``Production`` is equal to ``other``.
|
| 351 |
+
|
| 352 |
+
:rtype: bool
|
| 353 |
+
"""
|
| 354 |
+
return (
|
| 355 |
+
type(self) == type(other)
|
| 356 |
+
and self._lhs == other._lhs
|
| 357 |
+
and self._rhs == other._rhs
|
| 358 |
+
)
|
| 359 |
+
|
| 360 |
+
def __ne__(self, other):
|
| 361 |
+
return not self == other
|
| 362 |
+
|
| 363 |
+
def __lt__(self, other):
|
| 364 |
+
if not isinstance(other, Production):
|
| 365 |
+
raise_unorderable_types("<", self, other)
|
| 366 |
+
return (self._lhs, self._rhs) < (other._lhs, other._rhs)
|
| 367 |
+
|
| 368 |
+
def __hash__(self):
|
| 369 |
+
"""
|
| 370 |
+
Return a hash value for the ``Production``.
|
| 371 |
+
|
| 372 |
+
:rtype: int
|
| 373 |
+
"""
|
| 374 |
+
return hash((self._lhs, self._rhs))
|
| 375 |
+
|
| 376 |
+
|
| 377 |
+
class DependencyProduction(Production):
|
| 378 |
+
"""
|
| 379 |
+
A dependency grammar production. Each production maps a single
|
| 380 |
+
head word to an unordered list of one or more modifier words.
|
| 381 |
+
"""
|
| 382 |
+
|
| 383 |
+
def __str__(self):
|
| 384 |
+
"""
|
| 385 |
+
Return a verbose string representation of the ``DependencyProduction``.
|
| 386 |
+
|
| 387 |
+
:rtype: str
|
| 388 |
+
"""
|
| 389 |
+
result = f"'{self._lhs}' ->"
|
| 390 |
+
for elt in self._rhs:
|
| 391 |
+
result += f" '{elt}'"
|
| 392 |
+
return result
|
| 393 |
+
|
| 394 |
+
|
| 395 |
+
class ProbabilisticProduction(Production, ImmutableProbabilisticMixIn):
|
| 396 |
+
"""
|
| 397 |
+
A probabilistic context free grammar production.
|
| 398 |
+
A PCFG ``ProbabilisticProduction`` is essentially just a ``Production`` that
|
| 399 |
+
has an associated probability, which represents how likely it is that
|
| 400 |
+
this production will be used. In particular, the probability of a
|
| 401 |
+
``ProbabilisticProduction`` records the likelihood that its right-hand side is
|
| 402 |
+
the correct instantiation for any given occurrence of its left-hand side.
|
| 403 |
+
|
| 404 |
+
:see: ``Production``
|
| 405 |
+
"""
|
| 406 |
+
|
| 407 |
+
def __init__(self, lhs, rhs, **prob):
|
| 408 |
+
"""
|
| 409 |
+
Construct a new ``ProbabilisticProduction``.
|
| 410 |
+
|
| 411 |
+
:param lhs: The left-hand side of the new ``ProbabilisticProduction``.
|
| 412 |
+
:type lhs: Nonterminal
|
| 413 |
+
:param rhs: The right-hand side of the new ``ProbabilisticProduction``.
|
| 414 |
+
:type rhs: sequence(Nonterminal and terminal)
|
| 415 |
+
:param prob: Probability parameters of the new ``ProbabilisticProduction``.
|
| 416 |
+
"""
|
| 417 |
+
ImmutableProbabilisticMixIn.__init__(self, **prob)
|
| 418 |
+
Production.__init__(self, lhs, rhs)
|
| 419 |
+
|
| 420 |
+
def __str__(self):
|
| 421 |
+
return super().__str__() + (
|
| 422 |
+
" [1.0]" if (self.prob() == 1.0) else " [%g]" % self.prob()
|
| 423 |
+
)
|
| 424 |
+
|
| 425 |
+
def __eq__(self, other):
|
| 426 |
+
return (
|
| 427 |
+
type(self) == type(other)
|
| 428 |
+
and self._lhs == other._lhs
|
| 429 |
+
and self._rhs == other._rhs
|
| 430 |
+
and self.prob() == other.prob()
|
| 431 |
+
)
|
| 432 |
+
|
| 433 |
+
def __ne__(self, other):
|
| 434 |
+
return not self == other
|
| 435 |
+
|
| 436 |
+
def __hash__(self):
|
| 437 |
+
return hash((self._lhs, self._rhs, self.prob()))
|
| 438 |
+
|
| 439 |
+
|
| 440 |
+
#################################################################
# Grammars
#################################################################
class CFG:
|
| 446 |
+
"""
|
| 447 |
+
A context-free grammar. A grammar consists of a start state and
|
| 448 |
+
a set of productions. The set of terminals and nonterminals is
|
| 449 |
+
implicitly specified by the productions.
|
| 450 |
+
|
| 451 |
+
If you need efficient key-based access to productions, you
|
| 452 |
+
can use a subclass to implement it.
|
| 453 |
+
"""
|
| 454 |
+
|
| 455 |
+
def __init__(self, start, productions, calculate_leftcorners=True):
|
| 456 |
+
"""
|
| 457 |
+
Create a new context-free grammar, from the given start state
|
| 458 |
+
and set of ``Production`` instances.
|
| 459 |
+
|
| 460 |
+
:param start: The start symbol
|
| 461 |
+
:type start: Nonterminal
|
| 462 |
+
:param productions: The list of productions that defines the grammar
|
| 463 |
+
:type productions: list(Production)
|
| 464 |
+
:param calculate_leftcorners: False if we don't want to calculate the
|
| 465 |
+
leftcorner relation. In that case, some optimized chart parsers won't work.
|
| 466 |
+
:type calculate_leftcorners: bool
|
| 467 |
+
"""
|
| 468 |
+
if not is_nonterminal(start):
|
| 469 |
+
raise TypeError(
|
| 470 |
+
"start should be a Nonterminal object,"
|
| 471 |
+
" not a %s" % type(start).__name__
|
| 472 |
+
)
|
| 473 |
+
|
| 474 |
+
self._start = start
|
| 475 |
+
self._productions = productions
|
| 476 |
+
self._categories = {prod.lhs() for prod in productions}
|
| 477 |
+
self._calculate_indexes()
|
| 478 |
+
self._calculate_grammar_forms()
|
| 479 |
+
if calculate_leftcorners:
|
| 480 |
+
self._calculate_leftcorners()
|
| 481 |
+
|
| 482 |
+
def _calculate_indexes(self):
|
| 483 |
+
self._lhs_index = {}
|
| 484 |
+
self._rhs_index = {}
|
| 485 |
+
self._empty_index = {}
|
| 486 |
+
self._lexical_index = {}
|
| 487 |
+
for prod in self._productions:
|
| 488 |
+
# Left hand side.
|
| 489 |
+
lhs = prod._lhs
|
| 490 |
+
if lhs not in self._lhs_index:
|
| 491 |
+
self._lhs_index[lhs] = []
|
| 492 |
+
self._lhs_index[lhs].append(prod)
|
| 493 |
+
if prod._rhs:
|
| 494 |
+
# First item in right hand side.
|
| 495 |
+
rhs0 = prod._rhs[0]
|
| 496 |
+
if rhs0 not in self._rhs_index:
|
| 497 |
+
self._rhs_index[rhs0] = []
|
| 498 |
+
self._rhs_index[rhs0].append(prod)
|
| 499 |
+
else:
|
| 500 |
+
# The right hand side is empty.
|
| 501 |
+
self._empty_index[prod.lhs()] = prod
|
| 502 |
+
# Lexical tokens in the right hand side.
|
| 503 |
+
for token in prod._rhs:
|
| 504 |
+
if is_terminal(token):
|
| 505 |
+
self._lexical_index.setdefault(token, set()).add(prod)
|
| 506 |
+
|
| 507 |
+
def _calculate_leftcorners(self):
|
| 508 |
+
# Calculate leftcorner relations, for use in optimized parsing.
|
| 509 |
+
self._immediate_leftcorner_categories = {cat: {cat} for cat in self._categories}
|
| 510 |
+
self._immediate_leftcorner_words = {cat: set() for cat in self._categories}
|
| 511 |
+
for prod in self.productions():
|
| 512 |
+
if len(prod) > 0:
|
| 513 |
+
cat, left = prod.lhs(), prod.rhs()[0]
|
| 514 |
+
if is_nonterminal(left):
|
| 515 |
+
self._immediate_leftcorner_categories[cat].add(left)
|
| 516 |
+
else:
|
| 517 |
+
self._immediate_leftcorner_words[cat].add(left)
|
| 518 |
+
|
| 519 |
+
lc = transitive_closure(self._immediate_leftcorner_categories, reflexive=True)
|
| 520 |
+
self._leftcorners = lc
|
| 521 |
+
self._leftcorner_parents = invert_graph(lc)
|
| 522 |
+
|
| 523 |
+
nr_leftcorner_categories = sum(
|
| 524 |
+
map(len, self._immediate_leftcorner_categories.values())
|
| 525 |
+
)
|
| 526 |
+
nr_leftcorner_words = sum(map(len, self._immediate_leftcorner_words.values()))
|
| 527 |
+
if nr_leftcorner_words > nr_leftcorner_categories > 10000:
|
| 528 |
+
# If the grammar is big, the leftcorner-word dictionary will be too large.
|
| 529 |
+
# In that case it is better to calculate the relation on demand.
|
| 530 |
+
self._leftcorner_words = None
|
| 531 |
+
return
|
| 532 |
+
|
| 533 |
+
self._leftcorner_words = {}
|
| 534 |
+
for cat in self._leftcorners:
|
| 535 |
+
lefts = self._leftcorners[cat]
|
| 536 |
+
lc = self._leftcorner_words[cat] = set()
|
| 537 |
+
for left in lefts:
|
| 538 |
+
lc.update(self._immediate_leftcorner_words.get(left, set()))
|
| 539 |
+
|
| 540 |
+
@classmethod
|
| 541 |
+
def fromstring(cls, input, encoding=None):
|
| 542 |
+
"""
|
| 543 |
+
Return the grammar instance corresponding to the input string(s).
|
| 544 |
+
|
| 545 |
+
:param input: a grammar, either in the form of a string or as a list of strings.
|
| 546 |
+
"""
|
| 547 |
+
start, productions = read_grammar(
|
| 548 |
+
input, standard_nonterm_parser, encoding=encoding
|
| 549 |
+
)
|
| 550 |
+
return cls(start, productions)
|
| 551 |
+
|
| 552 |
+
def start(self):
|
| 553 |
+
"""
|
| 554 |
+
Return the start symbol of the grammar
|
| 555 |
+
|
| 556 |
+
:rtype: Nonterminal
|
| 557 |
+
"""
|
| 558 |
+
return self._start
|
| 559 |
+
|
| 560 |
+
# tricky to balance readability and efficiency here!
|
| 561 |
+
# can't use set operations as they don't preserve ordering
|
| 562 |
+
def productions(self, lhs=None, rhs=None, empty=False):
|
| 563 |
+
"""
|
| 564 |
+
Return the grammar productions, filtered by the left-hand side
|
| 565 |
+
or the first item in the right-hand side.
|
| 566 |
+
|
| 567 |
+
:param lhs: Only return productions with the given left-hand side.
|
| 568 |
+
:param rhs: Only return productions with the given first item
|
| 569 |
+
in the right-hand side.
|
| 570 |
+
:param empty: Only return productions with an empty right-hand side.
|
| 571 |
+
:return: A list of productions matching the given constraints.
|
| 572 |
+
:rtype: list(Production)
|
| 573 |
+
"""
|
| 574 |
+
if rhs and empty:
|
| 575 |
+
raise ValueError(
|
| 576 |
+
"You cannot select empty and non-empty " "productions at the same time."
|
| 577 |
+
)
|
| 578 |
+
|
| 579 |
+
# no constraints so return everything
|
| 580 |
+
if not lhs and not rhs:
|
| 581 |
+
if not empty:
|
| 582 |
+
return self._productions
|
| 583 |
+
else:
|
| 584 |
+
return self._empty_index.values()
|
| 585 |
+
|
| 586 |
+
# only lhs specified so look up its index
|
| 587 |
+
elif lhs and not rhs:
|
| 588 |
+
if not empty:
|
| 589 |
+
return self._lhs_index.get(lhs, [])
|
| 590 |
+
elif lhs in self._empty_index:
|
| 591 |
+
return [self._empty_index[lhs]]
|
| 592 |
+
else:
|
| 593 |
+
return []
|
| 594 |
+
|
| 595 |
+
# only rhs specified so look up its index
|
| 596 |
+
elif rhs and not lhs:
|
| 597 |
+
return self._rhs_index.get(rhs, [])
|
| 598 |
+
|
| 599 |
+
# intersect
|
| 600 |
+
else:
|
| 601 |
+
return [
|
| 602 |
+
prod
|
| 603 |
+
for prod in self._lhs_index.get(lhs, [])
|
| 604 |
+
if prod in self._rhs_index.get(rhs, [])
|
| 605 |
+
]
|
| 606 |
+
|
| 607 |
+
def leftcorners(self, cat):
|
| 608 |
+
"""
|
| 609 |
+
Return the set of all nonterminals that the given nonterminal
|
| 610 |
+
can start with, including itself.
|
| 611 |
+
|
| 612 |
+
This is the reflexive, transitive closure of the immediate
|
| 613 |
+
leftcorner relation: (A > B) iff (A -> B beta)
|
| 614 |
+
|
| 615 |
+
:param cat: the parent of the leftcorners
|
| 616 |
+
:type cat: Nonterminal
|
| 617 |
+
:return: the set of all leftcorners
|
| 618 |
+
:rtype: set(Nonterminal)
|
| 619 |
+
"""
|
| 620 |
+
return self._leftcorners.get(cat, {cat})
|
| 621 |
+
|
| 622 |
+
def is_leftcorner(self, cat, left):
|
| 623 |
+
"""
|
| 624 |
+
True if left is a leftcorner of cat, where left can be a
|
| 625 |
+
terminal or a nonterminal.
|
| 626 |
+
|
| 627 |
+
:param cat: the parent of the leftcorner
|
| 628 |
+
:type cat: Nonterminal
|
| 629 |
+
:param left: the suggested leftcorner
|
| 630 |
+
:type left: Terminal or Nonterminal
|
| 631 |
+
:rtype: bool
|
| 632 |
+
"""
|
| 633 |
+
if is_nonterminal(left):
|
| 634 |
+
return left in self.leftcorners(cat)
|
| 635 |
+
elif self._leftcorner_words:
|
| 636 |
+
return left in self._leftcorner_words.get(cat, set())
|
| 637 |
+
else:
|
| 638 |
+
return any(
|
| 639 |
+
left in self._immediate_leftcorner_words.get(parent, set())
|
| 640 |
+
for parent in self.leftcorners(cat)
|
| 641 |
+
)
|
| 642 |
+
|
| 643 |
+
def leftcorner_parents(self, cat):
|
| 644 |
+
"""
|
| 645 |
+
Return the set of all nonterminals for which the given category
|
| 646 |
+
is a left corner. This is the inverse of the leftcorner relation.
|
| 647 |
+
|
| 648 |
+
:param cat: the suggested leftcorner
|
| 649 |
+
:type cat: Nonterminal
|
| 650 |
+
:return: the set of all parents to the leftcorner
|
| 651 |
+
:rtype: set(Nonterminal)
|
| 652 |
+
"""
|
| 653 |
+
return self._leftcorner_parents.get(cat, {cat})
|
| 654 |
+
|
| 655 |
+
def check_coverage(self, tokens):
|
| 656 |
+
"""
|
| 657 |
+
Check whether the grammar rules cover the given list of tokens.
|
| 658 |
+
If not, then raise an exception.
|
| 659 |
+
|
| 660 |
+
:type tokens: list(str)
|
| 661 |
+
"""
|
| 662 |
+
missing = [tok for tok in tokens if not self._lexical_index.get(tok)]
|
| 663 |
+
if missing:
|
| 664 |
+
missing = ", ".join(f"{w!r}" for w in missing)
|
| 665 |
+
raise ValueError(
|
| 666 |
+
"Grammar does not cover some of the " "input words: %r." % missing
|
| 667 |
+
)
|
| 668 |
+
|
| 669 |
+
def _calculate_grammar_forms(self):
|
| 670 |
+
"""
|
| 671 |
+
Pre-calculate of which form(s) the grammar is.
|
| 672 |
+
"""
|
| 673 |
+
prods = self._productions
|
| 674 |
+
self._is_lexical = all(p.is_lexical() for p in prods)
|
| 675 |
+
self._is_nonlexical = all(p.is_nonlexical() for p in prods if len(p) != 1)
|
| 676 |
+
self._min_len = min(len(p) for p in prods)
|
| 677 |
+
self._max_len = max(len(p) for p in prods)
|
| 678 |
+
self._all_unary_are_lexical = all(p.is_lexical() for p in prods if len(p) == 1)
|
| 679 |
+
|
| 680 |
+
def is_lexical(self):
|
| 681 |
+
"""
|
| 682 |
+
Return True if all productions are lexicalised.
|
| 683 |
+
"""
|
| 684 |
+
return self._is_lexical
|
| 685 |
+
|
| 686 |
+
def is_nonlexical(self):
|
| 687 |
+
"""
|
| 688 |
+
Return True if all lexical rules are "preterminals", that is,
|
| 689 |
+
unary rules which can be separated in a preprocessing step.
|
| 690 |
+
|
| 691 |
+
This means that all productions are of the forms
|
| 692 |
+
A -> B1 ... Bn (n>=0), or A -> "s".
|
| 693 |
+
|
| 694 |
+
Note: is_lexical() and is_nonlexical() are not opposites.
|
| 695 |
+
There are grammars which are neither, and grammars which are both.
|
| 696 |
+
"""
|
| 697 |
+
return self._is_nonlexical
|
| 698 |
+
|
| 699 |
+
def min_len(self):
|
| 700 |
+
"""
|
| 701 |
+
Return the right-hand side length of the shortest grammar production.
|
| 702 |
+
"""
|
| 703 |
+
return self._min_len
|
| 704 |
+
|
| 705 |
+
def max_len(self):
|
| 706 |
+
"""
|
| 707 |
+
Return the right-hand side length of the longest grammar production.
|
| 708 |
+
"""
|
| 709 |
+
return self._max_len
|
| 710 |
+
|
| 711 |
+
def is_nonempty(self):
|
| 712 |
+
"""
|
| 713 |
+
Return True if there are no empty productions.
|
| 714 |
+
"""
|
| 715 |
+
return self._min_len > 0
|
| 716 |
+
|
| 717 |
+
def is_binarised(self):
|
| 718 |
+
"""
|
| 719 |
+
Return True if all productions are at most binary.
|
| 720 |
+
Note that there can still be empty and unary productions.
|
| 721 |
+
"""
|
| 722 |
+
return self._max_len <= 2
|
| 723 |
+
|
| 724 |
+
def is_flexible_chomsky_normal_form(self):
|
| 725 |
+
"""
|
| 726 |
+
Return True if all productions are of the forms
|
| 727 |
+
A -> B C, A -> B, or A -> "s".
|
| 728 |
+
"""
|
| 729 |
+
return self.is_nonempty() and self.is_nonlexical() and self.is_binarised()
|
| 730 |
+
|
| 731 |
+
def is_chomsky_normal_form(self):
|
| 732 |
+
"""
|
| 733 |
+
Return True if the grammar is of Chomsky Normal Form, i.e. all productions
|
| 734 |
+
are of the form A -> B C, or A -> "s".
|
| 735 |
+
"""
|
| 736 |
+
return self.is_flexible_chomsky_normal_form() and self._all_unary_are_lexical
|
| 737 |
+
|
| 738 |
+
def chomsky_normal_form(self, new_token_padding="@$@", flexible=False):
|
| 739 |
+
"""
|
| 740 |
+
Returns a new Grammar that is in chomsky normal
|
| 741 |
+
|
| 742 |
+
:param: new_token_padding
|
| 743 |
+
Customise new rule formation during binarisation
|
| 744 |
+
"""
|
| 745 |
+
if self.is_chomsky_normal_form():
|
| 746 |
+
return self
|
| 747 |
+
if self.productions(empty=True):
|
| 748 |
+
raise ValueError(
|
| 749 |
+
"Grammar has Empty rules. " "Cannot deal with them at the moment"
|
| 750 |
+
)
|
| 751 |
+
|
| 752 |
+
# check for mixed rules
|
| 753 |
+
for rule in self.productions():
|
| 754 |
+
if rule.is_lexical() and len(rule.rhs()) > 1:
|
| 755 |
+
raise ValueError(
|
| 756 |
+
f"Cannot handled mixed rule {rule.lhs()} => {rule.rhs()}"
|
| 757 |
+
)
|
| 758 |
+
|
| 759 |
+
step1 = CFG.eliminate_start(self)
|
| 760 |
+
step2 = CFG.binarize(step1, new_token_padding)
|
| 761 |
+
if flexible:
|
| 762 |
+
return step2
|
| 763 |
+
step3 = CFG.remove_unitary_rules(step2)
|
| 764 |
+
step4 = CFG(step3.start(), list(set(step3.productions())))
|
| 765 |
+
return step4
|
| 766 |
+
|
| 767 |
+
@classmethod
|
| 768 |
+
def remove_unitary_rules(cls, grammar):
|
| 769 |
+
"""
|
| 770 |
+
Remove nonlexical unitary rules and convert them to
|
| 771 |
+
lexical
|
| 772 |
+
"""
|
| 773 |
+
result = []
|
| 774 |
+
unitary = []
|
| 775 |
+
for rule in grammar.productions():
|
| 776 |
+
if len(rule) == 1 and rule.is_nonlexical():
|
| 777 |
+
unitary.append(rule)
|
| 778 |
+
else:
|
| 779 |
+
result.append(rule)
|
| 780 |
+
|
| 781 |
+
while unitary:
|
| 782 |
+
rule = unitary.pop(0)
|
| 783 |
+
for item in grammar.productions(lhs=rule.rhs()[0]):
|
| 784 |
+
new_rule = Production(rule.lhs(), item.rhs())
|
| 785 |
+
if len(new_rule) != 1 or new_rule.is_lexical():
|
| 786 |
+
result.append(new_rule)
|
| 787 |
+
else:
|
| 788 |
+
unitary.append(new_rule)
|
| 789 |
+
|
| 790 |
+
n_grammar = CFG(grammar.start(), result)
|
| 791 |
+
return n_grammar
|
| 792 |
+
|
| 793 |
+
@classmethod
|
| 794 |
+
def binarize(cls, grammar, padding="@$@"):
|
| 795 |
+
"""
|
| 796 |
+
Convert all non-binary rules into binary by introducing
|
| 797 |
+
new tokens.
|
| 798 |
+
Example::
|
| 799 |
+
|
| 800 |
+
Original:
|
| 801 |
+
A => B C D
|
| 802 |
+
After Conversion:
|
| 803 |
+
A => B A@$@B
|
| 804 |
+
A@$@B => C D
|
| 805 |
+
"""
|
| 806 |
+
result = []
|
| 807 |
+
|
| 808 |
+
for rule in grammar.productions():
|
| 809 |
+
if len(rule.rhs()) > 2:
|
| 810 |
+
# this rule needs to be broken down
|
| 811 |
+
left_side = rule.lhs()
|
| 812 |
+
for k in range(0, len(rule.rhs()) - 2):
|
| 813 |
+
tsym = rule.rhs()[k]
|
| 814 |
+
new_sym = Nonterminal(left_side.symbol() + padding + tsym.symbol())
|
| 815 |
+
new_production = Production(left_side, (tsym, new_sym))
|
| 816 |
+
left_side = new_sym
|
| 817 |
+
result.append(new_production)
|
| 818 |
+
last_prd = Production(left_side, rule.rhs()[-2:])
|
| 819 |
+
result.append(last_prd)
|
| 820 |
+
else:
|
| 821 |
+
result.append(rule)
|
| 822 |
+
|
| 823 |
+
n_grammar = CFG(grammar.start(), result)
|
| 824 |
+
return n_grammar
|
| 825 |
+
|
| 826 |
+
@classmethod
|
| 827 |
+
def eliminate_start(cls, grammar):
|
| 828 |
+
"""
|
| 829 |
+
Eliminate start rule in case it appears on RHS
|
| 830 |
+
Example: S -> S0 S1 and S0 -> S1 S
|
| 831 |
+
Then another rule S0_Sigma -> S is added
|
| 832 |
+
"""
|
| 833 |
+
start = grammar.start()
|
| 834 |
+
result = []
|
| 835 |
+
need_to_add = None
|
| 836 |
+
for rule in grammar.productions():
|
| 837 |
+
if start in rule.rhs():
|
| 838 |
+
need_to_add = True
|
| 839 |
+
result.append(rule)
|
| 840 |
+
if need_to_add:
|
| 841 |
+
start = Nonterminal("S0_SIGMA")
|
| 842 |
+
result.append(Production(start, [grammar.start()]))
|
| 843 |
+
n_grammar = CFG(start, result)
|
| 844 |
+
return n_grammar
|
| 845 |
+
return grammar
|
| 846 |
+
|
| 847 |
+
def __repr__(self):
|
| 848 |
+
return "<Grammar with %d productions>" % len(self._productions)
|
| 849 |
+
|
| 850 |
+
def __str__(self):
|
| 851 |
+
result = "Grammar with %d productions" % len(self._productions)
|
| 852 |
+
result += " (start state = %r)" % self._start
|
| 853 |
+
for production in self._productions:
|
| 854 |
+
result += "\n %s" % production
|
| 855 |
+
return result
|
| 856 |
+
|
| 857 |
+
|
| 858 |
+
class FeatureGrammar(CFG):
|
| 859 |
+
"""
|
| 860 |
+
A feature-based grammar. This is equivalent to a
|
| 861 |
+
``CFG`` whose nonterminals are all
|
| 862 |
+
``FeatStructNonterminal``.
|
| 863 |
+
|
| 864 |
+
A grammar consists of a start state and a set of
|
| 865 |
+
productions. The set of terminals and nonterminals
|
| 866 |
+
is implicitly specified by the productions.
|
| 867 |
+
"""
|
| 868 |
+
|
| 869 |
+
def __init__(self, start, productions):
|
| 870 |
+
"""
|
| 871 |
+
Create a new feature-based grammar, from the given start
|
| 872 |
+
state and set of ``Productions``.
|
| 873 |
+
|
| 874 |
+
:param start: The start symbol
|
| 875 |
+
:type start: FeatStructNonterminal
|
| 876 |
+
:param productions: The list of productions that defines the grammar
|
| 877 |
+
:type productions: list(Production)
|
| 878 |
+
"""
|
| 879 |
+
CFG.__init__(self, start, productions)
|
| 880 |
+
|
| 881 |
+
# The difference with CFG is that the productions are
|
| 882 |
+
# indexed on the TYPE feature of the nonterminals.
|
| 883 |
+
# This is calculated by the method _get_type_if_possible().
|
| 884 |
+
|
| 885 |
+
def _calculate_indexes(self):
|
| 886 |
+
self._lhs_index = {}
|
| 887 |
+
self._rhs_index = {}
|
| 888 |
+
self._empty_index = {}
|
| 889 |
+
self._empty_productions = []
|
| 890 |
+
self._lexical_index = {}
|
| 891 |
+
for prod in self._productions:
|
| 892 |
+
# Left hand side.
|
| 893 |
+
lhs = self._get_type_if_possible(prod._lhs)
|
| 894 |
+
if lhs not in self._lhs_index:
|
| 895 |
+
self._lhs_index[lhs] = []
|
| 896 |
+
self._lhs_index[lhs].append(prod)
|
| 897 |
+
if prod._rhs:
|
| 898 |
+
# First item in right hand side.
|
| 899 |
+
rhs0 = self._get_type_if_possible(prod._rhs[0])
|
| 900 |
+
if rhs0 not in self._rhs_index:
|
| 901 |
+
self._rhs_index[rhs0] = []
|
| 902 |
+
self._rhs_index[rhs0].append(prod)
|
| 903 |
+
else:
|
| 904 |
+
# The right hand side is empty.
|
| 905 |
+
if lhs not in self._empty_index:
|
| 906 |
+
self._empty_index[lhs] = []
|
| 907 |
+
self._empty_index[lhs].append(prod)
|
| 908 |
+
self._empty_productions.append(prod)
|
| 909 |
+
# Lexical tokens in the right hand side.
|
| 910 |
+
for token in prod._rhs:
|
| 911 |
+
if is_terminal(token):
|
| 912 |
+
self._lexical_index.setdefault(token, set()).add(prod)
|
| 913 |
+
|
| 914 |
+
@classmethod
def fromstring(
    cls, input, features=None, logic_parser=None, fstruct_reader=None, encoding=None
):
    """
    Return a feature structure based grammar.

    :param input: a grammar, either in the form of a string or else
        as a list of strings.
    :param features: a tuple of features (default: SLASH, TYPE)
    :param logic_parser: a parser for lambda-expressions,
        by default, ``LogicParser()``
    :param fstruct_reader: a feature structure parser
        (only if features and logic_parser is None)
    """
    if features is None:
        features = (SLASH, TYPE)

    if fstruct_reader is not None:
        # A caller-supplied reader already encapsulates a logic parser,
        # so passing both is ambiguous and rejected.
        if logic_parser is not None:
            raise Exception(
                "'logic_parser' and 'fstruct_reader' must not both be set"
            )
    else:
        fstruct_reader = FeatStructReader(
            features, FeatStructNonterminal, logic_parser=logic_parser
        )

    start, productions = read_grammar(
        input, fstruct_reader.read_partial, encoding=encoding
    )
    return cls(start, productions)
| 946 |
+
def productions(self, lhs=None, rhs=None, empty=False):
    """
    Return the grammar productions, filtered by the left-hand side
    or the first item in the right-hand side.

    :param lhs: Only return productions with the given left-hand side.
    :param rhs: Only return productions with the given first item
        in the right-hand side.
    :param empty: Only return productions with an empty right-hand side.
    :rtype: list(Production)
    """
    if rhs and empty:
        raise ValueError(
            "You cannot select empty and non-empty productions at the same time."
        )

    # No constraints: return everything (or all empty productions).
    if not lhs and not rhs:
        return self._empty_productions if empty else self._productions

    # Only lhs specified: look it up in the appropriate index.
    if lhs and not rhs:
        index = self._empty_index if empty else self._lhs_index
        return index.get(self._get_type_if_possible(lhs), [])

    # Only rhs specified: look up the first-item index.
    if rhs and not lhs:
        return self._rhs_index.get(self._get_type_if_possible(rhs), [])

    # Both given: intersect the two index entries, preserving lhs order.
    matching_lhs = self._lhs_index.get(self._get_type_if_possible(lhs), [])
    matching_rhs = self._rhs_index.get(self._get_type_if_possible(rhs), [])
    return [prod for prod in matching_lhs if prod in matching_rhs]
+
def leftcorners(self, cat):
    """
    Return the set of all words that the given category can start with.
    Also called the "first set" in compiler construction.

    :raise NotImplementedError: always; this operation is not yet
        supported for feature-based grammars.
    """
    raise NotImplementedError("Not implemented yet")
+
def leftcorner_parents(self, cat):
    """
    Return the set of all categories for which the given category
    is a left corner.

    :raise NotImplementedError: always; this operation is not yet
        supported for feature-based grammars.
    """
    raise NotImplementedError("Not implemented yet")
+
def _get_type_if_possible(self, item):
    """
    Helper function which returns the ``TYPE`` feature of the ``item``,
    if it exists, otherwise it returns the ``item`` itself.
    """
    # Feature structures behave like dicts; wrap the TYPE value so it
    # cannot compare equal to a plain terminal string.
    has_type = isinstance(item, dict) and TYPE in item
    return FeatureValueType(item[TYPE]) if has_type else item
+
|
| 1013 |
+
@total_ordering
class FeatureValueType:
    """
    A helper class for ``FeatureGrammars``, designed to be different
    from ordinary strings.  This is to stop the ``FeatStruct``
    ``FOO[]`` from being compare equal to the terminal "FOO".
    """

    def __init__(self, value):
        # The wrapped value; equality, ordering and hashing all
        # delegate to it.
        self._value = value

    def __repr__(self):
        return f"<{self._value}>"

    def __eq__(self, other):
        # Only instances of exactly the same class can be equal.
        if type(self) != type(other):
            return False
        return self._value == other._value

    def __ne__(self, other):
        return not self == other

    def __lt__(self, other):
        # Ordering against foreign types is an error; total_ordering
        # derives the remaining comparison methods from this one.
        if not isinstance(other, FeatureValueType):
            raise_unorderable_types("<", self, other)
        return self._value < other._value

    def __hash__(self):
        return hash(self._value)
| 1042 |
+
class DependencyGrammar:
    """
    A dependency grammar.  A DependencyGrammar consists of a set of
    productions.  Each production specifies a head/modifier relationship
    between a pair of words.
    """

    def __init__(self, productions):
        """
        Create a new dependency grammar, from the set of ``Productions``.

        :param productions: The list of productions that defines the grammar
        :type productions: list(Production)
        """
        self._productions = productions

    @classmethod
    def fromstring(cls, input):
        """
        Create a ``DependencyGrammar`` from its string description.
        Blank lines and lines starting with '#' are ignored.

        :param input: the grammar description
        :type input: str
        :raise ValueError: if a line cannot be parsed, or if no
            productions are found at all
        """
        productions = []
        for linenum, line in enumerate(input.split("\n")):
            line = line.strip()
            if line.startswith("#") or line == "":
                continue
            try:
                productions += _read_dependency_production(line)
            except ValueError as e:
                # Report a 1-based line number, consistent with read_grammar().
                raise ValueError(f"Unable to parse line {linenum + 1}: {line}") from e
        if len(productions) == 0:
            raise ValueError("No productions found!")
        return cls(productions)

    def contains(self, head, mod):
        """
        :param head: A head word.
        :type head: str
        :param mod: A mod word, to test as a modifier of 'head'.
        :type mod: str

        :return: true if this ``DependencyGrammar`` contains a
            ``DependencyProduction`` mapping 'head' to 'mod'.
        :rtype: bool
        """
        for production in self._productions:
            for possibleMod in production._rhs:
                if production._lhs == head and possibleMod == mod:
                    return True
        return False

    def __contains__(self, head_mod):
        """
        Return True if this ``DependencyGrammar`` contains a
        ``DependencyProduction`` mapping 'head' to 'mod'.

        :param head_mod: A tuple of a head word and a mod word,
            to test as a modifier of 'head'.
        :type head_mod: Tuple[str, str]
        :rtype: bool
        """
        try:
            head, mod = head_mod
        except ValueError as e:
            raise ValueError(
                "Must use a tuple of strings, e.g. `('price', 'of') in grammar`"
            ) from e
        return self.contains(head, mod)

    def __str__(self):
        """
        Return a verbose string representation of the ``DependencyGrammar``

        :rtype: str
        """
        # Use a local name that does not shadow the builtin ``str``.
        text = "Dependency grammar with %d productions" % len(self._productions)
        for production in self._productions:
            text += "\n %s" % production
        return text

    def __repr__(self):
        """
        Return a concise string representation of the ``DependencyGrammar``
        """
        return "Dependency grammar with %d productions" % len(self._productions)
| 1137 |
+
class ProbabilisticDependencyGrammar:
    """
    A dependency grammar annotated with statistical information:
    productions together with event counts and tag mappings.
    """

    def __init__(self, productions, events, tags):
        # productions: list of DependencyProduction objects.
        # events: presumably a mapping from event description to its
        #   observed count (see __str__) -- TODO confirm against callers.
        # tags: presumably a mapping from word to tag -- TODO confirm.
        self._productions = productions
        self._events = events
        self._tags = tags

    def contains(self, head, mod):
        """
        Return True if this ``DependencyGrammar`` contains a
        ``DependencyProduction`` mapping 'head' to 'mod'.

        :param head: A head word.
        :type head: str
        :param mod: A mod word, to test as a modifier of 'head'.
        :type mod: str
        :rtype: bool
        """
        for production in self._productions:
            for possibleMod in production._rhs:
                if production._lhs == head and possibleMod == mod:
                    return True
        return False

    def __str__(self):
        """
        Return a verbose string representation of the ``ProbabilisticDependencyGrammar``

        :rtype: str
        """
        # Use a local name that does not shadow the builtin ``str``.
        text = "Statistical dependency grammar with %d productions" % len(
            self._productions
        )
        for production in self._productions:
            text += "\n %s" % production
        text += "\nEvents:"
        for event in self._events:
            text += "\n %d:%s" % (self._events[event], event)
        text += "\nTags:"
        for tag_word in self._tags:
            text += f"\n {tag_word}:\t({self._tags[tag_word]})"
        return text

    def __repr__(self):
        """
        Return a concise string representation of the ``ProbabilisticDependencyGrammar``
        """
        return "Statistical Dependency grammar with %d productions" % len(
            self._productions
        )
|
| 1190 |
+
class PCFG(CFG):
    """
    A probabilistic context-free grammar.  A PCFG consists of a
    start state and a set of productions with probabilities.  The set of
    terminals and nonterminals is implicitly specified by the productions.

    PCFG productions use the ``ProbabilisticProduction`` class.
    ``PCFGs`` impose the constraint that the set of productions with
    any given left-hand-side must have probabilities that sum to 1
    (allowing for a small margin of error).

    If you need efficient key-based access to productions, you can use
    a subclass to implement it.

    :type EPSILON: float
    :cvar EPSILON: The acceptable margin of error for checking that
        productions with a given left-hand side have probabilities
        that sum to 1.
    """

    EPSILON = 0.01

    def __init__(self, start, productions, calculate_leftcorners=True):
        """
        Create a new context-free grammar, from the given start state
        and set of ``ProbabilisticProductions``.

        :param start: The start symbol
        :type start: Nonterminal
        :param productions: The list of productions that defines the grammar
        :type productions: list(Production)
        :raise ValueError: if the set of productions with any left-hand-side
            do not have probabilities that sum to a value within
            EPSILON of 1.
        :param calculate_leftcorners: False if we don't want to calculate the
            leftcorner relation. In that case, some optimized chart parsers won't work.
        :type calculate_leftcorners: bool
        """
        CFG.__init__(self, start, productions, calculate_leftcorners)

        # Accumulate, per left-hand side, the total probability mass,
        # then verify each total lies within EPSILON of 1.
        totals = {}
        for production in productions:
            key = production.lhs()
            totals[key] = totals.get(key, 0) + production.prob()
        for lhs, total in totals.items():
            if not ((1 - PCFG.EPSILON) < total < (1 + PCFG.EPSILON)):
                raise ValueError("Productions for %r do not sum to 1" % lhs)

    @classmethod
    def fromstring(cls, input, encoding=None):
        """
        Return a probabilistic context-free grammar corresponding to the
        input string(s).

        :param input: a grammar, either in the form of a string or else
            as a list of strings.
        """
        start, productions = read_grammar(
            input, standard_nonterm_parser, probabilistic=True, encoding=encoding
        )
        return cls(start, productions)
+
|
| 1253 |
+
#################################################################
|
| 1254 |
+
# Inducing Grammars
|
| 1255 |
+
#################################################################
|
| 1256 |
+
|
| 1257 |
+
# Contributed by Nathan Bodenstab <bodenstab@cslu.ogi.edu>
|
| 1258 |
+
|
| 1259 |
+
|
| 1260 |
+
def induce_pcfg(start, productions):
    r"""
    Induce a PCFG grammar from a list of productions.

    The probability of a production A -> B C in a PCFG is:

    |                count(A -> B C)
    |  P(B, C | A) = --------------- where \* is any right hand side
    |                 count(A -> \*)

    :param start: The start symbol
    :type start: Nonterminal
    :param productions: The list of productions that defines the grammar
    :type productions: list(Production)
    """
    # Count how often each distinct production occurs, and how often
    # each left-hand side occurs (over all its right-hand sides).
    production_counts = {}
    lhs_counts = {}
    for production in productions:
        lhs = production.lhs()
        lhs_counts[lhs] = lhs_counts.get(lhs, 0) + 1
        production_counts[production] = production_counts.get(production, 0) + 1

    # Relative frequency estimate: count(A -> rhs) / count(A -> *).
    weighted = [
        ProbabilisticProduction(
            production.lhs(),
            production.rhs(),
            prob=production_counts[production] / lhs_counts[production.lhs()],
        )
        for production in production_counts
    ]
    return PCFG(start, weighted)
| 1291 |
+
|
| 1292 |
+
#################################################################
|
| 1293 |
+
# Helper functions for reading productions
|
| 1294 |
+
#################################################################
|
| 1295 |
+
|
| 1296 |
+
|
| 1297 |
+
def _read_cfg_production(input):
    """
    Return a list of context-free ``Productions``.

    Thin wrapper around ``_read_production`` using the standard
    nonterminal parser.
    """
    return _read_production(input, standard_nonterm_parser)
|
| 1304 |
+
def _read_pcfg_production(input):
    """
    Return a list of PCFG ``ProbabilisticProductions``.

    Thin wrapper around ``_read_production`` with probability
    annotations enabled.
    """
    return _read_production(input, standard_nonterm_parser, probabilistic=True)
|
| 1311 |
+
def _read_fcfg_production(input, fstruct_reader):
    """
    Return a list of feature-based ``Productions``.

    ``fstruct_reader`` supplies the nonterminal parser used for
    feature-structure categories.
    """
    return _read_production(input, fstruct_reader)
| 1317 |
+
|
| 1318 |
+
# Parsing generic grammars

# Arrow separating a rule's lhs from its rhs, e.g. "S -> NP VP".
_ARROW_RE = re.compile(r"\s* -> \s*", re.VERBOSE)
# A bracketed probability annotation, e.g. "[0.5]" (group 1 keeps brackets).
_PROBABILITY_RE = re.compile(r"( \[ [\d\.]+ \] ) \s*", re.VERBOSE)
# A single- or double-quoted terminal symbol (group 1 keeps quotes).
_TERMINAL_RE = re.compile(r'( "[^"]*" | \'[^\']*\' ) \s*', re.VERBOSE)
# A vertical bar separating alternative right-hand sides.
_DISJUNCTION_RE = re.compile(r"\| \s*", re.VERBOSE)
+
|
| 1326 |
+
def _read_production(line, nonterm_parser, probabilistic=False):
    """
    Parse a grammar rule, given as a string, and return
    a list of productions.

    :param line: a single rule, e.g. ``"NP -> Det N | NP PP"``
    :param nonterm_parser: a function ``(string, pos) -> (nonterminal, pos)``
    :param probabilistic: if True, accept ``[p]`` probability annotations
        and build ``ProbabilisticProduction`` objects
    """
    pos = 0

    # Parse the left-hand side.
    lhs, pos = nonterm_parser(line, pos)

    # Skip over the arrow.
    m = _ARROW_RE.match(line, pos)
    if not m:
        raise ValueError("Expected an arrow")
    pos = m.end()

    # Parse the right hand side.  Each '|' starts a new alternative;
    # probabilities[i] is the probability attached to rhsides[i].
    probabilities = [0.0]
    rhsides = [[]]
    while pos < len(line):
        # Probability.
        m = _PROBABILITY_RE.match(line, pos)
        if probabilistic and m:
            pos = m.end()
            # Strip the surrounding brackets before converting to float.
            probabilities[-1] = float(m.group(1)[1:-1])
            if probabilities[-1] > 1.0:
                raise ValueError(
                    "Production probability %f, "
                    "should not be greater than 1.0" % (probabilities[-1],)
                )

        # String -- add terminal.
        elif line[pos] in "'\"":
            m = _TERMINAL_RE.match(line, pos)
            if not m:
                raise ValueError("Unterminated string")
            # Strip the quotes; keep the bare terminal text.
            rhsides[-1].append(m.group(1)[1:-1])
            pos = m.end()

        # Vertical bar -- start new rhside.
        elif line[pos] == "|":
            m = _DISJUNCTION_RE.match(line, pos)
            probabilities.append(0.0)
            rhsides.append([])
            pos = m.end()

        # Anything else -- nonterminal.
        else:
            nonterm, pos = nonterm_parser(line, pos)
            rhsides[-1].append(nonterm)

    if probabilistic:
        return [
            ProbabilisticProduction(lhs, rhs, prob=probability)
            for (rhs, probability) in zip(rhsides, probabilities)
        ]
    else:
        return [Production(lhs, rhs) for rhs in rhsides]
+
|
| 1385 |
+
|
| 1386 |
+
#################################################################
|
| 1387 |
+
# Reading Phrase Structure Grammars
|
| 1388 |
+
#################################################################
|
| 1389 |
+
|
| 1390 |
+
|
| 1391 |
+
def read_grammar(input, nonterm_parser, probabilistic=False, encoding=None):
    """
    Return a pair consisting of a starting category and a list of
    ``Productions``.

    :param input: a grammar, either in the form of a string or else
        as a list of strings.
    :param nonterm_parser: a function for parsing nonterminals.
        It should take a ``(string, position)`` as argument and
        return a ``(nonterminal, position)`` as result.
    :param probabilistic: are the grammar rules probabilistic?
    :type probabilistic: bool
    :param encoding: the encoding of the grammar, if it is a binary string
    :type encoding: str
    """
    if encoding is not None:
        input = input.decode(encoding)
    if isinstance(input, str):
        lines = input.split("\n")
    else:
        lines = input

    start = None
    productions = []
    continue_line = ""  # buffers a rule continued with a trailing backslash
    for linenum, line in enumerate(lines):
        line = continue_line + line.strip()
        if line.startswith("#") or line == "":
            continue
        if line.endswith("\\"):
            # Rule continues on the next line; stash it and keep going.
            continue_line = line[:-1].rstrip() + " "
            continue
        continue_line = ""
        try:
            if line[0] == "%":
                # Directive line, e.g. "%start S".
                directive, args = line[1:].split(None, 1)
                if directive == "start":
                    start, pos = nonterm_parser(args, 0)
                    if pos != len(args):
                        raise ValueError("Bad argument to start directive")
                else:
                    raise ValueError("Bad directive")
            else:
                # expand out the disjunctions on the RHS
                productions += _read_production(line, nonterm_parser, probabilistic)
        except ValueError as e:
            raise ValueError(f"Unable to parse line {linenum + 1}: {line}\n{e}") from e

    if not productions:
        raise ValueError("No productions found!")
    if not start:
        # No %start directive: default to the lhs of the first production.
        start = productions[0].lhs()
    return (start, productions)
| 1444 |
+
|
| 1445 |
+
|
| 1446 |
+
# A nonterminal name: a word character or '/', followed by word
# characters or any of '/^<>-'; trailing whitespace is consumed.
_STANDARD_NONTERM_RE = re.compile(r"( [\w/][\w/^<>-]* ) \s*", re.VERBOSE)
+
|
| 1449 |
+
def standard_nonterm_parser(string, pos):
    """
    Parse a ``Nonterminal`` starting at ``pos`` in ``string``.

    :return: a ``(nonterminal, new_position)`` pair
    :raise ValueError: if no nonterminal starts at ``pos``
    """
    match = _STANDARD_NONTERM_RE.match(string, pos)
    if match is None:
        raise ValueError("Expected a nonterminal, found: " + string[pos:])
    return Nonterminal(match.group(1)), match.end()
|
| 1455 |
+
|
| 1456 |
+
#################################################################
|
| 1457 |
+
# Reading Dependency Grammars
|
| 1458 |
+
#################################################################
|
| 1459 |
+
|
| 1460 |
+
# Matches a complete, well-formed dependency rule; used only to validate
# the whole string before _SPLIT_DG_RE tokenizes it.
_READ_DG_RE = re.compile(
    r"""^\s* # leading whitespace
    ('[^']+')\s* # single-quoted lhs
    (?:[-=]+>)\s* # arrow
    (?:( # rhs:
    "[^"]+" # doubled-quoted terminal
    | '[^']+' # single-quoted terminal
    | \| # disjunction
    )
    \s*) # trailing space
    *$""",  # zero or more copies
    re.VERBOSE,
)
# Captures a rule's tokens: quoted words, arrows, and '|' separators.
# NOTE(review): the first alternative `'[^']'` only matches a quoted
# single character; multi-character words are caught by the later
# `'[^']+'` alternative -- confirm the first branch is intentional.
_SPLIT_DG_RE = re.compile(r"""('[^']'|[-=]+>|"[^"]+"|'[^']+'|\|)""")
|
| 1475 |
+
|
| 1476 |
+
def _read_dependency_production(s):
    """
    Parse one dependency-grammar rule string and return a list of
    ``DependencyProduction`` objects, one per '|'-separated alternative.

    :raise ValueError: if ``s`` is not a well-formed rule
    """
    if not _READ_DG_RE.match(s):
        raise ValueError("Bad production string")
    # re.split with a capturing group alternates unmatched text and
    # captured delimiters; the tokens we want are at the odd positions.
    tokens = _SPLIT_DG_RE.split(s)[1::2]
    head = tokens[0].strip("'\"")
    # tokens[1] is the arrow; everything after it is the rhs stream.
    alternatives = [[]]
    for token in tokens[2:]:
        if token == "|":
            alternatives.append([])
        else:
            alternatives[-1].append(token.strip("'\""))
    return [DependencyProduction(head, mods) for mods in alternatives]
+
|
| 1490 |
+
|
| 1491 |
+
#################################################################
|
| 1492 |
+
# Demonstration
|
| 1493 |
+
#################################################################
|
| 1494 |
+
|
| 1495 |
+
|
| 1496 |
+
def cfg_demo():
    """
    A demonstration showing how ``CFGs`` can be created and used.
    """

    from nltk import CFG, Production, nonterminals

    # Create some nonterminals
    S, NP, VP, PP = nonterminals("S, NP, VP, PP")
    N, V, P, Det = nonterminals("N, V, P, Det")
    # Slashed category, as used in categorial-style grammars.
    VP_slash_NP = VP / NP

    print("Some nonterminals:", [S, NP, VP, PP, N, V, P, Det, VP / NP])
    print(" S.symbol() =>", repr(S.symbol()))
    print()

    print(Production(S, [NP]))

    # Create some Grammar Productions
    grammar = CFG.fromstring(
        """
    S -> NP VP
    PP -> P NP
    NP -> Det N | NP PP
    VP -> V NP | VP PP
    Det -> 'a' | 'the'
    N -> 'dog' | 'cat'
    V -> 'chased' | 'sat'
    P -> 'on' | 'in'
    """
    )

    print("A Grammar:", repr(grammar))
    print(" grammar.start() =>", repr(grammar.start()))
    print(" grammar.productions() =>", end=" ")
    # Use string.replace(...) to line-wrap the output.
    print(repr(grammar.productions()).replace(",", ",\n" + " " * 25))
    print()
+
|
| 1535 |
+
|
| 1536 |
+
def pcfg_demo():
    """
    A demonstration showing how a ``PCFG`` can be created and used.
    """

    # NOTE(review): ``treetransforms`` is imported but not referenced
    # below; the tree transformations use Tree methods directly.
    from nltk import induce_pcfg, treetransforms
    from nltk.corpus import treebank
    from nltk.parse import pchart

    # A toy grammar with hand-assigned probabilities.
    toy_pcfg1 = PCFG.fromstring(
        """
    S -> NP VP [1.0]
    NP -> Det N [0.5] | NP PP [0.25] | 'John' [0.1] | 'I' [0.15]
    Det -> 'the' [0.8] | 'my' [0.2]
    N -> 'man' [0.5] | 'telescope' [0.5]
    VP -> VP PP [0.1] | V NP [0.7] | V [0.2]
    V -> 'ate' [0.35] | 'saw' [0.65]
    PP -> P NP [1.0]
    P -> 'with' [0.61] | 'under' [0.39]
    """
    )

    # A second toy grammar, one production per line.
    toy_pcfg2 = PCFG.fromstring(
        """
    S -> NP VP [1.0]
    VP -> V NP [.59]
    VP -> V [.40]
    VP -> VP PP [.01]
    NP -> Det N [.41]
    NP -> Name [.28]
    NP -> NP PP [.31]
    PP -> P NP [1.0]
    V -> 'saw' [.21]
    V -> 'ate' [.51]
    V -> 'ran' [.28]
    N -> 'boy' [.11]
    N -> 'cookie' [.12]
    N -> 'table' [.13]
    N -> 'telescope' [.14]
    N -> 'hill' [.5]
    Name -> 'Jack' [.52]
    Name -> 'Bob' [.48]
    P -> 'with' [.61]
    P -> 'under' [.39]
    Det -> 'the' [.41]
    Det -> 'a' [.31]
    Det -> 'my' [.28]
    """
    )

    pcfg_prods = toy_pcfg1.productions()

    # Inspect one production's accessors.
    pcfg_prod = pcfg_prods[2]
    print("A PCFG production:", repr(pcfg_prod))
    print(" pcfg_prod.lhs() =>", repr(pcfg_prod.lhs()))
    print(" pcfg_prod.rhs() =>", repr(pcfg_prod.rhs()))
    print(" pcfg_prod.prob() =>", repr(pcfg_prod.prob()))
    print()

    grammar = toy_pcfg2
    print("A PCFG grammar:", repr(grammar))
    print(" grammar.start() =>", repr(grammar.start()))
    print(" grammar.productions() =>", end=" ")
    # Use .replace(...) to line-wrap the output.
    print(repr(grammar.productions()).replace(",", ",\n" + " " * 26))
    print()

    # extract productions from three trees and induce the PCFG
    print("Induce PCFG grammar from treebank data:")

    productions = []
    item = treebank._fileids[0]
    for tree in treebank.parsed_sents(item)[:3]:
        # perform optional tree transformations, e.g.:
        tree.collapse_unary(collapsePOS=False)
        tree.chomsky_normal_form(horzMarkov=2)

        productions += tree.productions()

    S = Nonterminal("S")
    grammar = induce_pcfg(S, productions)
    print(grammar)
    print()

    print("Parse sentence using induced grammar:")

    parser = pchart.InsideChartParser(grammar)
    parser.trace(3)

    # doesn't work as tokens are different:
    # sent = treebank.tokenized('wsj_0001.mrg')[0]

    sent = treebank.parsed_sents(item)[0].leaves()
    print(sent)
    for parse in parser.parse(sent):
        print(parse)
+
|
| 1633 |
+
|
| 1634 |
+
def fcfg_demo():
    """
    A demonstration that loads and displays a feature-based grammar
    shipped with the NLTK data distribution.
    """
    import nltk.data

    g = nltk.data.load("grammars/book_grammars/feat0.fcfg")
    print(g)
    print()
|
| 1641 |
+
|
| 1642 |
+
def dg_demo():
    """
    A demonstration showing the creation and inspection of a
    ``DependencyGrammar``.
    """
    grammar = DependencyGrammar.fromstring(
        """
    'scratch' -> 'cats' | 'walls'
    'walls' -> 'the'
    'cats' -> 'the'
    """
    )
    print(grammar)
+
|
| 1656 |
+
|
| 1657 |
+
def sdg_demo():
    """
    A demonstration of how to read a string representation of
    a CoNLL format dependency tree.
    """
    from nltk.parse import DependencyGraph

    # One token per line: index, form, lemma, coarse POS, POS,
    # features, head index, relation, plus two unused columns.
    dg = DependencyGraph(
        """
    1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
    2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _
    3 met met Prep Prep voor 8 mod _ _
    4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _
    5 moeder moeder N N soort|ev|neut 3 obj1 _ _
    6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _
    7 gaan ga V V hulp|inf 6 vc _ _
    8 winkelen winkel V V intrans|inf 11 cnj _ _
    9 , , Punc Punc komma 8 punct _ _
    10 zwemmen zwem V V intrans|inf 11 cnj _ _
    11 of of Conj Conj neven 7 vc _ _
    12 terrassen terras N N soort|mv|neut 11 cnj _ _
    13 . . Punc Punc punt 12 punct _ _
    """
    )
    tree = dg.tree()
    print(tree.pprint())
+
|
| 1684 |
+
|
| 1685 |
+
def demo():
    """Run all of this module's demonstration functions in sequence."""
    cfg_demo()
    pcfg_demo()
    fcfg_demo()
    dg_demo()
    sdg_demo()
+
|
| 1692 |
+
|
| 1693 |
+
if __name__ == "__main__":
    demo()

# Public names exported by this module.
__all__ = [
    "Nonterminal",
    "nonterminals",
    "CFG",
    "Production",
    "PCFG",
    "ProbabilisticProduction",
    "DependencyGrammar",
    "DependencyProduction",
    "ProbabilisticDependencyGrammar",
    "induce_pcfg",
    "read_grammar",
]
|
lib/python3.10/site-packages/nltk/help.py
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Natural Language Toolkit (NLTK) Help
|
| 2 |
+
#
|
| 3 |
+
# Copyright (C) 2001-2023 NLTK Project
|
| 4 |
+
# Authors: Steven Bird <stevenbird1@gmail.com>
|
| 5 |
+
# URL: <https://www.nltk.org/>
|
| 6 |
+
# For license information, see LICENSE.TXT
|
| 7 |
+
|
| 8 |
+
"""
|
| 9 |
+
Provide structured access to documentation.
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import re
|
| 13 |
+
from textwrap import wrap
|
| 14 |
+
|
| 15 |
+
from nltk.data import load
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def brown_tagset(tagpattern=None):
    """Print documentation for Brown-corpus tags matching ``tagpattern``
    (an exact tag or a regexp); print all tags if it is None."""
    _format_tagset("brown_tagset", tagpattern)
+
|
| 21 |
+
|
| 22 |
+
def claws5_tagset(tagpattern=None):
    """Print documentation for CLAWS5 tags matching ``tagpattern``
    (an exact tag or a regexp); print all tags if it is None."""
    _format_tagset("claws5_tagset", tagpattern)
+
|
| 25 |
+
|
| 26 |
+
def upenn_tagset(tagpattern=None):
    """Print documentation for Penn Treebank tags matching ``tagpattern``
    (an exact tag or a regexp); print all tags if it is None."""
    _format_tagset("upenn_tagset", tagpattern)
+
|
| 29 |
+
|
| 30 |
+
#####################################################################
|
| 31 |
+
# UTILITIES
|
| 32 |
+
#####################################################################
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def _print_entries(tags, tagdict):
|
| 36 |
+
for tag in tags:
|
| 37 |
+
entry = tagdict[tag]
|
| 38 |
+
defn = [tag + ": " + entry[0]]
|
| 39 |
+
examples = wrap(
|
| 40 |
+
entry[1], width=75, initial_indent=" ", subsequent_indent=" "
|
| 41 |
+
)
|
| 42 |
+
print("\n".join(defn + examples))
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def _format_tagset(tagset, tagpattern=None):
    """Load the pickled help table for ``tagset`` and print its entries.

    With no ``tagpattern``, every entry is printed in sorted order.  An
    exact tag match prints just that entry; otherwise ``tagpattern`` is
    treated as a regular expression matched against each tag name.
    """
    tagdict = load("help/tagsets/" + tagset + ".pickle")
    if not tagpattern:
        _print_entries(sorted(tagdict), tagdict)
        return
    if tagpattern in tagdict:
        _print_entries([tagpattern], tagdict)
        return
    # Fall back to regex matching against the tag names.
    pattern = re.compile(tagpattern)
    matching = [tag for tag in sorted(tagdict) if pattern.match(tag)]
    if matching:
        _print_entries(matching, tagdict)
    else:
        print("No matching tags found.")
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
# Demonstration / smoke test when run as a script: a regex lookup, a
# regex with a metacharacter, a tag that matches nothing, and an exact tag.
if __name__ == "__main__":
    brown_tagset(r"NN.*")
    upenn_tagset(r".*\$")
    claws5_tagset("UNDEFINED")
    brown_tagset(r"NN")
|
lib/python3.10/site-packages/nltk/internals.py
ADDED
|
@@ -0,0 +1,1123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Natural Language Toolkit: Internal utility functions
|
| 2 |
+
#
|
| 3 |
+
# Copyright (C) 2001-2023 NLTK Project
|
| 4 |
+
# Author: Steven Bird <stevenbird1@gmail.com>
|
| 5 |
+
# Edward Loper <edloper@gmail.com>
|
| 6 |
+
# Nitin Madnani <nmadnani@ets.org>
|
| 7 |
+
# URL: <https://www.nltk.org/>
|
| 8 |
+
# For license information, see LICENSE.TXT
|
| 9 |
+
|
| 10 |
+
import fnmatch
|
| 11 |
+
import locale
|
| 12 |
+
import os
|
| 13 |
+
import re
|
| 14 |
+
import stat
|
| 15 |
+
import subprocess
|
| 16 |
+
import sys
|
| 17 |
+
import textwrap
|
| 18 |
+
import types
|
| 19 |
+
import warnings
|
| 20 |
+
from xml.etree import ElementTree
|
| 21 |
+
|
| 22 |
+
##########################################################################
|
| 23 |
+
# Java Via Command-Line
|
| 24 |
+
##########################################################################
|
| 25 |
+
|
| 26 |
+
# Path to the Java binary; set lazily by ``config_java`` (called on demand
# from ``java()`` when still None).
_java_bin = None
# Extra command-line options prepended to every ``java()`` invocation.
_java_options = []
# [xx] add classpath option to config_java?
|
| 29 |
+
def config_java(bin=None, options=None, verbose=False):
    """
    Configure nltk's java interface by recording where the Java binary
    lives and which extra options should be passed when it is run.

    :param bin: Full path to the Java binary.  When omitted, the system
        is searched for one (a ``LookupError`` is raised if none is found).
    :type bin: str
    :param options: Options passed to the Java binary on every call; a
        common value is ``'-Xmx512m'``, which raises the maximum heap
        size to 512 megabytes.  When ``None``, the current option list
        is left unchanged.
    :type options: list(str)
    """
    global _java_bin, _java_options

    _java_bin = find_binary(
        "java",
        bin,
        env_vars=["JAVAHOME", "JAVA_HOME"],
        verbose=verbose,
        binary_names=["java.exe"],
    )

    if options is not None:
        # Accept either a whitespace-separated string or any sequence.
        _java_options = options.split() if isinstance(options, str) else list(options)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def java(cmd, classpath=None, stdin=None, stdout=None, stderr=None, blocking=True):
    """
    Execute the given java command, by opening a subprocess that calls
    Java. If java has not yet been configured, it will be configured
    by calling ``config_java()`` with no arguments.

    :param cmd: The java command that should be called, formatted as
        a list of strings. Typically, the first string will be the name
        of the java class; and the remaining strings will be arguments
        for that java class.
    :type cmd: list(str)

    :param classpath: A ``':'`` separated list of directories, JAR
        archives, and ZIP archives to search for class files.
    :type classpath: str

    :param stdin: Specify the executed program's
        standard input file handles, respectively. Valid values are ``subprocess.PIPE``,
        an existing file descriptor (a positive integer), an existing
        file object, 'pipe', 'stdout', 'devnull' and None. ``subprocess.PIPE`` indicates that a
        new pipe to the child should be created. With None, no
        redirection will occur; the child's file handles will be
        inherited from the parent. Additionally, stderr can be
        ``subprocess.STDOUT``, which indicates that the stderr data
        from the applications should be captured into the same file
        handle as for stdout.

    :param stdout: Specify the executed program's standard output file
        handle. See ``stdin`` for valid values.

    :param stderr: Specify the executed program's standard error file
        handle. See ``stdin`` for valid values.

    :param blocking: If ``false``, then return immediately after
        spawning the subprocess. In this case, the return value is
        the ``Popen`` object, and not a ``(stdout, stderr)`` tuple.

    :return: If ``blocking=True``, then return a tuple ``(stdout,
        stderr)``, containing the stdout and stderr outputs generated
        by the java command if the ``stdout`` and ``stderr`` parameters
        were set to ``subprocess.PIPE``; or None otherwise. If
        ``blocking=False``, then return a ``subprocess.Popen`` object.

    :raise OSError: If the java command returns a nonzero return code.
    """

    # Translate the string shorthands ('pipe', 'stdout', 'devnull') into
    # their subprocess constants; any other value passes through unchanged.
    subprocess_output_dict = {
        "pipe": subprocess.PIPE,
        "stdout": subprocess.STDOUT,
        "devnull": subprocess.DEVNULL,
    }

    stdin = subprocess_output_dict.get(stdin, stdin)
    stdout = subprocess_output_dict.get(stdout, stdout)
    stderr = subprocess_output_dict.get(stderr, stderr)

    if isinstance(cmd, str):
        raise TypeError("cmd should be a list of strings")

    # Make sure we know where a java binary is.
    if _java_bin is None:
        config_java()

    # Set up the classpath.
    # NOTE(review): a classpath is effectively required here -- the default
    # ``classpath=None`` would fail at ``list(classpath)`` below.  Confirm
    # whether callers are expected to always supply one.
    if isinstance(classpath, str):
        classpaths = [classpath]
    else:
        classpaths = list(classpath)
    classpath = os.path.pathsep.join(classpaths)

    # Construct the full command string: java binary, configured global
    # options, then the classpath flag, then the caller's command.
    cmd = list(cmd)
    cmd = ["-cp", classpath] + cmd
    cmd = [_java_bin] + _java_options + cmd

    # Call java via a subprocess
    p = subprocess.Popen(cmd, stdin=stdin, stdout=stdout, stderr=stderr)
    if not blocking:
        return p
    (stdout, stderr) = p.communicate()

    # Check the return code; echo the captured stderr (if any) before
    # raising, since the exception message only carries the command line.
    if p.returncode != 0:
        print(_decode_stdoutdata(stderr))
        raise OSError("Java command failed : " + str(cmd))

    return (stdout, stderr)
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
######################################################################
|
| 152 |
+
# Parsing
|
| 153 |
+
######################################################################
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
class ReadError(ValueError):
    """
    Raised by the ``read_*`` helper functions when parsing fails.

    :param expected: Description of what the parser was looking for.
    :param position: Index in the input string where the failure occurred.
    """

    def __init__(self, expected, position):
        super().__init__(expected, position)
        self.expected = expected
        self.position = position

    def __str__(self):
        return "Expected %s at %s" % (self.expected, self.position)
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
# Opening of a Python string literal: optional u/r prefixes followed by a
# (possibly triple) quote; group 1 captures the quote mark itself.
_STRING_START_RE = re.compile(r"[uU]?[rR]?(\"\"\"|\'\'\'|\"|\')")


def read_str(s, start_position):
    """
    If a Python string literal begins at the specified position in the
    given string, then return a tuple ``(val, end_position)``
    containing the value of the string literal and the position where
    it ends. Otherwise, raise a ``ReadError``.

    :param s: A string that will be checked to see if within which a
        Python string literal exists.
    :type s: str

    :param start_position: The specified beginning position of the string ``s``
        to begin regex matching.
    :type start_position: int

    :return: A tuple containing the matched string literal evaluated as a
        string and the end position of the string literal.
    :rtype: tuple(str, int)

    :raise ReadError: If the ``_STRING_START_RE`` regex doesn't return a
        match in ``s`` at ``start_position``, i.e., open quote. If the
        ``_STRING_END_RE`` regex doesn't return a match in ``s`` at the
        end of the first match, i.e., close quote.
    :raise ValueError: If an invalid string (i.e., contains an invalid
        escape sequence) is passed into the ``eval``.

    :Example:

    >>> from nltk.internals import read_str
    >>> read_str('"Hello", World!', 0)
    ('Hello', 7)

    """
    # Read the open quote, and any modifiers.
    m = _STRING_START_RE.match(s, start_position)
    if not m:
        raise ReadError("open quote", start_position)
    quotemark = m.group(1)

    # Find the close quote: scan for either a backslash or the same quote
    # mark that opened the literal.
    _STRING_END_RE = re.compile(r"\\|%s" % quotemark)
    position = m.end()
    while True:
        match = _STRING_END_RE.search(s, position)
        if not match:
            raise ReadError("close quote", position)
        if match.group(0) == "\\":
            # A backslash escapes the next character, so resume scanning
            # one character past the escaped one.
            position = match.end() + 1
        else:
            break

    # Process it, using eval. Strings with invalid escape sequences
    # might raise ValueError.
    # SECURITY NOTE: eval on the sliced literal assumes the span between
    # the quotes really is a quoted literal; never call this on untrusted
    # input that has not been bounded by the regex scan above.
    try:
        return eval(s[start_position : match.end()]), match.end()
    except ValueError as e:
        raise ReadError("valid escape sequence", start_position) from e
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
# An optionally-signed run of decimal digits.
_READ_INT_RE = re.compile(r"-?\d+")


def read_int(s, start_position):
    """
    Read a Python integer beginning at ``start_position`` in ``s``.

    :param s: The string to scan for an integer.
    :type s: str
    :param start_position: Index in ``s`` where matching begins.
    :type start_position: int
    :return: ``(value, end_position)`` -- the parsed integer and the
        index just past its final digit.
    :rtype: tuple(int, int)
    :raise ReadError: If no integer begins at ``start_position``.

    :Example:

    >>> from nltk.internals import read_int
    >>> read_int('42 is the answer', 0)
    (42, 2)

    """
    match = _READ_INT_RE.match(s, start_position)
    if match is None:
        raise ReadError("integer", start_position)
    return int(match.group()), match.end()
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
# Optionally-signed digits with an optional decimal point and fraction.
# Both digit groups are optional, so a successful match can be empty --
# the reader below rejects that case explicitly.
_READ_NUMBER_VALUE = re.compile(r"-?(\d*)([.]?\d*)?")


def read_number(s, start_position):
    """
    Read an integer or float beginning at ``start_position`` in ``s``.

    :param s: The string to scan for a number.
    :type s: str
    :param start_position: Index in ``s`` where matching begins.
    :type start_position: int
    :return: ``(value, end_position)`` -- the parsed number (an ``int``
        when there is no decimal point, else a ``float``) and the index
        just past it.
    :rtype: tuple(float, int)
    :raise ReadError: If no number begins at ``start_position``.

    :Example:

    >>> from nltk.internals import read_number
    >>> read_number('Pi is 3.14159', 6)
    (3.14159, 13)

    """
    match = _READ_NUMBER_VALUE.match(s, start_position)
    # Reject a match that captured neither an integer part nor a
    # fractional part (the pattern succeeds on the empty string).
    if match is None or not (match.group(1) or match.group(2)):
        raise ReadError("number", start_position)
    value = float(match.group()) if match.group(2) else int(match.group())
    return value, match.end()
|
| 311 |
+
|
| 312 |
+
|
| 313 |
+
######################################################################
|
| 314 |
+
# Check if a method has been overridden
|
| 315 |
+
######################################################################
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
def overridden(method):
|
| 319 |
+
"""
|
| 320 |
+
:return: True if ``method`` overrides some method with the same
|
| 321 |
+
name in a base class. This is typically used when defining
|
| 322 |
+
abstract base classes or interfaces, to allow subclasses to define
|
| 323 |
+
either of two related methods:
|
| 324 |
+
|
| 325 |
+
>>> class EaterI:
|
| 326 |
+
... '''Subclass must define eat() or batch_eat().'''
|
| 327 |
+
... def eat(self, food):
|
| 328 |
+
... if overridden(self.batch_eat):
|
| 329 |
+
... return self.batch_eat([food])[0]
|
| 330 |
+
... else:
|
| 331 |
+
... raise NotImplementedError()
|
| 332 |
+
... def batch_eat(self, foods):
|
| 333 |
+
... return [self.eat(food) for food in foods]
|
| 334 |
+
|
| 335 |
+
:type method: instance method
|
| 336 |
+
"""
|
| 337 |
+
if isinstance(method, types.MethodType) and method.__self__.__class__ is not None:
|
| 338 |
+
name = method.__name__
|
| 339 |
+
funcs = [
|
| 340 |
+
cls.__dict__[name]
|
| 341 |
+
for cls in _mro(method.__self__.__class__)
|
| 342 |
+
if name in cls.__dict__
|
| 343 |
+
]
|
| 344 |
+
return len(funcs) > 1
|
| 345 |
+
else:
|
| 346 |
+
raise TypeError("Expected an instance method.")
|
| 347 |
+
|
| 348 |
+
|
| 349 |
+
def _mro(cls):
|
| 350 |
+
"""
|
| 351 |
+
Return the method resolution order for ``cls`` -- i.e., a list
|
| 352 |
+
containing ``cls`` and all its base classes, in the order in which
|
| 353 |
+
they would be checked by ``getattr``. For new-style classes, this
|
| 354 |
+
is just cls.__mro__. For classic classes, this can be obtained by
|
| 355 |
+
a depth-first left-to-right traversal of ``__bases__``.
|
| 356 |
+
"""
|
| 357 |
+
if isinstance(cls, type):
|
| 358 |
+
return cls.__mro__
|
| 359 |
+
else:
|
| 360 |
+
mro = [cls]
|
| 361 |
+
for base in cls.__bases__:
|
| 362 |
+
mro.extend(_mro(base))
|
| 363 |
+
return mro
|
| 364 |
+
|
| 365 |
+
|
| 366 |
+
######################################################################
|
| 367 |
+
# Deprecation decorator & base class
|
| 368 |
+
######################################################################
|
| 369 |
+
# [xx] dedent msg first if it comes from a docstring.
|
| 370 |
+
|
| 371 |
+
|
| 372 |
+
def _add_epytext_field(obj, field, message):
|
| 373 |
+
"""Add an epytext @field to a given object's docstring."""
|
| 374 |
+
indent = ""
|
| 375 |
+
# If we already have a docstring, then add a blank line to separate
|
| 376 |
+
# it from the new field, and check its indentation.
|
| 377 |
+
if obj.__doc__:
|
| 378 |
+
obj.__doc__ = obj.__doc__.rstrip() + "\n\n"
|
| 379 |
+
indents = re.findall(r"(?<=\n)[ ]+(?!\s)", obj.__doc__.expandtabs())
|
| 380 |
+
if indents:
|
| 381 |
+
indent = min(indents)
|
| 382 |
+
# If we don't have a docstring, add an empty one.
|
| 383 |
+
else:
|
| 384 |
+
obj.__doc__ = ""
|
| 385 |
+
|
| 386 |
+
obj.__doc__ += textwrap.fill(
|
| 387 |
+
f"@{field}: {message}",
|
| 388 |
+
initial_indent=indent,
|
| 389 |
+
subsequent_indent=indent + " ",
|
| 390 |
+
)
|
| 391 |
+
|
| 392 |
+
|
| 393 |
+
def deprecated(message):
|
| 394 |
+
"""
|
| 395 |
+
A decorator used to mark functions as deprecated. This will cause
|
| 396 |
+
a warning to be printed the when the function is used. Usage:
|
| 397 |
+
|
| 398 |
+
>>> from nltk.internals import deprecated
|
| 399 |
+
>>> @deprecated('Use foo() instead')
|
| 400 |
+
... def bar(x):
|
| 401 |
+
... print(x/10)
|
| 402 |
+
|
| 403 |
+
"""
|
| 404 |
+
|
| 405 |
+
def decorator(func):
|
| 406 |
+
msg = f"Function {func.__name__}() has been deprecated. {message}"
|
| 407 |
+
msg = "\n" + textwrap.fill(msg, initial_indent=" ", subsequent_indent=" ")
|
| 408 |
+
|
| 409 |
+
def newFunc(*args, **kwargs):
|
| 410 |
+
warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
|
| 411 |
+
return func(*args, **kwargs)
|
| 412 |
+
|
| 413 |
+
# Copy the old function's name, docstring, & dict
|
| 414 |
+
newFunc.__dict__.update(func.__dict__)
|
| 415 |
+
newFunc.__name__ = func.__name__
|
| 416 |
+
newFunc.__doc__ = func.__doc__
|
| 417 |
+
newFunc.__deprecated__ = True
|
| 418 |
+
# Add a @deprecated field to the docstring.
|
| 419 |
+
_add_epytext_field(newFunc, "deprecated", message)
|
| 420 |
+
return newFunc
|
| 421 |
+
|
| 422 |
+
return decorator
|
| 423 |
+
|
| 424 |
+
|
| 425 |
+
class Deprecated:
    """
    A base class used to mark deprecated classes. A typical usage is to
    alert users that the name of a class has changed:

    >>> from nltk.internals import Deprecated
    >>> class NewClassName:
    ...     pass # All logic goes here.
    ...
    >>> class OldClassName(Deprecated, NewClassName):
    ...     "Use NewClassName instead."

    The docstring of the deprecated class will be used in the
    deprecation warning message.
    """

    def __new__(cls, *args, **kwargs):
        # Figure out which class is the deprecated one: the class along
        # the MRO that lists Deprecated among its immediate bases.
        dep_cls = None
        for base in _mro(cls):
            if Deprecated in base.__bases__:
                dep_cls = base
                break
        assert dep_cls, "Unable to determine which base is deprecated."

        # Construct an appropriate warning from the deprecated class's
        # docstring.  BUG FIX: this previously read
        # ``dep_cls.__doc__ or "".strip()``, which applied ``.strip()``
        # to the empty-string fallback instead of to the docstring;
        # parenthesize so the whole result is stripped.
        doc = (dep_cls.__doc__ or "").strip()
        # If there's a @deprecated field, strip off the field marker.
        doc = re.sub(r"\A\s*@deprecated:", r"", doc)
        # Strip off any indentation.
        doc = re.sub(r"(?m)^\s*", "", doc)
        # Construct a 'name' string.
        name = "Class %s" % dep_cls.__name__
        if cls != dep_cls:
            name += " (base class for %s)" % cls.__name__
        # Put it all together.
        msg = f"{name} has been deprecated. {doc}"
        # Wrap it.
        msg = "\n" + textwrap.fill(msg, initial_indent=" ", subsequent_indent=" ")
        warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
        # Do the actual work of __new__.
        return object.__new__(cls)
|
| 467 |
+
|
| 468 |
+
|
| 469 |
+
##########################################################################
|
| 470 |
+
# COUNTER, FOR UNIQUE NAMING
|
| 471 |
+
##########################################################################
|
| 472 |
+
|
| 473 |
+
|
| 474 |
+
class Counter:
    """
    A counter whose value auto-increments each time it is read via ``get``.
    """

    def __init__(self, initial_value=0):
        # Last value handed out; get() increments before returning.
        self._value = initial_value

    def get(self):
        """Increment the counter and return its new value."""
        self._value = self._value + 1
        return self._value
|
| 485 |
+
|
| 486 |
+
|
| 487 |
+
##########################################################################
|
| 488 |
+
# Search for files/binaries
|
| 489 |
+
##########################################################################
|
| 490 |
+
|
| 491 |
+
|
| 492 |
+
def find_file_iter(
    filename,
    env_vars=(),
    searchpath=(),
    file_names=None,
    url=None,
    verbose=False,
    finding_dir=False,
):
    """
    Search for a file to be used by nltk, yielding every candidate path
    that exists, in order: direct paths, environment-variable locations,
    the explicit search path, and finally (POSIX only) ``which``.

    :param filename: The name or path of the file.
    :param env_vars: A list of environment variable names to check.
    :param file_names: A list of alternative file names to check.
    :param searchpath: List of directories to search.
    :param url: URL presented to user for download help.
    :param verbose: Whether or not to print path when a file is found.
    :param finding_dir: If true, also yield the raw value of each set
        environment variable (treating it as a directory).
    :raise LookupError: If no candidate was found anywhere.
    """
    file_names = [filename] + (file_names or [])
    assert isinstance(filename, str)
    assert not isinstance(file_names, str)
    assert not isinstance(searchpath, str)
    if isinstance(env_vars, str):
        env_vars = env_vars.split()
    # Tracks whether anything was yielded, so we can raise at the end.
    yielded = False

    # File exists, no magic
    # NOTE(review): the "(unknown)" in the messages below looks like a
    # placeholder where the tool/file name was meant to appear -- confirm.
    for alternative in file_names:
        # NOTE(review): joins `filename` with each alternative (including
        # itself on the first iteration) -- confirm this is intentional.
        path_to_file = os.path.join(filename, alternative)
        if os.path.isfile(path_to_file):
            if verbose:
                print(f"[Found (unknown): {path_to_file}]")
            yielded = True
            yield path_to_file
        # Check the bare alternatives
        if os.path.isfile(alternative):
            if verbose:
                print(f"[Found (unknown): {alternative}]")
            yielded = True
            yield alternative
        # Check if the alternative is inside a 'file' directory
        path_to_file = os.path.join(filename, "file", alternative)
        if os.path.isfile(path_to_file):
            if verbose:
                print(f"[Found (unknown): {path_to_file}]")
            yielded = True
            yield path_to_file

    # Check environment variables
    for env_var in env_vars:
        if env_var in os.environ:
            if finding_dir:  # This is to file a directory instead of file
                yielded = True
                yield os.environ[env_var]

            for env_dir in os.environ[env_var].split(os.pathsep):
                # Check if the environment variable contains a direct path to the bin
                if os.path.isfile(env_dir):
                    if verbose:
                        print(f"[Found (unknown): {env_dir}]")
                    yielded = True
                    yield env_dir
                # Check if the possible bin names exist inside the environment variable directories
                for alternative in file_names:
                    path_to_file = os.path.join(env_dir, alternative)
                    if os.path.isfile(path_to_file):
                        if verbose:
                            print(f"[Found (unknown): {path_to_file}]")
                        yielded = True
                        yield path_to_file
                    # Check if the alternative is inside a 'file' directory
                    # path_to_file = os.path.join(env_dir, 'file', alternative)

                    # Check if the alternative is inside a 'bin' directory
                    path_to_file = os.path.join(env_dir, "bin", alternative)

                    if os.path.isfile(path_to_file):
                        if verbose:
                            print(f"[Found (unknown): {path_to_file}]")
                        yielded = True
                        yield path_to_file

    # Check the path list.
    for directory in searchpath:
        for alternative in file_names:
            path_to_file = os.path.join(directory, alternative)
            if os.path.isfile(path_to_file):
                yielded = True
                yield path_to_file

    # If we're on a POSIX system, then try using the 'which' command
    # to find the file.
    if os.name == "posix":
        for alternative in file_names:
            try:
                p = subprocess.Popen(
                    ["which", alternative],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                )
                stdout, stderr = p.communicate()
                path = _decode_stdoutdata(stdout).strip()
                if path.endswith(alternative) and os.path.exists(path):
                    if verbose:
                        print(f"[Found (unknown): {path}]")
                    yielded = True
                    yield path
            except (KeyboardInterrupt, SystemExit, OSError):
                # Only fatal/OS-level errors propagate; anything else from
                # the 'which' attempt is silently skipped.
                raise
            finally:
                pass

    # Nothing found anywhere: build a descriptive error for the user.
    if not yielded:
        msg = (
            "NLTK was unable to find the %s file!"
            "\nUse software specific "
            "configuration parameters" % filename
        )
        if env_vars:
            msg += " or set the %s environment variable" % env_vars[0]
        msg += "."
        if searchpath:
            msg += "\n\n Searched in:"
            msg += "".join("\n - %s" % d for d in searchpath)
        if url:
            msg += f"\n\n For more information on (unknown), see:\n <{url}>"
        div = "=" * 75
        raise LookupError(f"\n\n{div}\n{msg}\n{div}")
|
| 621 |
+
|
| 622 |
+
|
| 623 |
+
def find_file(
    filename, env_vars=(), searchpath=(), file_names=None, url=None, verbose=False
):
    """Locate a file used by nltk and return its path.

    Thin wrapper around ``find_file_iter``: returns the first candidate
    it yields.  ``find_file_iter`` raises LookupError when nothing is
    found, and that exception propagates to the caller.
    """
    candidates = find_file_iter(filename, env_vars, searchpath, file_names, url, verbose)
    return next(candidates)
|
| 629 |
+
|
| 630 |
+
|
| 631 |
+
def find_dir(
    filename, env_vars=(), searchpath=(), file_names=None, url=None, verbose=False
):
    """Locate a directory used by nltk and return its path.

    Thin wrapper around ``find_file_iter`` called with
    ``finding_dir=True``: returns the first candidate it yields.
    ``find_file_iter`` raises LookupError when nothing is found, and
    that exception propagates to the caller.
    """
    matches = find_file_iter(
        filename, env_vars, searchpath, file_names, url, verbose, finding_dir=True
    )
    return next(matches)
|
| 639 |
+
|
| 640 |
+
|
| 641 |
+
def find_binary_iter(
    name,
    path_to_bin=None,
    env_vars=(),
    searchpath=(),
    binary_names=None,
    url=None,
    verbose=False,
):
    """
    Search for a file to be used by nltk.

    :param name: The name or path of the file.
    :param path_to_bin: The user-supplied binary location (deprecated)
    :param env_vars: A list of environment variable names to check.
    :param searchpath: List of directories to search.
    :param binary_names: A list of alternative file names to check.
    :param url: URL presented to user for download help.
    :param verbose: Whether or not to print path when a file is found.
    """
    # An explicit path_to_bin, when given, takes precedence over name.
    target = path_to_bin or name
    for found in find_file_iter(target, env_vars, searchpath, binary_names, url, verbose):
        yield found
|
| 664 |
+
|
| 665 |
+
|
| 666 |
+
def find_binary(
    name,
    path_to_bin=None,
    env_vars=(),
    searchpath=(),
    binary_names=None,
    url=None,
    verbose=False,
):
    """Locate an external binary used by nltk and return its path.

    Returns the first candidate produced by ``find_binary_iter``; that
    iterator raises LookupError when the binary cannot be found, and
    the exception propagates to the caller.
    """
    candidates = find_binary_iter(
        name, path_to_bin, env_vars, searchpath, binary_names, url, verbose
    )
    return next(candidates)
|
| 680 |
+
|
| 681 |
+
|
| 682 |
+
def find_jar_iter(
    name_pattern,
    path_to_jar=None,
    env_vars=(),
    searchpath=(),
    url=None,
    verbose=False,
    is_regex=False,
):
    """
    Search for a jar that is used by nltk.

    Candidates are yielded in priority order: the explicit
    ``path_to_jar``, then CLASSPATH and any other environment variables,
    then ``searchpath``.  If nothing at all was yielded, a LookupError
    with installation hints is raised.

    :param name_pattern: The name of the jar file
    :param path_to_jar: The user-supplied jar location, or None.
    :param env_vars: A list of environment variable names to check
                     in addition to the CLASSPATH variable which is
                     checked by default.
    :param searchpath: List of directories to search.
    :param url: URL presented to user for download help.
    :param verbose: Whether or not to print path when a jar is found.
    :param is_regex: Whether name is a regular expression.
    :raises LookupError: if an explicit ``path_to_jar`` does not exist,
        or if no candidate was found anywhere.
    """

    assert isinstance(name_pattern, str)
    assert not isinstance(searchpath, str)
    if isinstance(env_vars, str):
        env_vars = env_vars.split()
    yielded = False

    # Make sure we check the CLASSPATH first
    env_vars = ["CLASSPATH"] + list(env_vars)

    # If an explicit location was given, then check it, and yield it if
    # it's present; otherwise, complain.
    if path_to_jar is not None:
        if os.path.isfile(path_to_jar):
            yielded = True
            yield path_to_jar
        else:
            raise LookupError(
                f"Could not find {name_pattern} jar file at {path_to_jar}"
            )

    # Check environment variables
    for env_var in env_vars:
        if env_var in os.environ:
            if env_var == "CLASSPATH":
                classpath = os.environ["CLASSPATH"]
                for cp in classpath.split(os.path.pathsep):
                    cp = os.path.expanduser(cp)
                    if os.path.isfile(cp):
                        filename = os.path.basename(cp)
                        if (
                            is_regex
                            and re.match(name_pattern, filename)
                            or (not is_regex and filename == name_pattern)
                        ):
                            if verbose:
                                print(f"[Found {name_pattern}: {cp}]")
                            yielded = True
                            yield cp
                    # The case where user put directory containing the jar file in the classpath
                    if os.path.isdir(cp):
                        if not is_regex:
                            if os.path.isfile(os.path.join(cp, name_pattern)):
                                if verbose:
                                    print(f"[Found {name_pattern}: {cp}]")
                                yielded = True
                                yield os.path.join(cp, name_pattern)
                        else:
                            # Look for file using regular expression
                            for file_name in os.listdir(cp):
                                if re.match(name_pattern, file_name):
                                    if verbose:
                                        print(
                                            "[Found %s: %s]"
                                            % (
                                                name_pattern,
                                                os.path.join(cp, file_name),
                                            )
                                        )
                                    yielded = True
                                    yield os.path.join(cp, file_name)

            else:
                jar_env = os.path.expanduser(os.environ[env_var])
                # A variable may point at a directory of jars or at one jar.
                jar_iter = (
                    (
                        os.path.join(jar_env, path_to_jar)
                        for path_to_jar in os.listdir(jar_env)
                    )
                    if os.path.isdir(jar_env)
                    else (jar_env,)
                )
                for path_to_jar in jar_iter:
                    if os.path.isfile(path_to_jar):
                        filename = os.path.basename(path_to_jar)
                        if (
                            is_regex
                            and re.match(name_pattern, filename)
                            or (not is_regex and filename == name_pattern)
                        ):
                            if verbose:
                                print(f"[Found {name_pattern}: {path_to_jar}]")
                            yielded = True
                            yield path_to_jar

    # Check the path list.
    for directory in searchpath:
        if is_regex:
            for filename in os.listdir(directory):
                path_to_jar = os.path.join(directory, filename)
                if os.path.isfile(path_to_jar):
                    if re.match(name_pattern, filename):
                        if verbose:
                            # Fixed: report the pattern that matched, like every
                            # other branch, instead of the literal "(unknown)".
                            print(f"[Found {name_pattern}: {path_to_jar}]")
                        yielded = True
                        yield path_to_jar
        else:
            path_to_jar = os.path.join(directory, name_pattern)
            if os.path.isfile(path_to_jar):
                if verbose:
                    print(f"[Found {name_pattern}: {path_to_jar}]")
                yielded = True
                yield path_to_jar

    if not yielded:
        # If nothing was found, raise an error
        msg = "NLTK was unable to find %s!" % name_pattern
        if env_vars:
            msg += " Set the %s environment variable" % env_vars[0]
        msg = textwrap.fill(msg + ".", initial_indent=" ", subsequent_indent=" ")
        if searchpath:
            msg += "\n\n Searched in:"
            msg += "".join("\n - %s" % d for d in searchpath)
        if url:
            msg += "\n\n For more information, on {}, see:\n <{}>".format(
                name_pattern,
                url,
            )
        div = "=" * 75
        raise LookupError(f"\n\n{div}\n{msg}\n{div}")
|
| 822 |
+
|
| 823 |
+
|
| 824 |
+
def find_jar(
    name_pattern,
    path_to_jar=None,
    env_vars=(),
    searchpath=(),
    url=None,
    verbose=False,
    is_regex=False,
):
    """Locate a jar file used by nltk and return its path.

    Returns the first candidate produced by ``find_jar_iter``; that
    iterator raises LookupError when no matching jar is found, and the
    exception propagates to the caller.
    """
    candidates = find_jar_iter(
        name_pattern, path_to_jar, env_vars, searchpath, url, verbose, is_regex
    )
    return next(candidates)
|
| 838 |
+
|
| 839 |
+
|
| 840 |
+
def find_jars_within_path(path_to_jars):
    """Return the paths of all ``*.jar`` files found under *path_to_jars*.

    The directory tree is walked recursively with ``os.walk``; matching
    is done case-insensitively or not per ``fnmatch`` platform rules.
    """
    jars = []
    for root, _dirs, filenames in os.walk(path_to_jars):
        for jar_name in fnmatch.filter(filenames, "*.jar"):
            jars.append(os.path.join(root, jar_name))
    return jars
|
| 846 |
+
|
| 847 |
+
|
| 848 |
+
def _decode_stdoutdata(stdoutdata):
|
| 849 |
+
"""Convert data read from stdout/stderr to unicode"""
|
| 850 |
+
if not isinstance(stdoutdata, bytes):
|
| 851 |
+
return stdoutdata
|
| 852 |
+
|
| 853 |
+
encoding = getattr(sys.__stdout__, "encoding", locale.getpreferredencoding())
|
| 854 |
+
if encoding is None:
|
| 855 |
+
return stdoutdata.decode()
|
| 856 |
+
return stdoutdata.decode(encoding)
|
| 857 |
+
|
| 858 |
+
|
| 859 |
+
##########################################################################
|
| 860 |
+
# Import Stdlib Module
|
| 861 |
+
##########################################################################
|
| 862 |
+
|
| 863 |
+
|
| 864 |
+
def import_from_stdlib(module):
    """
    When python is run from within the nltk/ directory tree, the
    current directory is included at the beginning of the search path.
    Unfortunately, that means that modules within nltk can sometimes
    shadow standard library modules.  As an example, the stdlib
    'inspect' module will attempt to import the stdlib 'tokenize'
    module, but will instead end up importing NLTK's 'tokenize' module
    instead (causing the import to fail).

    :param module: dotted name of the stdlib module to import.
    :return: the imported top-level module object (``__import__`` semantics).
    """
    old_path = sys.path
    sys.path = [d for d in sys.path if d not in ("", ".")]
    # Fixed: restore sys.path even if the import raises; the original
    # left the filtered path in place on failure.
    try:
        return __import__(module)
    finally:
        sys.path = old_path
|
| 879 |
+
|
| 880 |
+
|
| 881 |
+
##########################################################################
|
| 882 |
+
# Wrapper for ElementTree Elements
|
| 883 |
+
##########################################################################
|
| 884 |
+
|
| 885 |
+
|
| 886 |
+
class ElementWrapper:
    """
    A wrapper around ElementTree Element objects whose main purpose is
    to provide nicer __repr__ and __str__ methods. In addition, any
    of the wrapped Element's methods that return other Element objects
    are overridden to wrap those values before returning them.

    This makes Elements more convenient to work with in
    interactive sessions and doctests, at the expense of some
    efficiency.
    """

    # Prevent double-wrapping:
    def __new__(cls, etree):
        """
        Create and return a wrapper around a given Element object.
        If ``etree`` is an ``ElementWrapper``, then ``etree`` is
        returned as-is.
        """
        if isinstance(etree, ElementWrapper):
            return etree
        else:
            return object.__new__(ElementWrapper)

    def __init__(self, etree):
        r"""
        Initialize a new Element wrapper for ``etree``.

        If ``etree`` is a string, then it will be converted to an
        Element object using ``ElementTree.fromstring()`` first:

        >>> ElementWrapper("<test></test>")
        <Element "<?xml version='1.0' encoding='utf8'?>\n<test />">

        """
        if isinstance(etree, str):
            etree = ElementTree.fromstring(etree)
        # Write through __dict__ directly: plain attribute assignment would
        # be intercepted by our own __setattr__ and forwarded to the
        # (not yet stored) wrapped element.
        self.__dict__["_etree"] = etree

    def unwrap(self):
        """
        Return the Element object wrapped by this wrapper.
        """
        return self._etree

    ##////////////////////////////////////////////////////////////
    # { String Representation
    ##////////////////////////////////////////////////////////////

    def __repr__(self):
        # Serialize the element; long serializations are abbreviated to
        # the first 30 characters plus a tail (preferably starting at the
        # last "<", unless that tail would exceed 30 characters).
        s = ElementTree.tostring(self._etree, encoding="utf8").decode("utf8")
        if len(s) > 60:
            e = s.rfind("<")
            if (len(s) - e) > 30:
                e = -20
            s = f"{s[:30]}...{s[e:]}"
        return "<Element %r>" % s

    def __str__(self):
        """
        :return: the result of applying ``ElementTree.tostring()`` to
        the wrapped Element object.
        """
        return (
            ElementTree.tostring(self._etree, encoding="utf8").decode("utf8").rstrip()
        )

    ##////////////////////////////////////////////////////////////
    # { Element interface Delegation (pass-through)
    ##////////////////////////////////////////////////////////////

    def __getattr__(self, attrib):
        # Any attribute not found on the wrapper is read from the
        # wrapped element (e.g. .tag, .text, .attrib).
        return getattr(self._etree, attrib)

    def __setattr__(self, attr, value):
        # All attribute writes are forwarded to the wrapped element;
        # see __init__ for how _etree itself is stored.
        return setattr(self._etree, attr, value)

    def __delattr__(self, attr):
        return delattr(self._etree, attr)

    def __setitem__(self, index, element):
        # Child assignment is forwarded unwrapped.
        self._etree[index] = element

    def __delitem__(self, index):
        del self._etree[index]

    def __setslice__(self, start, stop, elements):
        # NOTE(review): __setslice__/__delslice__ are Python 2 hooks and are
        # never invoked by Python 3 slicing — likely vestigial here.
        self._etree[start:stop] = elements

    def __delslice__(self, start, stop):
        del self._etree[start:stop]

    def __len__(self):
        return len(self._etree)

    ##////////////////////////////////////////////////////////////
    # { Element interface Delegation (wrap result)
    ##////////////////////////////////////////////////////////////

    def __getitem__(self, index):
        # Children read back out are wrapped so they print nicely too.
        return ElementWrapper(self._etree[index])

    def __getslice__(self, start, stop):
        return [ElementWrapper(elt) for elt in self._etree[start:stop]]

    def getchildren(self):
        return [ElementWrapper(elt) for elt in self._etree]

    def getiterator(self, tag=None):
        # NOTE(review): Element.getiterator was removed from the stdlib in
        # Python 3.9; on such versions this delegation raises
        # AttributeError — confirm whether Element.iter should be used.
        return (ElementWrapper(elt) for elt in self._etree.getiterator(tag))

    def makeelement(self, tag, attrib):
        return ElementWrapper(self._etree.makeelement(tag, attrib))

    def find(self, path):
        # Preserve the None-on-no-match contract of Element.find.
        elt = self._etree.find(path)
        if elt is None:
            return elt
        else:
            return ElementWrapper(elt)

    def findall(self, path):
        return [ElementWrapper(elt) for elt in self._etree.findall(path)]
|
| 1009 |
+
|
| 1010 |
+
|
| 1011 |
+
######################################################################
|
| 1012 |
+
# Helper for Handling Slicing
|
| 1013 |
+
######################################################################
|
| 1014 |
+
|
| 1015 |
+
|
| 1016 |
+
def slice_bounds(sequence, slice_obj, allow_step=False):
    """
    Given a slice, return the corresponding (start, stop) bounds,
    taking into account None indices and negative indices. The
    following guarantees are made for the returned start and stop values:

    - 0 <= start <= len(sequence)
    - 0 <= stop <= len(sequence)
    - start <= stop

    :param sequence: the sequence the slice will be applied to; its
        length is only computed when strictly necessary (see below).
    :param slice_obj: the ``slice`` object to normalize.
    :raise ValueError: If ``slice_obj.step`` is not None.
    :param allow_step: If true, then the slice object may have a
        non-None step. If it does, then return a tuple
        (start, stop, step).
    """
    start, stop = (slice_obj.start, slice_obj.stop)

    # If allow_step is true, then include the step in our return
    # value tuple.
    if allow_step:
        step = slice_obj.step
        if step is None:
            step = 1
        # Use a recursive call without allow_step to find the slice
        # bounds. If step is negative, then the roles of start and
        # stop (in terms of default values, etc), are swapped.
        if step < 0:
            start, stop = slice_bounds(sequence, slice(stop, start))
        else:
            start, stop = slice_bounds(sequence, slice(start, stop))
        return start, stop, step

    # Otherwise, make sure that no non-default step value is used.
    elif slice_obj.step not in (None, 1):
        raise ValueError(
            "slices with steps are not supported by %s" % sequence.__class__.__name__
        )

    # Supply default offsets.
    if start is None:
        start = 0
    if stop is None:
        stop = len(sequence)

    # Handle negative indices.
    if start < 0:
        start = max(0, len(sequence) + start)
    if stop < 0:
        stop = max(0, len(sequence) + stop)

    # Make sure stop doesn't go past the end of the list. Note that
    # we avoid calculating len(sequence) if possible, because for lazy
    # sequences, calculating the length of a sequence can be expensive.
    # Indexing sequence[stop - 1] is used as a cheap existence probe:
    # an IndexError means stop overshoots, so clamp to the true length.
    if stop > 0:
        try:
            sequence[stop - 1]
        except IndexError:
            stop = len(sequence)

    # Make sure start isn't past stop.
    start = min(start, stop)

    # That's all folks!
    return start, stop
|
| 1080 |
+
|
| 1081 |
+
|
| 1082 |
+
######################################################################
|
| 1083 |
+
# Permission Checking
|
| 1084 |
+
######################################################################
|
| 1085 |
+
|
| 1086 |
+
|
| 1087 |
+
def is_writable(path):
    """Return True if *path* exists and appears writable by this process.

    On POSIX platforms the file's permission bits are inspected directly
    (world-, owner-, then group-writability); elsewhere any existing
    path is assumed writable.
    """
    # A path that does not exist cannot be written to.
    if not os.path.exists(path):
        return False

    # POSIX: decide from the permission bits.
    if hasattr(os, "getuid"):
        info = os.stat(path)
        mode = stat.S_IMODE(info.st_mode)
        if mode & 0o002:
            # world-writable
            return True
        if info.st_uid == os.getuid() and mode & 0o200:
            # we own it and it is owner-writable
            return True
        if info.st_gid in [os.getgid()] + os.getgroups() and mode & 0o020:
            # one of our groups may write to it
            return True
        return False

    # Otherwise, we'll assume it's writable.
    # [xx] should we do other checks on other platforms?
    return True
|
| 1112 |
+
|
| 1113 |
+
|
| 1114 |
+
######################################################################
|
| 1115 |
+
# NLTK Error reporting
|
| 1116 |
+
######################################################################
|
| 1117 |
+
|
| 1118 |
+
|
| 1119 |
+
def raise_unorderable_types(ordering, a, b):
    """Raise a TypeError stating that *a* and *b* cannot be compared
    with the comparison operator named by *ordering* (e.g. ``"<"``)."""
    message = f"unorderable types: {type(a).__name__}() {ordering} {type(b).__name__}()"
    raise TypeError(message)
|
lib/python3.10/site-packages/nltk/jsontags.py
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Natural Language Toolkit: JSON Encoder/Decoder Helpers
|
| 2 |
+
#
|
| 3 |
+
# Copyright (C) 2001-2023 NLTK Project
|
| 4 |
+
# Author: Steven Xu <xxu@student.unimelb.edu.au>
|
| 5 |
+
#
|
| 6 |
+
# URL: <https://www.nltk.org/>
|
| 7 |
+
# For license information, see LICENSE.TXT
|
| 8 |
+
|
| 9 |
+
"""
|
| 10 |
+
Register JSON tags, so the nltk data loader knows what module and class to look for.
|
| 11 |
+
|
| 12 |
+
NLTK uses simple '!' tags to mark the types of objects, but the fully-qualified
|
| 13 |
+
"tag:nltk.org,2011:" prefix is also accepted in case anyone ends up
|
| 14 |
+
using it.
|
| 15 |
+
"""
|
| 16 |
+
|
| 17 |
+
import json
|
| 18 |
+
|
| 19 |
+
json_tags = {}
|
| 20 |
+
|
| 21 |
+
TAG_PREFIX = "!"
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def register_tag(cls):
    """
    Class decorator that registers *cls* in the module-level
    ``json_tags`` registry under its ``json_tag`` attribute, prefixed
    with ``TAG_PREFIX``.  Returns *cls* unchanged.
    """
    tag = getattr(cls, "json_tag")
    json_tags[TAG_PREFIX + tag] = cls
    return cls
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class JSONTaggedEncoder(json.JSONEncoder):
    """JSON encoder that serializes objects carrying a ``json_tag``
    attribute as a one-entry dict ``{TAG_PREFIX + tag: payload}``,
    where the payload comes from the object's ``encode_json_obj()``."""

    def default(self, obj):
        tag = getattr(obj, "json_tag", None)
        if tag is None:
            # Not one of ours: defer to the standard encoder (raises TypeError).
            return super().default(obj)
        return {TAG_PREFIX + tag: obj.encode_json_obj()}
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class JSONTaggedDecoder(json.JSONDecoder):
    """JSON decoder that reconstructs registered objects from tagged
    one-entry dicts produced by ``JSONTaggedEncoder``."""

    def decode(self, s):
        """Decode *s* as JSON, then recursively resolve tagged objects."""
        return self.decode_obj(super().decode(s))

    @classmethod
    def decode_obj(cls, obj):
        """Recursively replace tagged one-entry dicts with decoded objects.

        :raises ValueError: if a tag is present but not registered in
            ``json_tags``.
        """
        # Decode nested objects first.
        if isinstance(obj, dict):
            obj = {key: cls.decode_obj(val) for (key, val) in obj.items()}
        elif isinstance(obj, list):
            obj = list(cls.decode_obj(val) for val in obj)
        # Check if we have a tagged object (a dict with exactly one key).
        if not isinstance(obj, dict) or len(obj) != 1:
            return obj
        obj_tag = next(iter(obj.keys()))
        # Fixed for consistency: use the shared TAG_PREFIX constant instead
        # of a hard-coded "!", matching register_tag and JSONTaggedEncoder.
        if not obj_tag.startswith(TAG_PREFIX):
            return obj
        if obj_tag not in json_tags:
            raise ValueError("Unknown tag", obj_tag)
        obj_cls = json_tags[obj_tag]
        return obj_cls.decode_json_obj(obj[obj_tag])
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
__all__ = ["register_tag", "json_tags", "JSONTaggedEncoder", "JSONTaggedDecoder"]
|
lib/python3.10/site-packages/nltk/langnames.py
ADDED
|
@@ -0,0 +1,730 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Natural Language Toolkit: Language Codes
|
| 2 |
+
#
|
| 3 |
+
# Copyright (C) 2022-2023 NLTK Project
|
| 4 |
+
# Author: Eric Kafe <kafe.eric@gmail.com>
|
| 5 |
+
# URL: <https://www.nltk.org/>
|
| 6 |
+
# For license information, see LICENSE.TXT
|
| 7 |
+
#
|
| 8 |
+
# iso639-3 language codes (C) https://iso639-3.sil.org/
|
| 9 |
+
|
| 10 |
+
"""
|
| 11 |
+
Translate between language names and language codes.
|
| 12 |
+
|
| 13 |
+
The iso639-3 language codes were downloaded from the registration authority at
|
| 14 |
+
https://iso639-3.sil.org/
|
| 15 |
+
|
| 16 |
+
The iso639-3 codeset is evolving, so retired language codes are kept in the
|
| 17 |
+
"iso639retired" dictionary, which is used as fallback by the wrapper functions
|
| 18 |
+
"langname" and "langcode", in order to support the lookup of retired codes.
|
| 19 |
+
|
| 20 |
+
The "langcode" function returns the current iso639-3 code if there is one,
|
| 21 |
+
and falls back to the retired code otherwise. As specified by BCP-47,
|
| 22 |
+
it returns the shortest (2-letter) code by default, but 3-letter codes
|
| 23 |
+
are also available:
|
| 24 |
+
|
| 25 |
+
>>> import nltk.langnames as lgn
|
| 26 |
+
>>> lgn.langname('fri') #'fri' is a retired code
|
| 27 |
+
'Western Frisian'
|
| 28 |
+
|
| 29 |
+
The current code is different from the retired one:
|
| 30 |
+
>>> lgn.langcode('Western Frisian')
|
| 31 |
+
'fy'
|
| 32 |
+
|
| 33 |
+
>>> lgn.langcode('Western Frisian', typ = 3)
|
| 34 |
+
'fry'
|
| 35 |
+
|
| 36 |
+
"""
|
| 37 |
+
|
| 38 |
+
import re
|
| 39 |
+
from warnings import warn
|
| 40 |
+
|
| 41 |
+
from nltk.corpus import bcp47
|
| 42 |
+
|
| 43 |
+
codepattern = re.compile("[a-z][a-z][a-z]?")
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def langname(tag, typ="full"):
    """
    Convert a composite BCP-47 tag to a language name

    >>> from nltk.langnames import langname
    >>> langname('ca-Latn-ES-valencia')
    'Catalan: Latin: Spain: Valencian'

    >>> langname('ca-Latn-ES-valencia', typ="short")
    'Catalan'
    """
    # The language subtag is the first hyphen-separated component.
    tags = tag.split("-")
    code = tags[0].lower()
    if codepattern.fullmatch(code):
        if code in iso639retired:  # retired codes
            # Retired codes resolve directly to a name; subtags are ignored.
            return iso639retired[code]
        elif code in iso639short:  # 3-letter codes
            code2 = iso639short[code]  # convert to 2-letter code
            warn(f"Shortening {code!r} to {code2!r}", stacklevel=2)
            tag = "-".join([code2] + tags[1:])
        name = bcp47.name(tag)  # parse according to BCP-47
        if typ == "full":
            return name  # include all subtags
        elif name:
            return name.split(":")[0]  # only the language subtag
        # NOTE(review): implicitly returns None when typ != "full" and the
        # tag could not be resolved to a name.
    else:
        warn(f"Could not find code in {code!r}", stacklevel=2)
        # Implicitly returns None after warning on a malformed code.
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def langcode(name, typ=2):
    """
    Convert language name to iso639-3 language code. Returns the short 2-letter
    code by default, if one is available, and the 3-letter code otherwise:

    >>> from nltk.langnames import langcode
    >>> langcode('Modern Greek (1453-)')
    'el'

    Specify 'typ=3' to get the 3-letter code:

    >>> langcode('Modern Greek (1453-)', typ=3)
    'ell'
    """
    if name in bcp47.langcode:
        code = bcp47.langcode[name]
        if typ == 3 and code in iso639long:
            code = iso639long[code]  # convert to 3-letter code
        return code
    elif name in iso639code_retired:
        # Fall back to the retired-codes table when the name is no longer
        # in the current bcp47 registry.
        return iso639code_retired[name]
    else:
        warn(f"Could not find language in {name!r}", stacklevel=2)
        # Implicitly returns None when the name is unknown.
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
# =======================================================================
|
| 101 |
+
# Translate betwwen Wikidata Q-codes and BCP-47 codes or names
|
| 102 |
+
# .......................................................................
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def tag2q(tag):
    """
    Convert BCP-47 tag to Wikidata Q-code

    >>> tag2q('nds-u-sd-demv')
    'Q4289225'
    """
    qcode = bcp47.wiki_q[tag]
    return qcode
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def q2tag(qcode):
    """
    Convert Wikidata Q-code to BCP-47 tag

    >>> q2tag('Q4289225')
    'nds-u-sd-demv'
    """
    tag = wiki_bcp47[qcode]
    return tag
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def q2name(qcode, typ="full"):
    """
    Convert Wikidata Q-code to BCP-47 (full or short) language name

    >>> q2name('Q4289225')
    'Low German: Mecklenburg-Vorpommern'

    >>> q2name('Q4289225', "short")
    'Low German'
    """
    # Compose the two existing conversions: Q-code -> tag -> name.
    tag = q2tag(qcode)
    return langname(tag, typ)
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
def lang2q(name):
    """
    Convert simple language name to Wikidata Q-code

    >>> lang2q('Low German')
    'Q25433'
    """
    # Compose the two existing conversions: name -> code -> Q-code.
    code = langcode(name)
    return tag2q(code)
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
# ======================================================================
|
| 149 |
+
# Data dictionaries
|
| 150 |
+
# ......................................................................
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
def inverse_dict(dic):
    """Return the inverse mapping of *dic* (value -> key).

    The inverse is only well defined when *dic* is bijective, i.e. no
    two keys share a value.  If the mapping is not bijective, a warning
    is emitted and ``None`` is returned (rather than silently dropping
    colliding entries).
    """
    # A dict is invertible iff its values are pairwise distinct;
    # len(dic) is the idiomatic (and cheaper) form of len(dic.keys()).
    if len(dic) == len(set(dic.values())):
        return {val: key for (key, val) in dic.items()}
    else:
        warn("This dictionary has no bijective inverse mapping.")
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
# The Wikidata table is not loaded when bcp47 is imported; it must be
# requested explicitly before bcp47.wiki_q can be used below.
bcp47.load_wiki_q()  # Wikidata conversion table needs to be loaded explicitly
# Inverse table: Wikidata Q-code -> BCP-47 tag (used by q2tag).
wiki_bcp47 = inverse_dict(bcp47.wiki_q)
|
| 163 |
+
|
| 164 |
+
# ISO 639 code table: 3-letter (ISO 639-2/3) code -> 2-letter (ISO 639-1)
# code, for the languages that have both.  Its inverse (iso639long,
# built below) converts 2-letter codes back to 3-letter codes.
iso639short = {
    "aar": "aa",
    "abk": "ab",
    "afr": "af",
    "aka": "ak",
    "amh": "am",
    "ara": "ar",
    "arg": "an",
    "asm": "as",
    "ava": "av",
    "ave": "ae",
    "aym": "ay",
    "aze": "az",
    "bak": "ba",
    "bam": "bm",
    "bel": "be",
    "ben": "bn",
    "bis": "bi",
    "bod": "bo",
    "bos": "bs",
    "bre": "br",
    "bul": "bg",
    "cat": "ca",
    "ces": "cs",
    "cha": "ch",
    "che": "ce",
    "chu": "cu",
    "chv": "cv",
    "cor": "kw",
    "cos": "co",
    "cre": "cr",
    "cym": "cy",
    "dan": "da",
    "deu": "de",
    "div": "dv",
    "dzo": "dz",
    "ell": "el",
    "eng": "en",
    "epo": "eo",
    "est": "et",
    "eus": "eu",
    "ewe": "ee",
    "fao": "fo",
    "fas": "fa",
    "fij": "fj",
    "fin": "fi",
    "fra": "fr",
    "fry": "fy",
    "ful": "ff",
    "gla": "gd",
    "gle": "ga",
    "glg": "gl",
    "glv": "gv",
    "grn": "gn",
    "guj": "gu",
    "hat": "ht",
    "hau": "ha",
    "hbs": "sh",
    "heb": "he",
    "her": "hz",
    "hin": "hi",
    "hmo": "ho",
    "hrv": "hr",
    "hun": "hu",
    "hye": "hy",
    "ibo": "ig",
    "ido": "io",
    "iii": "ii",
    "iku": "iu",
    "ile": "ie",
    "ina": "ia",
    "ind": "id",
    "ipk": "ik",
    "isl": "is",
    "ita": "it",
    "jav": "jv",
    "jpn": "ja",
    "kal": "kl",
    "kan": "kn",
    "kas": "ks",
    "kat": "ka",
    "kau": "kr",
    "kaz": "kk",
    "khm": "km",
    "kik": "ki",
    "kin": "rw",
    "kir": "ky",
    "kom": "kv",
    "kon": "kg",
    "kor": "ko",
    "kua": "kj",
    "kur": "ku",
    "lao": "lo",
    "lat": "la",
    "lav": "lv",
    "lim": "li",
    "lin": "ln",
    "lit": "lt",
    "ltz": "lb",
    "lub": "lu",
    "lug": "lg",
    "mah": "mh",
    "mal": "ml",
    "mar": "mr",
    "mkd": "mk",
    "mlg": "mg",
    "mlt": "mt",
    "mon": "mn",
    "mri": "mi",
    "msa": "ms",
    "mya": "my",
    "nau": "na",
    "nav": "nv",
    "nbl": "nr",
    "nde": "nd",
    "ndo": "ng",
    "nep": "ne",
    "nld": "nl",
    "nno": "nn",
    "nob": "nb",
    "nor": "no",
    "nya": "ny",
    "oci": "oc",
    "oji": "oj",
    "ori": "or",
    "orm": "om",
    "oss": "os",
    "pan": "pa",
    "pli": "pi",
    "pol": "pl",
    "por": "pt",
    "pus": "ps",
    "que": "qu",
    "roh": "rm",
    "ron": "ro",
    "run": "rn",
    "rus": "ru",
    "sag": "sg",
    "san": "sa",
    "sin": "si",
    "slk": "sk",
    "slv": "sl",
    "sme": "se",
    "smo": "sm",
    "sna": "sn",
    "snd": "sd",
    "som": "so",
    "sot": "st",
    "spa": "es",
    "sqi": "sq",
    "srd": "sc",
    "srp": "sr",
    "ssw": "ss",
    "sun": "su",
    "swa": "sw",
    "swe": "sv",
    "tah": "ty",
    "tam": "ta",
    "tat": "tt",
    "tel": "te",
    "tgk": "tg",
    "tgl": "tl",
    "tha": "th",
    "tir": "ti",
    "ton": "to",
    "tsn": "tn",
    "tso": "ts",
    "tuk": "tk",
    "tur": "tr",
    "twi": "tw",
    "uig": "ug",
    "ukr": "uk",
    "urd": "ur",
    "uzb": "uz",
    "ven": "ve",
    "vie": "vi",
    "vol": "vo",
    "wln": "wa",
    "wol": "wo",
    "xho": "xh",
    "yid": "yi",
    "yor": "yo",
    "zha": "za",
    "zho": "zh",
    "zul": "zu",
}
|
| 350 |
+
|
| 351 |
+
|
| 352 |
+
# Retired ISO 639-3 code -> language reference name.  These codes were
# withdrawn from the standard but may still appear in older data; the
# inverse (iso639code_retired, built below) maps names back to codes.
iso639retired = {
    "fri": "Western Frisian",
    "auv": "Auvergnat",
    "gsc": "Gascon",
    "lms": "Limousin",
    "lnc": "Languedocien",
    "prv": "Provençal",
    "amd": "Amapá Creole",
    "bgh": "Bogan",
    "bnh": "Banawá",
    "bvs": "Belgian Sign Language",
    "ccy": "Southern Zhuang",
    "cit": "Chittagonian",
    "flm": "Falam Chin",
    "jap": "Jaruára",
    "kob": "Kohoroxitari",
    "mob": "Moinba",
    "mzf": "Aiku",
    "nhj": "Tlalitzlipa Nahuatl",
    "nhs": "Southeastern Puebla Nahuatl",
    "occ": "Occidental",
    "tmx": "Tomyang",
    "tot": "Patla-Chicontla Totonac",
    "xmi": "Miarrã",
    "yib": "Yinglish",
    "ztc": "Lachirioag Zapotec",
    "atf": "Atuence",
    "bqe": "Navarro-Labourdin Basque",
    "bsz": "Souletin Basque",
    "aex": "Amerax",
    "ahe": "Ahe",
    "aiz": "Aari",
    "akn": "Amikoana",
    "arf": "Arafundi",
    "azr": "Adzera",
    "bcx": "Pamona",
    "bii": "Bisu",
    "bke": "Bengkulu",
    "blu": "Hmong Njua",
    "boc": "Bakung Kenyah",
    "bsd": "Sarawak Bisaya",
    "bwv": "Bahau River Kenyah",
    "bxt": "Buxinhua",
    "byu": "Buyang",
    "ccx": "Northern Zhuang",
    "cru": "Carútana",
    "dat": "Darang Deng",
    "dyk": "Land Dayak",
    "eni": "Enim",
    "fiz": "Izere",
    "gen": "Geman Deng",
    "ggh": "Garreh-Ajuran",
    "itu": "Itutang",
    "kds": "Lahu Shi",
    "knh": "Kayan River Kenyah",
    "krg": "North Korowai",
    "krq": "Krui",
    "kxg": "Katingan",
    "lmt": "Lematang",
    "lnt": "Lintang",
    "lod": "Berawan",
    "mbg": "Northern Nambikuára",
    "mdo": "Southwest Gbaya",
    "mhv": "Arakanese",
    "miv": "Mimi",
    "mqd": "Madang",
    "nky": "Khiamniungan Naga",
    "nxj": "Nyadu",
    "ogn": "Ogan",
    "ork": "Orokaiva",
    "paj": "Ipeka-Tapuia",
    "pec": "Southern Pesisir",
    "pen": "Penesak",
    "plm": "Palembang",
    "poj": "Lower Pokomo",
    "pun": "Pubian",
    "rae": "Ranau",
    "rjb": "Rajbanshi",
    "rws": "Rawas",
    "sdd": "Semendo",
    "sdi": "Sindang Kelingi",
    "skl": "Selako",
    "slb": "Kahumamahon Saluan",
    "srj": "Serawai",
    "suf": "Tarpia",
    "suh": "Suba",
    "suu": "Sungkai",
    "szk": "Sizaki",
    "tle": "Southern Marakwet",
    "tnj": "Tanjong",
    "ttx": "Tutong 1",
    "ubm": "Upper Baram Kenyah",
    "vky": "Kayu Agung",
    "vmo": "Muko-Muko",
    "wre": "Ware",
    "xah": "Kahayan",
    "xkm": "Mahakam Kenyah",
    "xuf": "Kunfal",
    "yio": "Dayao Yi",
    "ymj": "Muji Yi",
    "ypl": "Pula Yi",
    "ypw": "Puwa Yi",
    "ywm": "Wumeng Yi",
    "yym": "Yuanjiang-Mojiang Yi",
    "mly": "Malay (individual language)",
    "muw": "Mundari",
    "xst": "Silt'e",
    "ope": "Old Persian",
    "scc": "Serbian",
    "scr": "Croatian",
    "xsk": "Sakan",
    "mol": "Moldavian",
    "aay": "Aariya",
    "acc": "Cubulco Achí",
    "cbm": "Yepocapa Southwestern Cakchiquel",
    "chs": "Chumash",
    "ckc": "Northern Cakchiquel",
    "ckd": "South Central Cakchiquel",
    "cke": "Eastern Cakchiquel",
    "ckf": "Southern Cakchiquel",
    "cki": "Santa María De Jesús Cakchiquel",
    "ckj": "Santo Domingo Xenacoj Cakchiquel",
    "ckk": "Acatenango Southwestern Cakchiquel",
    "ckw": "Western Cakchiquel",
    "cnm": "Ixtatán Chuj",
    "cti": "Tila Chol",
    "cun": "Cunén Quiché",
    "eml": "Emiliano-Romagnolo",
    "eur": "Europanto",
    "gmo": "Gamo-Gofa-Dawro",
    "hsf": "Southeastern Huastec",
    "hva": "San Luís Potosí Huastec",
    "ixi": "Nebaj Ixil",
    "ixj": "Chajul Ixil",
    "jai": "Western Jacalteco",
    "mms": "Southern Mam",
    "mpf": "Tajumulco Mam",
    "mtz": "Tacanec",
    "mvc": "Central Mam",
    "mvj": "Todos Santos Cuchumatán Mam",
    "poa": "Eastern Pokomam",
    "pob": "Western Pokomchí",
    "pou": "Southern Pokomam",
    "ppv": "Papavô",
    "quj": "Joyabaj Quiché",
    "qut": "West Central Quiché",
    "quu": "Eastern Quiché",
    "qxi": "San Andrés Quiché",
    "sic": "Malinguat",
    "stc": "Santa Cruz",
    "tlz": "Toala'",
    "tzb": "Bachajón Tzeltal",
    "tzc": "Chamula Tzotzil",
    "tze": "Chenalhó Tzotzil",
    "tzs": "San Andrés Larrainzar Tzotzil",
    "tzt": "Western Tzutujil",
    "tzu": "Huixtán Tzotzil",
    "tzz": "Zinacantán Tzotzil",
    "vlr": "Vatrata",
    "yus": "Chan Santa Cruz Maya",
    "nfg": "Nyeng",
    "nfk": "Shakara",
    "agp": "Paranan",
    "bhk": "Albay Bicolano",
    "bkb": "Finallig",
    "btb": "Beti (Cameroon)",
    "cjr": "Chorotega",
    "cmk": "Chimakum",
    "drh": "Darkhat",
    "drw": "Darwazi",
    "gav": "Gabutamon",
    "mof": "Mohegan-Montauk-Narragansett",
    "mst": "Cataelano Mandaya",
    "myt": "Sangab Mandaya",
    "rmr": "Caló",
    "sgl": "Sanglechi-Ishkashimi",
    "sul": "Surigaonon",
    "sum": "Sumo-Mayangna",
    "tnf": "Tangshewi",
    "wgw": "Wagawaga",
    "ayx": "Ayi (China)",
    "bjq": "Southern Betsimisaraka Malagasy",
    "dha": "Dhanwar (India)",
    "dkl": "Kolum So Dogon",
    "mja": "Mahei",
    "nbf": "Naxi",
    "noo": "Nootka",
    "tie": "Tingal",
    "tkk": "Takpa",
    "baz": "Tunen",
    "bjd": "Bandjigali",
    "ccq": "Chaungtha",
    "cka": "Khumi Awa Chin",
    "dap": "Nisi (India)",
    "dwl": "Walo Kumbe Dogon",
    "elp": "Elpaputih",
    "gbc": "Garawa",
    "gio": "Gelao",
    "hrr": "Horuru",
    "ibi": "Ibilo",
    "jar": "Jarawa (Nigeria)",
    "kdv": "Kado",
    "kgh": "Upper Tanudan Kalinga",
    "kpp": "Paku Karen",
    "kzh": "Kenuzi-Dongola",
    "lcq": "Luhu",
    "mgx": "Omati",
    "nln": "Durango Nahuatl",
    "pbz": "Palu",
    "pgy": "Pongyong",
    "sca": "Sansu",
    "tlw": "South Wemale",
    "unp": "Worora",
    "wiw": "Wirangu",
    "ybd": "Yangbye",
    "yen": "Yendang",
    "yma": "Yamphe",
    "daf": "Dan",
    "djl": "Djiwarli",
    "ggr": "Aghu Tharnggalu",
    "ilw": "Talur",
    "izi": "Izi-Ezaa-Ikwo-Mgbo",
    "meg": "Mea",
    "mld": "Malakhel",
    "mnt": "Maykulan",
    "mwd": "Mudbura",
    "myq": "Forest Maninka",
    "nbx": "Ngura",
    "nlr": "Ngarla",
    "pcr": "Panang",
    "ppr": "Piru",
    "tgg": "Tangga",
    "wit": "Wintu",
    "xia": "Xiandao",
    "yiy": "Yir Yoront",
    "yos": "Yos",
    "emo": "Emok",
    "ggm": "Gugu Mini",
    "leg": "Lengua",
    "lmm": "Lamam",
    "mhh": "Maskoy Pidgin",
    "puz": "Purum Naga",
    "sap": "Sanapaná",
    "yuu": "Yugh",
    "aam": "Aramanik",
    "adp": "Adap",
    "aue": "ǂKxʼauǁʼein",
    "bmy": "Bemba (Democratic Republic of Congo)",
    "bxx": "Borna (Democratic Republic of Congo)",
    "byy": "Buya",
    "dzd": "Daza",
    "gfx": "Mangetti Dune ǃXung",
    "gti": "Gbati-ri",
    "ime": "Imeraguen",
    "kbf": "Kakauhua",
    "koj": "Sara Dunjo",
    "kwq": "Kwak",
    "kxe": "Kakihum",
    "lii": "Lingkhim",
    "mwj": "Maligo",
    "nnx": "Ngong",
    "oun": "ǃOǃung",
    "pmu": "Mirpur Panjabi",
    "sgo": "Songa",
    "thx": "The",
    "tsf": "Southwestern Tamang",
    "uok": "Uokha",
    "xsj": "Subi",
    "yds": "Yiddish Sign Language",
    "ymt": "Mator-Taygi-Karagas",
    "ynh": "Yangho",
    "bgm": "Baga Mboteni",
    "btl": "Bhatola",
    "cbe": "Chipiajes",
    "cbh": "Cagua",
    "coy": "Coyaima",
    "cqu": "Chilean Quechua",
    "cum": "Cumeral",
    "duj": "Dhuwal",
    "ggn": "Eastern Gurung",
    "ggo": "Southern Gondi",
    "guv": "Gey",
    "iap": "Iapama",
    "ill": "Iranun",
    "kgc": "Kasseng",
    "kox": "Coxima",
    "ktr": "Kota Marudu Tinagas",
    "kvs": "Kunggara",
    "kzj": "Coastal Kadazan",
    "kzt": "Tambunan Dusun",
    "nad": "Nijadali",
    "nts": "Natagaimas",
    "ome": "Omejes",
    "pmc": "Palumata",
    "pod": "Ponares",
    "ppa": "Pao",
    "pry": "Pray 3",
    "rna": "Runa",
    "svr": "Savara",
    "tdu": "Tempasuk Dusun",
    "thc": "Tai Hang Tong",
    "tid": "Tidong",
    "tmp": "Tai Mène",
    "tne": "Tinoc Kallahan",
    "toe": "Tomedes",
    "xba": "Kamba (Brazil)",
    "xbx": "Kabixí",
    "xip": "Xipináwa",
    "xkh": "Karahawyana",
    "yri": "Yarí",
    "jeg": "Jeng",
    "kgd": "Kataang",
    "krm": "Krim",
    "prb": "Lua'",
    "puk": "Pu Ko",
    "rie": "Rien",
    "rsi": "Rennellese Sign Language",
    "skk": "Sok",
    "snh": "Shinabo",
    "lsg": "Lyons Sign Language",
    "mwx": "Mediak",
    "mwy": "Mosiro",
    "ncp": "Ndaktup",
    "ais": "Nataoran Amis",
    "asd": "Asas",
    "dit": "Dirari",
    "dud": "Hun-Saare",
    "lba": "Lui",
    "llo": "Khlor",
    "myd": "Maramba",
    "myi": "Mina (India)",
    "nns": "Ningye",
    "aoh": "Arma",
    "ayy": "Tayabas Ayta",
    "bbz": "Babalia Creole Arabic",
    "bpb": "Barbacoas",
    "cca": "Cauca",
    "cdg": "Chamari",
    "dgu": "Degaru",
    "drr": "Dororo",
    "ekc": "Eastern Karnic",
    "gli": "Guliguli",
    "kjf": "Khalaj",
    "kxl": "Nepali Kurux",
    "kxu": "Kui (India)",
    "lmz": "Lumbee",
    "nxu": "Narau",
    "plp": "Palpa",
    "sdm": "Semandang",
    "tbb": "Tapeba",
    "xrq": "Karranga",
    "xtz": "Tasmanian",
    "zir": "Ziriya",
    "thw": "Thudam",
    "bic": "Bikaru",
    "bij": "Vaghat-Ya-Bijim-Legeri",
    "blg": "Balau",
    "gji": "Geji",
    "mvm": "Muya",
    "ngo": "Ngoni",
    "pat": "Papitalai",
    "vki": "Ija-Zuba",
    "wra": "Warapu",
    "ajt": "Judeo-Tunisian Arabic",
    "cug": "Chungmboko",
    "lak": "Laka (Nigeria)",
    "lno": "Lango (South Sudan)",
    "pii": "Pini",
    "smd": "Sama",
    "snb": "Sebuyau",
    "uun": "Kulon-Pazeh",
    "wrd": "Warduji",
    "wya": "Wyandot",
}
|
| 726 |
+
|
| 727 |
+
|
| 728 |
+
# 2-letter ISO 639-1 code -> 3-letter code (inverse of iso639short).
iso639long = inverse_dict(iso639short)

# Retired-language reference name -> retired ISO 639-3 code
# (inverse of iso639retired; assumes the names are unique).
iso639code_retired = inverse_dict(iso639retired)
|
lib/python3.10/site-packages/nltk/lazyimport.py
ADDED
|
@@ -0,0 +1,142 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This module is from mx/DateTime/LazyModule.py and is
|
| 2 |
+
# distributed under the terms of the eGenix.com Public License Agreement
|
| 3 |
+
# https://www.egenix.com/products/eGenix.com-Public-License-1.1.0.pdf
|
| 4 |
+
|
| 5 |
+
""" Helper to enable simple lazy module import.
|
| 6 |
+
|
| 7 |
+
'Lazy' means the actual import is deferred until an attribute is
|
| 8 |
+
requested from the module's namespace. This has the advantage of
|
| 9 |
+
allowing all imports to be done at the top of a script (in a
|
| 10 |
+
prominent and visible place) without having a great impact
|
| 11 |
+
on startup time.
|
| 12 |
+
|
| 13 |
+
Copyright (c) 1999-2005, Marc-Andre Lemburg; mailto:mal@lemburg.com
|
| 14 |
+
See the documentation for further information on copyrights,
|
| 15 |
+
or contact the author. All Rights Reserved.
|
| 16 |
+
"""
|
| 17 |
+
|
| 18 |
+
### Constants
|
| 19 |
+
|
| 20 |
+
_debug = 0  # set to a nonzero value to print lazy-loading trace messages
|
| 21 |
+
|
| 22 |
+
###
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class LazyModule:

    """Lazy module class.

    Lazy modules are imported into the given namespaces whenever a
    non-special attribute (there are some attributes like __doc__
    that class instances handle without calling __getattr__) is
    requested. The module is then registered under the given name
    in locals usually replacing the import wrapper instance. The
    import itself is done using globals as global namespace.

    Example of creating a lazy load module:

    ISO = LazyModule('ISO',locals(),globals())

    Later, requesting an attribute from ISO will load the module
    automatically into the locals() namespace, overriding the
    LazyModule instance:

    t = ISO.Week(1998,1,1)

    """

    # NOTE: these double-underscore class attributes are name-mangled by
    # Python to _LazyModule__lazymodule_* — attribute access from inside
    # the class goes through the mangled names.

    # Flag which indicates whether the LazyModule is initialized or not
    __lazymodule_init = 0

    # Name of the module to load
    __lazymodule_name = ""

    # Flag which indicates whether the module was loaded or not
    __lazymodule_loaded = 0

    # Locals dictionary where to register the module
    __lazymodule_locals = None

    # Globals dictionary to use for the module import
    __lazymodule_globals = None

    def __init__(self, name, locals, globals=None):

        """Create a LazyModule instance wrapping module name.

        The module will later on be registered in locals under the
        given module name.

        globals is optional and defaults to locals.

        """
        # These assignments run while __lazymodule_init is still 0, so
        # __setattr__ stores them directly into the instance dict.
        self.__lazymodule_locals = locals
        if globals is None:
            globals = locals
        self.__lazymodule_globals = globals
        mainname = globals.get("__name__", "")
        if mainname:
            # Inside a package: qualify the module name, e.g. "nltk.toolbox".
            self.__name__ = mainname + "." + name
            self.__lazymodule_name = name
        else:
            self.__name__ = self.__lazymodule_name = name
        # From here on, __setattr__ triggers the lazy import machinery.
        self.__lazymodule_init = 1

    def __lazymodule_import(self):

        """Import the module now."""
        # Load and register module
        local_name = self.__lazymodule_name  # e.g. "toolbox"
        full_name = self.__name__  # e.g. "nltk.toolbox"
        if self.__lazymodule_loaded:
            return self.__lazymodule_locals[local_name]
        if _debug:
            print("LazyModule: Loading module %r" % full_name)
        # Register the real module in the caller's namespace, replacing
        # this wrapper for future lookups by name.
        self.__lazymodule_locals[local_name] = module = __import__(
            full_name, self.__lazymodule_locals, self.__lazymodule_globals, "*"
        )

        # Fill namespace with all symbols from original module to
        # provide faster access.
        self.__dict__.update(module.__dict__)

        # Set import flag
        # NOTE(review): this literal key is NOT name-mangled, while reads of
        # self.__lazymodule_loaded look up "_LazyModule__lazymodule_loaded";
        # the flag write therefore appears ineffective, and references that
        # still go through the wrapper re-run the import (cheap, since the
        # module is then cached in sys.modules) — confirm before changing.
        self.__dict__["__lazymodule_loaded"] = 1

        if _debug:
            print("LazyModule: Module %r loaded" % full_name)
        return module

    def __getattr__(self, name):

        """Import the module on demand and get the attribute."""
        # Only reached for attributes missing from the instance dict;
        # after a load populated the dict, a miss is a genuine error.
        if self.__lazymodule_loaded:
            raise AttributeError(name)
        if _debug:
            print(
                "LazyModule: "
                "Module load triggered by attribute %r read access" % name
            )
        module = self.__lazymodule_import()
        return getattr(module, name)

    def __setattr__(self, name, value):

        """Import the module on demand and set the attribute."""
        if not self.__lazymodule_init:
            # Still inside __init__: store directly, no import side effects.
            self.__dict__[name] = value
            return
        if self.__lazymodule_loaded:
            # Module already loaded: mirror the write into the registered
            # namespace entry as well as this wrapper.
            self.__lazymodule_locals[self.__lazymodule_name] = value
            self.__dict__[name] = value
            return
        if _debug:
            print(
                "LazyModule: "
                "Module load triggered by attribute %r write access" % name
            )
        # First write after init: load the module and forward the write.
        module = self.__lazymodule_import()
        setattr(module, name, value)

    def __repr__(self):
        return "<LazyModule '%s'>" % self.__name__
|
lib/python3.10/site-packages/nltk/probability.py
ADDED
|
@@ -0,0 +1,2578 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Natural Language Toolkit: Probability and Statistics
|
| 2 |
+
#
|
| 3 |
+
# Copyright (C) 2001-2023 NLTK Project
|
| 4 |
+
# Author: Edward Loper <edloper@gmail.com>
|
| 5 |
+
# Steven Bird <stevenbird1@gmail.com> (additions)
|
| 6 |
+
# Trevor Cohn <tacohn@cs.mu.oz.au> (additions)
|
| 7 |
+
# Peter Ljunglöf <peter.ljunglof@heatherleaf.se> (additions)
|
| 8 |
+
# Liang Dong <ldong@clemson.edu> (additions)
|
| 9 |
+
# Geoffrey Sampson <sampson@cantab.net> (additions)
|
| 10 |
+
# Ilia Kurenkov <ilia.kurenkov@gmail.com> (additions)
|
| 11 |
+
#
|
| 12 |
+
# URL: <https://www.nltk.org/>
|
| 13 |
+
# For license information, see LICENSE.TXT
|
| 14 |
+
|
| 15 |
+
"""
|
| 16 |
+
Classes for representing and processing probabilistic information.
|
| 17 |
+
|
| 18 |
+
The ``FreqDist`` class is used to encode "frequency distributions",
|
| 19 |
+
which count the number of times that each outcome of an experiment
|
| 20 |
+
occurs.
|
| 21 |
+
|
| 22 |
+
The ``ProbDistI`` class defines a standard interface for "probability
|
| 23 |
+
distributions", which encode the probability of each outcome for an
|
| 24 |
+
experiment. There are two types of probability distribution:
|
| 25 |
+
|
| 26 |
+
- "derived probability distributions" are created from frequency
|
| 27 |
+
distributions. They attempt to model the probability distribution
|
| 28 |
+
that generated the frequency distribution.
|
| 29 |
+
- "analytic probability distributions" are created directly from
|
| 30 |
+
parameters (such as variance).
|
| 31 |
+
|
| 32 |
+
The ``ConditionalFreqDist`` class and ``ConditionalProbDistI`` interface
|
| 33 |
+
are used to encode conditional distributions. Conditional probability
|
| 34 |
+
distributions can be derived or analytic; but currently the only
|
| 35 |
+
implementation of the ``ConditionalProbDistI`` interface is
|
| 36 |
+
``ConditionalProbDist``, a derived distribution.
|
| 37 |
+
|
| 38 |
+
"""
|
| 39 |
+
|
| 40 |
+
import array
|
| 41 |
+
import math
|
| 42 |
+
import random
|
| 43 |
+
import warnings
|
| 44 |
+
from abc import ABCMeta, abstractmethod
|
| 45 |
+
from collections import Counter, defaultdict
|
| 46 |
+
from functools import reduce
|
| 47 |
+
|
| 48 |
+
from nltk.internals import raise_unorderable_types
|
| 49 |
+
|
| 50 |
+
_NINF = float("-1e300")
|
| 51 |
+
|
| 52 |
+
##//////////////////////////////////////////////////////
|
| 53 |
+
## Frequency Distributions
|
| 54 |
+
##//////////////////////////////////////////////////////
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
class FreqDist(Counter):
    """
    A frequency distribution for the outcomes of an experiment. A
    frequency distribution records the number of times each outcome of
    an experiment has occurred. For example, a frequency distribution
    could be used to record the frequency of each word type in a
    document. Formally, a frequency distribution can be defined as a
    function mapping from each sample to the number of times that
    sample occurred as an outcome.

    Frequency distributions are generally constructed by running a
    number of experiments, and incrementing the count for a sample
    every time it is an outcome of an experiment. For example, the
    following code will produce a frequency distribution that encodes
    how often each word occurs in a text:

    >>> from nltk.tokenize import word_tokenize
    >>> from nltk.probability import FreqDist
    >>> sent = 'This is an example sentence'
    >>> fdist = FreqDist()
    >>> for word in word_tokenize(sent):
    ...    fdist[word.lower()] += 1

    An equivalent way to do this is with the initializer:

    >>> fdist = FreqDist(word.lower() for word in word_tokenize(sent))

    """

    def __init__(self, samples=None):
        """
        Construct a new frequency distribution. If ``samples`` is
        given, then the frequency distribution will be initialized
        with the count of each object in ``samples``; otherwise, it
        will be initialized to be empty.

        In particular, ``FreqDist()`` returns an empty frequency
        distribution; and ``FreqDist(samples)`` first creates an empty
        frequency distribution, and then calls ``update`` with the
        list ``samples``.

        :param samples: The samples to initialize the frequency
            distribution with.
        :type samples: Sequence
        """
        Counter.__init__(self, samples)

        # Cached number of sample outcomes; None means "not computed yet"
        # or "invalidated by a mutation" (see __setitem__ and friends).
        self._N = None

    def N(self):
        """
        Return the total number of sample outcomes that have been
        recorded by this FreqDist. For the number of unique
        sample values (or bins) with counts greater than zero, use
        ``FreqDist.B()``.

        :rtype: int
        """
        if self._N is None:
            # Not already cached, or cache has been invalidated
            self._N = sum(self.values())
        return self._N

    def __setitem__(self, key, val):
        """
        Override ``Counter.__setitem__()`` to invalidate the cached N.
        """
        self._N = None
        super().__setitem__(key, val)

    def __delitem__(self, key):
        """
        Override ``Counter.__delitem__()`` to invalidate the cached N.
        """
        self._N = None
        super().__delitem__(key)

    def update(self, *args, **kwargs):
        """
        Override ``Counter.update()`` to invalidate the cached N.
        """
        self._N = None
        super().update(*args, **kwargs)

    def setdefault(self, key, val):
        """
        Override ``Counter.setdefault()`` to invalidate the cached N.

        Returns the count stored under ``key`` (the existing count if
        ``key`` was already present, otherwise ``val``), matching the
        ``dict.setdefault`` contract.  The previous implementation
        discarded the return value and always returned None.
        """
        self._N = None
        return super().setdefault(key, val)

    def B(self):
        """
        Return the total number of sample values (or "bins") that
        have counts greater than zero. For the total
        number of sample outcomes recorded, use ``FreqDist.N()``.
        (FreqDist.B() is the same as len(FreqDist).)

        :rtype: int
        """
        return len(self)

    def hapaxes(self):
        """
        Return a list of all samples that occur once (hapax legomena)

        :rtype: list
        """
        return [item for item in self if self[item] == 1]

    def Nr(self, r, bins=None):
        # Number of samples with exactly frequency r; see r_Nr for ``bins``.
        return self.r_Nr(bins)[r]

    def r_Nr(self, bins=None):
        """
        Return the dictionary mapping r to Nr, the number of samples with frequency r, where Nr > 0.

        :type bins: int
        :param bins: The number of possible sample outcomes. ``bins``
            is used to calculate Nr(0). In particular, Nr(0) is
            ``bins-self.B()``. If ``bins`` is not specified, it
            defaults to ``self.B()`` (so Nr(0) will be 0).
        :rtype: int
        """

        _r_Nr = defaultdict(int)
        for count in self.values():
            _r_Nr[count] += 1

        # Special case for Nr[0]:
        _r_Nr[0] = bins - self.B() if bins is not None else 0

        return _r_Nr

    def _cumulative_frequencies(self, samples):
        """
        Return the cumulative frequencies of the specified samples.
        If no samples are specified, all counts are returned, starting
        with the largest.

        :param samples: the samples whose frequencies should be returned.
        :type samples: any
        :rtype: list(float)
        """
        cf = 0.0
        for sample in samples:
            cf += self[sample]
            yield cf

    # slightly odd nomenclature freq() if FreqDist does counts and ProbDist does probs,
    # here, freq() does probs
    def freq(self, sample):
        """
        Return the frequency of a given sample. The frequency of a
        sample is defined as the count of that sample divided by the
        total number of sample outcomes that have been recorded by
        this FreqDist. The count of a sample is defined as the
        number of times that sample outcome was recorded by this
        FreqDist. Frequencies are always real numbers in the range
        [0, 1].

        :param sample: the sample whose frequency
            should be returned.
        :type sample: any
        :rtype: float
        """
        n = self.N()
        if n == 0:
            # An empty distribution has no outcomes; avoid ZeroDivisionError.
            return 0
        return self[sample] / n

    def max(self):
        """
        Return the sample with the greatest number of outcomes in this
        frequency distribution. If two or more samples have the same
        number of outcomes, return one of them; which sample is
        returned is undefined. If no outcomes have occurred in this
        frequency distribution, return None.

        :return: The sample with the maximum number of outcomes in this
            frequency distribution.
        :rtype: any or None
        """
        if len(self) == 0:
            raise ValueError(
                "A FreqDist must have at least one sample before max is defined."
            )
        return self.most_common(1)[0][0]

    def plot(
        self, *args, title="", cumulative=False, percents=False, show=True, **kwargs
    ):
        """
        Plot samples from the frequency distribution
        displaying the most frequent sample first. If an integer
        parameter is supplied, stop after this many samples have been
        plotted. For a cumulative plot, specify cumulative=True. Additional
        ``**kwargs`` are passed to matplotlib's plot function.
        (Requires Matplotlib to be installed.)

        :param title: The title for the graph.
        :type title: str
        :param cumulative: Whether the plot is cumulative. (default = False)
        :type cumulative: bool
        :param percents: Whether the plot uses percents instead of counts. (default = False)
        :type percents: bool
        :param show: Whether to show the plot, or only return the ax.
        :type show: bool
        """
        try:
            import matplotlib.pyplot as plt
        except ImportError as e:
            raise ValueError(
                "The plot function requires matplotlib to be installed."
                "See https://matplotlib.org/"
            ) from e

        if len(args) == 0:
            args = [len(self)]
        samples = [item for item, _ in self.most_common(*args)]

        if cumulative:
            freqs = list(self._cumulative_frequencies(samples))
            ylabel = "Cumulative "
        else:
            freqs = [self[sample] for sample in samples]
            ylabel = ""

        if percents:
            freqs = [f / self.N() * 100 for f in freqs]
            ylabel += "Percents"
        else:
            ylabel += "Counts"

        ax = plt.gca()
        ax.grid(True, color="silver")

        if "linewidth" not in kwargs:
            kwargs["linewidth"] = 2
        if title:
            ax.set_title(title)

        ax.plot(freqs, **kwargs)
        ax.set_xticks(range(len(samples)))
        ax.set_xticklabels([str(s) for s in samples], rotation=90)
        ax.set_xlabel("Samples")
        ax.set_ylabel(ylabel)

        if show:
            plt.show()

        return ax

    def tabulate(self, *args, **kwargs):
        """
        Tabulate the given samples from the frequency distribution (cumulative),
        displaying the most frequent sample first. If an integer
        parameter is supplied, stop after this many samples have been
        plotted.

        :param samples: The samples to plot (default is all samples)
        :type samples: list
        :param cumulative: A flag to specify whether the freqs are cumulative (default = False)
        :type cumulative: bool
        """
        if len(args) == 0:
            args = [len(self)]
        samples = _get_kwarg(
            kwargs, "samples", [item for item, _ in self.most_common(*args)]
        )

        cumulative = _get_kwarg(kwargs, "cumulative", False)
        if cumulative:
            freqs = list(self._cumulative_frequencies(samples))
        else:
            freqs = [self[sample] for sample in samples]
        # percents = [f * 100 for f in freqs] only in ProbDist?

        # Column width: wide enough for the longest sample name or count.
        width = max(len(f"{s}") for s in samples)
        width = max(width, max(len("%d" % f) for f in freqs))

        for i in range(len(samples)):
            print("%*s" % (width, samples[i]), end=" ")
        print()
        for i in range(len(samples)):
            print("%*d" % (width, freqs[i]), end=" ")
        print()

    def copy(self):
        """
        Create a copy of this frequency distribution.

        :rtype: FreqDist
        """
        return self.__class__(self)

    # Mathematical operators

    def __add__(self, other):
        """
        Add counts from two counters.

        >>> FreqDist('abbb') + FreqDist('bcc')
        FreqDist({'b': 4, 'c': 2, 'a': 1})

        """
        return self.__class__(super().__add__(other))

    def __sub__(self, other):
        """
        Subtract count, but keep only results with positive counts.

        >>> FreqDist('abbbc') - FreqDist('bccd')
        FreqDist({'b': 2, 'a': 1})

        """
        return self.__class__(super().__sub__(other))

    def __or__(self, other):
        """
        Union is the maximum of value in either of the input counters.

        >>> FreqDist('abbb') | FreqDist('bcc')
        FreqDist({'b': 3, 'c': 2, 'a': 1})

        """
        return self.__class__(super().__or__(other))

    def __and__(self, other):
        """
        Intersection is the minimum of corresponding counts.

        >>> FreqDist('abbb') & FreqDist('bcc')
        FreqDist({'b': 1})

        """
        return self.__class__(super().__and__(other))

    def __le__(self, other):
        """
        Returns True if this frequency distribution is a subset of the other
        and for no key the value exceeds the value of the same key from
        the other frequency distribution.

        The <= operator forms partial order and satisfying the axioms
        reflexivity, antisymmetry and transitivity.

        >>> FreqDist('a') <= FreqDist('a')
        True
        >>> a = FreqDist('abc')
        >>> b = FreqDist('aabc')
        >>> (a <= b, b <= a)
        (True, False)
        >>> FreqDist('a') <= FreqDist('abcd')
        True
        >>> FreqDist('abc') <= FreqDist('xyz')
        False
        >>> FreqDist('xyz') <= FreqDist('abc')
        False
        >>> c = FreqDist('a')
        >>> d = FreqDist('aa')
        >>> e = FreqDist('aaa')
        >>> c <= d and d <= e and c <= e
        True
        """
        if not isinstance(other, FreqDist):
            raise_unorderable_types("<=", self, other)
        return set(self).issubset(other) and all(
            self[key] <= other[key] for key in self
        )

    def __ge__(self, other):
        if not isinstance(other, FreqDist):
            raise_unorderable_types(">=", self, other)
        return set(self).issuperset(other) and all(
            self[key] >= other[key] for key in other
        )

    # Strict orderings derived from the non-strict ones above.
    __lt__ = lambda self, other: self <= other and not self == other
    __gt__ = lambda self, other: self >= other and not self == other

    def __repr__(self):
        """
        Return a string representation of this FreqDist.

        :rtype: string
        """
        return self.pformat()

    def pprint(self, maxlen=10, stream=None):
        """
        Print a string representation of this FreqDist to 'stream'

        :param maxlen: The maximum number of items to print
        :type maxlen: int
        :param stream: The stream to print to. stdout by default
        """
        print(self.pformat(maxlen=maxlen), file=stream)

    def pformat(self, maxlen=10):
        """
        Return a string representation of this FreqDist.

        :param maxlen: The maximum number of items to display
        :type maxlen: int
        :rtype: string
        """
        items = ["{!r}: {!r}".format(*item) for item in self.most_common(maxlen)]
        if len(self) > maxlen:
            items.append("...")
        return "FreqDist({{{0}}})".format(", ".join(items))

    def __str__(self):
        """
        Return a string representation of this FreqDist.

        :rtype: string
        """
        return "<FreqDist with %d samples and %d outcomes>" % (len(self), self.N())

    def __iter__(self):
        """
        Return an iterator which yields tokens ordered by frequency.

        :rtype: iterator
        """
        for token, _ in self.most_common(self.B()):
            yield token
|
| 486 |
+
|
| 487 |
+
|
| 488 |
+
##//////////////////////////////////////////////////////
|
| 489 |
+
## Probability Distributions
|
| 490 |
+
##//////////////////////////////////////////////////////
|
| 491 |
+
|
| 492 |
+
|
| 493 |
+
class ProbDistI(metaclass=ABCMeta):
    """
    A probability distribution for the outcomes of an experiment. A
    probability distribution specifies how likely it is that an
    experiment will have any given outcome. For example, a
    probability distribution could be used to predict the probability
    that a token in a document will have a given type. Formally, a
    probability distribution can be defined as a function mapping from
    samples to nonnegative real numbers, such that the sum of every
    number in the function's range is 1.0. A ``ProbDist`` is often
    used to model the probability distribution of the experiment used
    to generate a frequency distribution.
    """

    SUM_TO_ONE = True
    """True if the probabilities of the samples in this probability
    distribution will always sum to one."""

    @abstractmethod
    def __init__(self):
        """
        Classes inheriting from ProbDistI should implement __init__.
        """

    @abstractmethod
    def prob(self, sample):
        """
        Return the probability for a given sample. Probabilities
        are always real numbers in the range [0, 1].

        :param sample: The sample whose probability
            should be returned.
        :type sample: any
        :rtype: float
        """

    def logprob(self, sample):
        """
        Return the base 2 logarithm of the probability for a given sample.

        :param sample: The sample whose probability
            should be returned.
        :type sample: any
        :rtype: float
        """
        # Default definition, in terms of prob().  Zero-probability samples
        # get the _NINF sentinel instead of raising a math domain error.
        p = self.prob(sample)
        return math.log(p, 2) if p != 0 else _NINF

    @abstractmethod
    def max(self):
        """
        Return the sample with the greatest probability. If two or
        more samples have the same probability, return one of them;
        which sample is returned is undefined.

        :rtype: any
        """

    @abstractmethod
    def samples(self):
        """
        Return a list of all samples that have nonzero probabilities.
        Use ``prob`` to find the probability of each sample.

        :rtype: list
        """

    # cf self.SUM_TO_ONE
    def discount(self):
        """
        Return the ratio by which counts are discounted on average: c*/c

        :rtype: float
        """
        return 0.0

    # Subclasses should define more efficient implementations of this,
    # where possible.
    def generate(self):
        """
        Return a randomly selected sample from this probability distribution.
        The probability of returning each sample ``samp`` is equal to
        ``self.prob(samp)``.
        """
        # Inverse-transform sampling: draw a uniform value and walk the
        # samples, subtracting each sample's probability until it is used up.
        p = random.random()
        p_init = p
        for sample in self.samples():
            p -= self.prob(sample)
            if p <= 0:
                return sample
        # allow for some rounding error:
        if p < 0.0001:
            return sample
        # we *should* never get here
        if self.SUM_TO_ONE:
            warnings.warn(
                "Probability distribution %r sums to %r; generate()"
                " is returning an arbitrary sample." % (self, p_init - p)
            )
        return random.choice(list(self.samples()))
|
| 594 |
+
|
| 595 |
+
|
| 596 |
+
class UniformProbDist(ProbDistI):
    """
    A probability distribution that assigns equal probability to each
    sample in a given set; and a zero probability to all other
    samples.
    """

    def __init__(self, samples):
        """
        Construct a new uniform probability distribution, that assigns
        equal probability to each sample in ``samples``.

        :param samples: The samples that should be given uniform
            probability.
        :type samples: list
        :raise ValueError: If ``samples`` is empty.
        """
        if len(samples) == 0:
            raise ValueError(
                "A Uniform probability distribution must " + "have at least one sample."
            )
        # Deduplicate first: every *distinct* sample gets the same mass.
        self._sampleset = set(samples)
        self._prob = 1.0 / len(self._sampleset)
        self._samples = list(self._sampleset)

    def prob(self, sample):
        # Members of the sample set share one uniform probability;
        # anything else has probability zero.
        if sample in self._sampleset:
            return self._prob
        return 0

    def max(self):
        # Every sample ties for the maximum, so any one qualifies;
        # return the first in the materialized list.
        return self._samples[0]

    def samples(self):
        return self._samples

    def __repr__(self):
        return "<UniformProbDist with %d samples>" % len(self._sampleset)
|
| 632 |
+
|
| 633 |
+
|
| 634 |
+
class RandomProbDist(ProbDistI):
    """
    Generates a random probability distribution whereby each sample
    will be between 0 and 1 with equal probability (uniform random distribution.
    Also called a continuous uniform distribution).
    """

    def __init__(self, samples):
        if len(samples) == 0:
            raise ValueError(
                "A probability distribution must " + "have at least one sample."
            )
        # Assign each distinct sample a random share of the unit mass.
        self._probs = self.unirand(samples)
        self._samples = list(self._probs.keys())

    @classmethod
    def unirand(cls, samples):
        """
        The key function that creates a randomized initial distribution
        that still sums to 1. Set as a dictionary of prob values so that
        it can still be passed to MutableProbDist and called with identical
        syntax to UniformProbDist
        """
        distinct = set(samples)
        # One raw random weight per distinct sample, then normalize.
        weights = [random.random() for _ in range(len(distinct))]
        scale = sum(weights)
        weights = [w / scale for w in weights]

        # Floating-point division can leave the sum a hair away from 1;
        # the residue is tiny enough to fold into the last weight without
        # pushing any probability outside (0, 1).
        residue = sum(weights)
        if residue != 1:
            weights[-1] -= residue - 1

        return {sample: weights[i] for i, sample in enumerate(distinct)}

    def max(self):
        # Lazily compute and cache the highest-probability sample.
        if not hasattr(self, "_max"):
            best = max((p, v) for (v, p) in self._probs.items())
            self._max = best[1]
        return self._max

    def prob(self, sample):
        return self._probs.get(sample, 0)

    def samples(self):
        return self._samples

    def __repr__(self):
        return "<RandomUniformProbDist with %d samples>" % len(self._probs)
|
| 684 |
+
|
| 685 |
+
|
| 686 |
+
class DictionaryProbDist(ProbDistI):
    """
    A probability distribution whose probabilities are directly
    specified by a given dictionary.  The given dictionary maps
    samples to probabilities.
    """

    def __init__(self, prob_dict=None, log=False, normalize=False):
        """
        Construct a new probability distribution from the given
        dictionary, which maps values to probabilities (or to log
        probabilities, if ``log`` is true).  If ``normalize`` is
        true, then the probability values are scaled by a constant
        factor such that they sum to 1.

        If called without arguments, the resulting probability
        distribution assigns zero probability to all values.

        :raise ValueError: If ``normalize`` is true but the
            distribution has no samples to normalize.
        """

        self._prob_dict = prob_dict.copy() if prob_dict is not None else {}
        self._log = log

        # Normalize the distribution, if requested.
        if normalize:
            # Check the copy rather than the raw argument, so that a
            # missing prob_dict raises this clear ValueError instead of
            # a confusing TypeError from len(None).
            if len(self._prob_dict) == 0:
                raise ValueError(
                    "A DictionaryProbDist must have at least one sample "
                    + "before it can be normalized."
                )
            if log:
                value_sum = sum_logs(list(self._prob_dict.values()))
                if value_sum <= _NINF:
                    # All mass is at (or below) -inf: fall back to a
                    # uniform distribution in log space.
                    logp = math.log(1.0 / len(self._prob_dict), 2)
                    for x in self._prob_dict:
                        self._prob_dict[x] = logp
                else:
                    for x in self._prob_dict:
                        self._prob_dict[x] -= value_sum
            else:
                value_sum = sum(self._prob_dict.values())
                if value_sum == 0:
                    # Degenerate all-zero weights: fall back to uniform.
                    p = 1.0 / len(self._prob_dict)
                    for x in self._prob_dict:
                        self._prob_dict[x] = p
                else:
                    norm_factor = 1.0 / value_sum
                    for x in self._prob_dict:
                        self._prob_dict[x] *= norm_factor

    def prob(self, sample):
        if self._log:
            # Stored values are base-2 log probabilities.
            return 2 ** (self._prob_dict[sample]) if sample in self._prob_dict else 0
        else:
            return self._prob_dict.get(sample, 0)

    def logprob(self, sample):
        if self._log:
            return self._prob_dict.get(sample, _NINF)
        else:
            # Unknown samples and zero-probability samples both map to -inf.
            p = self._prob_dict.get(sample, 0)
            if p == 0:
                return _NINF
            return math.log(p, 2)

    def max(self):
        # Lazily cache the most probable sample; ties on probability are
        # broken by the larger sample value (lexicographic (p, v) order).
        if not hasattr(self, "_max"):
            self._max = max((p, v) for (v, p) in self._prob_dict.items())[1]
        return self._max

    def samples(self):
        return self._prob_dict.keys()

    def __repr__(self):
        return "<ProbDist with %d samples>" % len(self._prob_dict)
|
| 762 |
+
|
| 763 |
+
|
| 764 |
+
class MLEProbDist(ProbDistI):
    """
    The maximum likelihood estimate for the probability distribution
    of the experiment used to generate a frequency distribution.  The
    "maximum likelihood estimate" approximates the probability of
    each sample as the frequency of that sample in the frequency
    distribution.
    """

    def __init__(self, freqdist, bins=None):
        """
        Use the maximum likelihood estimate to create a probability
        distribution for the experiment used to generate ``freqdist``.

        :type freqdist: FreqDist
        :param freqdist: The frequency distribution that the
            probability estimates should be based on.
        """
        # bins is accepted for interface compatibility with other
        # ProbDist constructors but is unused by the MLE estimate.
        self._freqdist = freqdist

    def freqdist(self):
        """
        Return the frequency distribution that this probability
        distribution is based on.

        :rtype: FreqDist
        """
        return self._freqdist

    def prob(self, sample):
        # MLE: a sample's probability is simply its relative frequency.
        return self._freqdist.freq(sample)

    def max(self):
        return self._freqdist.max()

    def samples(self):
        return self._freqdist.keys()

    def __repr__(self):
        """
        :rtype: str
        :return: A string representation of this ``ProbDist``.
        """
        return "<MLEProbDist based on %d samples>" % self._freqdist.N()
|
| 808 |
+
|
| 809 |
+
|
| 810 |
+
class LidstoneProbDist(ProbDistI):
    """
    The Lidstone estimate for the probability distribution of the
    experiment used to generate a frequency distribution.  The
    "Lidstone estimate" is parameterized by a real number *gamma*,
    which typically ranges from 0 to 1.  The Lidstone estimate
    approximates the probability of a sample with count *c* from an
    experiment with *N* outcomes and *B* bins as
    ``(c+gamma)/(N+B*gamma)``.  This is equivalent to adding
    *gamma* to the count for each bin, and taking the maximum
    likelihood estimate of the resulting frequency distribution.
    """

    SUM_TO_ONE = False

    def __init__(self, freqdist, gamma, bins=None):
        """
        Use the Lidstone estimate to create a probability distribution
        for the experiment used to generate ``freqdist``.

        :type freqdist: FreqDist
        :param freqdist: The frequency distribution that the
            probability estimates should be based on.
        :type gamma: float
        :param gamma: A real number used to parameterize the
            estimate.  The Lidstone estimate is equivalent to adding
            *gamma* to the count for each bin, and taking the
            maximum likelihood estimate of the resulting frequency
            distribution.
        :type bins: int
        :param bins: The number of sample values that can be generated
            by the experiment that is described by the probability
            distribution.  This value must be correctly set for the
            probabilities of the sample values to sum to one.  If
            ``bins`` is not specified, it defaults to ``freqdist.B()``.
        :raise ValueError: If ``bins`` is zero, or smaller than the
            number of bins in ``freqdist``.
        """
        if (bins == 0) or (bins is None and freqdist.N() == 0):
            # Strip the trailing "ProbDist" from the class name so the
            # message also reads correctly for Laplace/ELE subclasses.
            name = self.__class__.__name__[:-8]
            raise ValueError(
                "A %s probability distribution " % name + "must have at least one bin."
            )
        if (bins is not None) and (bins < freqdist.B()):
            name = self.__class__.__name__[:-8]
            raise ValueError(
                "\nThe number of bins in a %s distribution " % name
                + "(%d) must be greater than or equal to\n" % bins
                + "the number of bins in the FreqDist used "
                + "to create it (%d)." % freqdist.B()
            )

        self._freqdist = freqdist
        self._gamma = float(gamma)
        self._N = self._freqdist.N()

        if bins is None:
            bins = freqdist.B()
        self._bins = bins

        self._divisor = self._N + bins * gamma
        if self._divisor == 0.0:
            # In extreme cases we force the probability to be 0,
            # which it will be, since the count will be 0:
            self._gamma = 0
            self._divisor = 1

    def freqdist(self):
        """
        Return the frequency distribution that this probability
        distribution is based on.

        :rtype: FreqDist
        """
        return self._freqdist

    def prob(self, sample):
        # Lidstone estimate: (c + gamma) / (N + B * gamma).
        c = self._freqdist[sample]
        return (c + self._gamma) / self._divisor

    def max(self):
        # For Lidstone distributions, probability is monotonic with
        # frequency, so the most probable sample is the one that
        # occurs most frequently.
        return self._freqdist.max()

    def samples(self):
        return self._freqdist.keys()

    def discount(self):
        # Fraction of probability mass shifted away from observed counts:
        # B*gamma / (N + B*gamma).
        gb = self._gamma * self._bins
        return gb / (self._N + gb)

    def __repr__(self):
        """
        Return a string representation of this ``ProbDist``.

        :rtype: str
        """
        return "<LidstoneProbDist based on %d samples>" % self._freqdist.N()
|
| 908 |
+
|
| 909 |
+
|
| 910 |
+
class LaplaceProbDist(LidstoneProbDist):
    """
    The Laplace estimate for the probability distribution of the
    experiment used to generate a frequency distribution.  The
    "Laplace estimate" approximates the probability of a sample with
    count *c* from an experiment with *N* outcomes and *B* bins as
    *(c+1)/(N+B)*.  This is equivalent to adding one to the count for
    each bin, and taking the maximum likelihood estimate of the
    resulting frequency distribution.
    """

    def __init__(self, freqdist, bins=None):
        """
        Use the Laplace estimate to create a probability distribution
        for the experiment used to generate ``freqdist``.

        :type freqdist: FreqDist
        :param freqdist: The frequency distribution that the
            probability estimates should be based on.
        :type bins: int
        :param bins: The number of sample values that can be generated
            by the experiment that is described by the probability
            distribution.  This value must be correctly set for the
            probabilities of the sample values to sum to one.  If
            ``bins`` is not specified, it defaults to ``freqdist.B()``.
        """
        # Laplace smoothing is just Lidstone smoothing with gamma = 1.
        super().__init__(freqdist, 1, bins)

    def __repr__(self):
        """
        :rtype: str
        :return: A string representation of this ``ProbDist``.
        """
        return "<LaplaceProbDist based on %d samples>" % self._freqdist.N()
|
| 944 |
+
|
| 945 |
+
|
| 946 |
+
class ELEProbDist(LidstoneProbDist):
    """
    The expected likelihood estimate for the probability distribution
    of the experiment used to generate a frequency distribution.  The
    "expected likelihood estimate" approximates the probability of a
    sample with count *c* from an experiment with *N* outcomes and
    *B* bins as *(c+0.5)/(N+B/2)*.  This is equivalent to adding 0.5
    to the count for each bin, and taking the maximum likelihood
    estimate of the resulting frequency distribution.
    """

    def __init__(self, freqdist, bins=None):
        """
        Use the expected likelihood estimate to create a probability
        distribution for the experiment used to generate ``freqdist``.

        :type freqdist: FreqDist
        :param freqdist: The frequency distribution that the
            probability estimates should be based on.
        :type bins: int
        :param bins: The number of sample values that can be generated
            by the experiment that is described by the probability
            distribution.  This value must be correctly set for the
            probabilities of the sample values to sum to one.  If
            ``bins`` is not specified, it defaults to ``freqdist.B()``.
        """
        # The expected likelihood estimate is Lidstone with gamma = 0.5.
        super().__init__(freqdist, 0.5, bins)

    def __repr__(self):
        """
        Return a string representation of this ``ProbDist``.

        :rtype: str
        """
        return "<ELEProbDist based on %d samples>" % self._freqdist.N()
|
| 981 |
+
|
| 982 |
+
|
| 983 |
+
class HeldoutProbDist(ProbDistI):
    """
    The heldout estimate for the probability distribution of the
    experiment used to generate two frequency distributions.  These
    two frequency distributions are called the "heldout frequency
    distribution" and the "base frequency distribution."  The
    "heldout estimate" uses the "heldout frequency
    distribution" to predict the probability of each sample, given its
    frequency in the "base frequency distribution".

    In particular, the heldout estimate approximates the probability
    for a sample that occurs *r* times in the base distribution as
    the average frequency in the heldout distribution of all samples
    that occur *r* times in the base distribution.

    This average frequency is *Tr[r]/(Nr[r].N)*, where:

    - *Tr[r]* is the total count in the heldout distribution for
      all samples that occur *r* times in the base distribution.
    - *Nr[r]* is the number of samples that occur *r* times in
      the base distribution.
    - *N* is the number of outcomes recorded by the heldout
      frequency distribution.

    In order to increase the efficiency of the ``prob`` member
    function, *Tr[r]/(Nr[r].N)* is precomputed for each value of *r*
    when the ``HeldoutProbDist`` is created.

    :type _estimate: list(float)
    :ivar _estimate: A list mapping from *r*, the number of
        times that a sample occurs in the base distribution, to the
        probability estimate for that sample.  ``_estimate[r]`` is
        calculated by finding the average frequency in the heldout
        distribution of all samples that occur *r* times in the base
        distribution.  In particular, ``_estimate[r]`` =
        *Tr[r]/(Nr[r].N)*.
    :type _max_r: int
    :ivar _max_r: The maximum number of times that any sample occurs
        in the base distribution.  ``_max_r`` is used to decide how
        large ``_estimate`` must be.
    """

    SUM_TO_ONE = False

    def __init__(self, base_fdist, heldout_fdist, bins=None):
        """
        Use the heldout estimate to create a probability distribution
        for the experiment used to generate ``base_fdist`` and
        ``heldout_fdist``.

        :type base_fdist: FreqDist
        :param base_fdist: The base frequency distribution.
        :type heldout_fdist: FreqDist
        :param heldout_fdist: The heldout frequency distribution.
        :type bins: int
        :param bins: The number of sample values that can be generated
            by the experiment that is described by the probability
            distribution.  This value must be correctly set for the
            probabilities of the sample values to sum to one.  If
            ``bins`` is not specified, it defaults to ``freqdist.B()``.
        """

        self._base_fdist = base_fdist
        self._heldout_fdist = heldout_fdist

        # The max number of times any sample occurs in base_fdist.
        self._max_r = base_fdist[base_fdist.max()]

        # Calculate Tr, Nr, and N.
        Tr = self._calculate_Tr()
        r_Nr = base_fdist.r_Nr(bins)
        Nr = [r_Nr[r] for r in range(self._max_r + 1)]
        N = heldout_fdist.N()

        # Use Tr, Nr, and N to compute the probability estimate for
        # each value of r.
        self._estimate = self._calculate_estimate(Tr, Nr, N)

    def _calculate_Tr(self):
        """
        Return the list *Tr*, where *Tr[r]* is the total count in
        ``heldout_fdist`` for all samples that occur *r*
        times in ``base_fdist``.

        :rtype: list(float)
        """
        Tr = [0.0] * (self._max_r + 1)
        for sample in self._heldout_fdist:
            r = self._base_fdist[sample]
            Tr[r] += self._heldout_fdist[sample]
        return Tr

    def _calculate_estimate(self, Tr, Nr, N):
        """
        Return the list *estimate*, where *estimate[r]* is the probability
        estimate for any sample that occurs *r* times in the base frequency
        distribution.  In particular, *estimate[r]* is *Tr[r]/(N[r].N)*.
        In the special case that *N[r]=0*, *estimate[r]* will never be used;
        so we define *estimate[r]=None* for those cases.

        :rtype: list(float)
        :type Tr: list(float)
        :param Tr: the list *Tr*, where *Tr[r]* is the total count in
            the heldout distribution for all samples that occur *r*
            times in base distribution.
        :type Nr: list(float)
        :param Nr: The list *Nr*, where *Nr[r]* is the number of
            samples that occur *r* times in the base distribution.
        :type N: int
        :param N: The total number of outcomes recorded by the heldout
            frequency distribution.
        """
        estimate = []
        for r in range(self._max_r + 1):
            if Nr[r] == 0:
                # No sample has this count in the base distribution, so
                # this slot can never be looked up by prob().
                estimate.append(None)
            else:
                estimate.append(Tr[r] / (Nr[r] * N))
        return estimate

    def base_fdist(self):
        """
        Return the base frequency distribution that this probability
        distribution is based on.

        :rtype: FreqDist
        """
        return self._base_fdist

    def heldout_fdist(self):
        """
        Return the heldout frequency distribution that this
        probability distribution is based on.

        :rtype: FreqDist
        """
        return self._heldout_fdist

    def samples(self):
        return self._base_fdist.keys()

    def prob(self, sample):
        # Use our precomputed probability estimate.
        r = self._base_fdist[sample]
        return self._estimate[r]

    def max(self):
        # Note: the Heldout estimation is *not* necessarily monotonic;
        # so this implementation is currently broken.  However, it
        # should give the right answer *most* of the time. :)
        return self._base_fdist.max()

    def discount(self):
        raise NotImplementedError()

    def __repr__(self):
        """
        :rtype: str
        :return: A string representation of this ``ProbDist``.
        """
        s = "<HeldoutProbDist: %d base samples; %d heldout samples>"
        return s % (self._base_fdist.N(), self._heldout_fdist.N())
|
| 1145 |
+
|
| 1146 |
+
|
| 1147 |
+
class CrossValidationProbDist(ProbDistI):
    """
    The cross-validation estimate for the probability distribution of
    the experiment used to generate a set of frequency distribution.
    The "cross-validation estimate" for the probability of a sample
    is found by averaging the held-out estimates for the sample in
    each pair of frequency distributions.
    """

    SUM_TO_ONE = False

    def __init__(self, freqdists, bins):
        """
        Use the cross-validation estimate to create a probability
        distribution for the experiment used to generate
        ``freqdists``.

        :type freqdists: list(FreqDist)
        :param freqdists: A list of the frequency distributions
            generated by the experiment.
        :type bins: int
        :param bins: The number of sample values that can be generated
            by the experiment that is described by the probability
            distribution.  This value must be correctly set for the
            probabilities of the sample values to sum to one.  If
            ``bins`` is not specified, it defaults to ``freqdist.B()``.
        """
        self._freqdists = freqdists

        # Build one heldout distribution per ordered pair of distinct
        # frequency distributions in freqdists.
        self._heldout_probdists = [
            HeldoutProbDist(base, heldout, bins)
            for base in freqdists
            for heldout in freqdists
            if base is not heldout
        ]

    def freqdists(self):
        """
        Return the list of frequency distributions that this ``ProbDist`` is based on.

        :rtype: list(FreqDist)
        """
        return self._freqdists

    def samples(self):
        # [xx] nb: this is not too efficient
        return {sample for fd in self._freqdists for sample in fd}

    def prob(self, sample):
        # Average the probability estimates over all heldout
        # distributions.
        total = sum(hd.prob(sample) for hd in self._heldout_probdists)
        return total / len(self._heldout_probdists)

    def discount(self):
        raise NotImplementedError()

    def __repr__(self):
        """
        Return a string representation of this ``ProbDist``.

        :rtype: str
        """
        return "<CrossValidationProbDist: %d-way>" % len(self._freqdists)
|
| 1215 |
+
|
| 1216 |
+
|
| 1217 |
+
class WittenBellProbDist(ProbDistI):
    """
    The Witten-Bell estimate of a probability distribution. This distribution
    allocates uniform probability mass to as yet unseen events by using the
    number of events that have only been seen once. The probability mass
    reserved for unseen events is equal to *T / (N + T)*
    where *T* is the number of observed event types and *N* is the total
    number of observed events. This equates to the maximum likelihood estimate
    of a new type event occurring. The remaining probability mass is discounted
    such that all probability estimates sum to one, yielding:

        - *p = T / Z (N + T)*, if count = 0
        - *p = c / (N + T)*, otherwise
    """

    def __init__(self, freqdist, bins=None):
        """
        Creates a distribution of Witten-Bell probability estimates.  This
        distribution allocates uniform probability mass to as yet unseen
        events by using the number of events that have only been seen once. The
        probability mass reserved for unseen events is equal to *T / (N + T)*
        where *T* is the number of observed event types and *N* is the total
        number of observed events. This equates to the maximum likelihood
        estimate of a new type event occurring. The remaining probability mass
        is discounted such that all probability estimates sum to one,
        yielding:

            - *p = T / Z (N + T)*, if count = 0
            - *p = c / (N + T)*, otherwise

        The parameters *T* and *N* are taken from the ``freqdist`` parameter
        (the ``B()`` and ``N()`` values). The normalizing factor *Z* is
        calculated using these values along with the ``bins`` parameter.

        :param freqdist: The frequency counts upon which to base the
            estimation.
        :type freqdist: FreqDist
        :param bins: The number of possible event types. This must be at least
            as large as the number of bins in the ``freqdist``. If None, then
            it's assumed to be equal to that of the ``freqdist``
        :type bins: int
        """
        assert bins is None or bins >= freqdist.B(), (
            "bins parameter must not be less than %d=freqdist.B()" % freqdist.B()
        )
        if bins is None:
            bins = freqdist.B()
        self._freqdist = freqdist
        self._T = self._freqdist.B()  # number of observed event types
        self._Z = bins - self._freqdist.B()  # number of unseen event types
        self._N = self._freqdist.N()  # total observed events
        # self._P0 is P(0), precalculated for efficiency:
        if self._N == 0:
            # if freqdist is empty, we approximate P(0) by a UniformProbDist:
            # NOTE(review): if bins equals freqdist.B() here, self._Z is 0 and
            # this raises ZeroDivisionError -- confirm callers always supply
            # bins > B() when the freqdist is empty.
            self._P0 = 1.0 / self._Z
        else:
            self._P0 = self._T / (self._Z * (self._N + self._T))

    def prob(self, sample):
        # inherit docs from ProbDistI
        count = self._freqdist[sample]
        if count == 0:
            return self._P0
        return count / (self._N + self._T)

    def max(self):
        return self._freqdist.max()

    def samples(self):
        return self._freqdist.keys()

    def freqdist(self):
        return self._freqdist

    def discount(self):
        raise NotImplementedError()

    def __repr__(self):
        """
        Return a string representation of this ``ProbDist``.

        :rtype: str
        """
        return "<WittenBellProbDist based on %d samples>" % self._freqdist.N()
|
| 1299 |
+
|
| 1300 |
+
|
| 1301 |
+
##//////////////////////////////////////////////////////
|
| 1302 |
+
## Good-Turing Probability Distributions
|
| 1303 |
+
##//////////////////////////////////////////////////////
|
| 1304 |
+
|
| 1305 |
+
# Good-Turing frequency estimation was contributed by Alan Turing and
|
| 1306 |
+
# his statistical assistant I.J. Good, during their collaboration in
|
| 1307 |
+
# World War II. It is a statistical technique for predicting the
|
| 1308 |
+
# probability of occurrence of objects belonging to an unknown number
|
| 1309 |
+
# of species, given past observations of such objects and their
|
| 1310 |
+
# species. (In drawing balls from an urn, the 'objects' would be balls
|
| 1311 |
+
# and the 'species' would be the distinct colors of the balls (finite
|
| 1312 |
+
# but unknown in number).
|
| 1313 |
+
#
|
| 1314 |
+
# Good-Turing method calculates the probability mass to assign to
|
| 1315 |
+
# events with zero or low counts based on the number of events with
|
| 1316 |
+
# higher counts. It does so by using the adjusted count *c\**:
|
| 1317 |
+
#
|
| 1318 |
+
# - *c\* = (c + 1) N(c + 1) / N(c)* for c >= 1
|
| 1319 |
+
# - *things with frequency zero in training* = N(1) for c == 0
|
| 1320 |
+
#
|
| 1321 |
+
# where *c* is the original count, *N(i)* is the number of event types
|
| 1322 |
+
# observed with count *i*. We can think of the count of unseen items as the count
|
| 1323 |
+
# of frequency one (see Jurafsky & Martin 2nd Edition, p101).
|
| 1324 |
+
#
|
| 1325 |
+
# This method is problematic because the situation ``N(c+1) == 0``
|
| 1326 |
+
# is quite common in the original Good-Turing estimation; smoothing or
|
| 1327 |
+
# interpolation of *N(i)* values is essential in practice.
|
| 1328 |
+
#
|
| 1329 |
+
# Bill Gale and Geoffrey Sampson present a simple and effective approach,
|
| 1330 |
+
# Simple Good-Turing. As a smoothing curve they simply use a power curve:
|
| 1331 |
+
#
|
| 1332 |
+
# Nr = a*r^b (with b < -1 to give the appropriate hyperbolic
|
| 1333 |
+
# relationship)
|
| 1334 |
+
#
|
| 1335 |
+
# They estimate a and b by simple linear regression technique on the
|
| 1336 |
+
# logarithmic form of the equation:
|
| 1337 |
+
#
|
| 1338 |
+
# log Nr = a + b*log(r)
|
| 1339 |
+
#
|
| 1340 |
+
# However, they suggest that such a simple curve is probably only
|
| 1341 |
+
# appropriate for high values of r. For low values of r, they use the
|
| 1342 |
+
# measured Nr directly. (see M&S, p.213)
|
| 1343 |
+
#
|
| 1344 |
+
# Gale and Sampson propose to use r while the difference between r and
|
| 1345 |
+
# r* is 1.96 greater than the standard deviation, and switch to r* if
|
| 1346 |
+
# it is less or equal:
|
| 1347 |
+
#
|
| 1348 |
+
# |r - r*| > 1.96 * sqrt((r + 1)^2 (Nr+1 / Nr^2) (1 + Nr+1 / Nr))
|
| 1349 |
+
#
|
| 1350 |
+
# The 1.96 coefficient correspond to a 0.05 significance criterion,
|
| 1351 |
+
# some implementations can use a coefficient of 1.65 for a 0.1
|
| 1352 |
+
# significance criterion.
|
| 1353 |
+
#
|
| 1354 |
+
|
| 1355 |
+
##//////////////////////////////////////////////////////
|
| 1356 |
+
## Simple Good-Turing Probability Distributions
|
| 1357 |
+
##//////////////////////////////////////////////////////
|
| 1358 |
+
|
| 1359 |
+
|
| 1360 |
+
class SimpleGoodTuringProbDist(ProbDistI):
    """
    SimpleGoodTuring ProbDist approximates from frequency to frequency of
    frequency into a linear line under log space by linear regression.
    Details of Simple Good-Turing algorithm can be found in:

    - "Good Turing smoothing without tears" (Gale & Sampson 1995),
      Journal of Quantitative Linguistics, vol. 2 pp. 217-237.
    - "Speech and Language Processing" (Jurafsky & Martin),
      2nd Edition, Chapter 4.5 p103 (log(Nc) = a + b*log(c))
    - https://www.grsampson.net/RGoodTur.html

    Given a set of pair (xi, yi), where the xi denotes the frequency and
    yi denotes the frequency of frequency, we want to minimize their
    square variation. E(x) and E(y) represent the mean of xi and yi.

    - slope: b = sigma ((xi-E(x))(yi-E(y))) / sigma ((xi-E(x))(xi-E(x)))
    - intercept: a = E(y) - b.E(x)
    """

    # The estimates deliberately leave probability mass aside for unseen
    # events, so the seen-sample probabilities do not sum to one.
    SUM_TO_ONE = False

    def __init__(self, freqdist, bins=None):
        """
        :param freqdist: The frequency counts upon which to base the
            estimation.
        :type freqdist: FreqDist
        :param bins: The number of possible event types. This must be
            larger than the number of bins in the ``freqdist``. If None,
            then it's assumed to be equal to ``freqdist``.B() + 1
        :type bins: int
        """
        assert (
            bins is None or bins > freqdist.B()
        ), "bins parameter must not be less than %d=freqdist.B()+1" % (freqdist.B() + 1)
        if bins is None:
            bins = freqdist.B() + 1
        self._freqdist = freqdist
        self._bins = bins
        r, nr = self._r_Nr()
        self.find_best_fit(r, nr)
        self._switch(r, nr)
        self._renormalize(r, nr)

    def _r_Nr_non_zero(self):
        """Return the {r: Nr} mapping restricted to observed counts r > 0."""
        r_Nr = self._freqdist.r_Nr()
        del r_Nr[0]
        return r_Nr

    def _r_Nr(self):
        """
        Split the frequency distribution in two list (r, Nr), where Nr(r) > 0
        """
        nonzero = self._r_Nr_non_zero()

        if not nonzero:
            return [], []
        return zip(*sorted(nonzero.items()))

    def find_best_fit(self, r, nr):
        """
        Use simple linear regression to tune parameters self._slope and
        self._intercept in the log-log space based on count and Nr(count)
        (Work in log space to avoid floating point underflow.)
        """
        # For higher sample frequencies the data points becomes horizontal
        # along line Nr=1. To create a more evident linear model in log-log
        # space, we average positive Nr values with the surrounding zero
        # values. (Church and Gale, 1991)

        if not r or not nr:
            # Empty r or nr?
            return

        zr = []
        for j in range(len(r)):
            # Neighbouring counts; the first/last points use synthetic
            # neighbours (0 on the left, mirrored gap on the right).
            i = r[j - 1] if j > 0 else 0
            k = 2 * r[j] - i if j == len(r) - 1 else r[j + 1]
            zr_ = 2.0 * nr[j] / (k - i)
            zr.append(zr_)

        log_r = [math.log(i) for i in r]
        log_zr = [math.log(i) for i in zr]

        xy_cov = x_var = 0.0
        x_mean = sum(log_r) / len(log_r)
        y_mean = sum(log_zr) / len(log_zr)
        for (x, y) in zip(log_r, log_zr):
            xy_cov += (x - x_mean) * (y - y_mean)
            x_var += (x - x_mean) ** 2
        self._slope = xy_cov / x_var if x_var != 0 else 0.0
        if self._slope >= -1:
            # Gale & Sampson require b < -1 for a proper hyperbolic fit.
            warnings.warn(
                "SimpleGoodTuring did not find a proper best fit "
                "line for smoothing probabilities of occurrences. "
                "The probability estimates are likely to be "
                "unreliable."
            )
        self._intercept = y_mean - self._slope * x_mean

    def _switch(self, r, nr):
        """
        Calculate the r frontier where we must switch from Nr to Sr
        when estimating E[Nr].
        """
        for i, r_ in enumerate(r):
            if len(r) == i + 1 or r[i + 1] != r_ + 1:
                # We are at the end of r, or there is a gap in r
                self._switch_at = r_
                break

            Sr = self.smoothedNr
            smooth_r_star = (r_ + 1) * Sr(r_ + 1) / Sr(r_)
            unsmooth_r_star = (r_ + 1) * nr[i + 1] / nr[i]

            std = math.sqrt(self._variance(r_, nr[i], nr[i + 1]))
            if abs(unsmooth_r_star - smooth_r_star) <= 1.96 * std:
                self._switch_at = r_
                break

    def _variance(self, r, nr, nr_1):
        """Variance of the unsmoothed r* estimate (Gale & Sampson 1995)."""
        r = float(r)
        nr = float(nr)
        nr_1 = float(nr_1)
        return (r + 1.0) ** 2 * (nr_1 / nr**2) * (1.0 + nr_1 / nr)

    def _renormalize(self, r, nr):
        """
        It is necessary to renormalize all the probability estimates to
        ensure a proper probability distribution results. This can be done
        by keeping the estimate of the probability mass for unseen items as
        N(1)/N and renormalizing all the estimates for previously seen items
        (as Gale and Sampson (1995) propose). (See M&S P.213, 1999)
        """
        prob_cov = 0.0
        for r_, nr_ in zip(r, nr):
            prob_cov += nr_ * self._prob_measure(r_)
        if prob_cov:
            self._renormal = (1 - self._prob_measure(0)) / prob_cov

    def smoothedNr(self, r):
        """
        Return the number of samples with count r.

        :param r: The amount of frequency.
        :type r: int
        :rtype: float
        """

        # Nr = a*r^b (with b < -1 to give the appropriate hyperbolic
        # relationship)
        # Estimate a and b by simple linear regression technique on
        # the logarithmic form of the equation: log Nr = a + b*log(r)

        return math.exp(self._intercept + self._slope * math.log(r))

    def prob(self, sample):
        """
        Return the sample's probability.

        :param sample: sample of the event
        :type sample: str
        :rtype: float
        """
        count = self._freqdist[sample]
        p = self._prob_measure(count)
        if count == 0:
            if self._bins == self._freqdist.B():
                # No unseen events were allowed for.
                p = 0.0
            else:
                # Spread the unseen mass evenly over the unseen bins.
                p = p / (self._bins - self._freqdist.B())
        else:
            p = p * self._renormal
        return p

    def _prob_measure(self, count):
        """Unnormalized Good-Turing probability mass for a given count."""
        if count == 0 and self._freqdist.N() == 0:
            return 1.0
        elif count == 0 and self._freqdist.N() != 0:
            return self._freqdist.Nr(1) / self._freqdist.N()

        if self._switch_at > count:
            # Below the frontier: use the measured Nr directly.
            Er_1 = self._freqdist.Nr(count + 1)
            Er = self._freqdist.Nr(count)
        else:
            # At or above the frontier: use the smoothed estimate Sr.
            Er_1 = self.smoothedNr(count + 1)
            Er = self.smoothedNr(count)

        r_star = (count + 1) * Er_1 / Er
        return r_star / self._freqdist.N()

    def check(self):
        """
        Diagnostic: print the total probability mass of this distribution
        (unseen mass plus renormalized mass of all seen counts); it should
        be very close to 1.0.
        """
        # Bug fix: the previous implementation indexed a nonexistent
        # ``self._Nr`` attribute, so this method always raised
        # AttributeError.  Recompute the (r, Nr) pairs instead.
        prob_sum = self._prob_measure(0)
        for r_, nr_ in self._r_Nr_non_zero().items():
            prob_sum += nr_ * self._prob_measure(r_) * self._renormal
        print("Probability Sum:", prob_sum)
        # assert prob_sum == 1.0, "probability sum should be one!"

    def discount(self):
        """
        This function returns the total mass of probability transfers from the
        seen samples to the unseen samples.
        """
        return self.smoothedNr(1) / self._freqdist.N()

    def max(self):
        return self._freqdist.max()

    def samples(self):
        return self._freqdist.keys()

    def freqdist(self):
        return self._freqdist

    def __repr__(self):
        """
        Return a string representation of this ``ProbDist``.

        :rtype: str
        """
        return "<SimpleGoodTuringProbDist based on %d samples>" % self._freqdist.N()
|
| 1581 |
+
|
| 1582 |
+
|
| 1583 |
+
class MutableProbDist(ProbDistI):
    """
    A mutable probdist where the probabilities may be easily modified. This
    simply copies an existing probdist, storing the probability values in a
    mutable dictionary and providing an update method.
    """

    def __init__(self, prob_dist, samples, store_logs=True):
        """
        Creates the mutable probdist based on the given prob_dist and using
        the list of samples given. These values are stored as log
        probabilities if the store_logs flag is set.

        :param prob_dist: the distribution from which to garner the
            probabilities
        :type prob_dist: ProbDist
        :param samples: the complete set of samples
        :type samples: sequence of any
        :param store_logs: whether to store the probabilities as logarithms
        :type store_logs: bool
        """
        self._samples = samples
        # Map each sample to its slot in the backing array.
        self._sample_dict = {sample: index for index, sample in enumerate(samples)}
        # Compact C-double storage, one slot per sample.
        self._data = array.array("d", [0.0]) * len(samples)
        fetch = prob_dist.logprob if store_logs else prob_dist.prob
        for index, sample in enumerate(samples):
            self._data[index] = fetch(sample)
        self._logs = store_logs

    def max(self):
        # inherit documentation
        return max(self._sample_dict, key=self._sample_dict.get)

    def samples(self):
        # inherit documentation
        return self._samples

    def prob(self, sample):
        # inherit documentation
        index = self._sample_dict.get(sample)
        if index is None:
            # Unknown sample: zero probability.
            return 0.0
        stored = self._data[index]
        return 2 ** stored if self._logs else stored

    def logprob(self, sample):
        # inherit documentation
        index = self._sample_dict.get(sample)
        if index is None:
            # Unknown sample: log-probability of minus infinity.
            return float("-inf")
        stored = self._data[index]
        return stored if self._logs else math.log(stored, 2)

    def update(self, sample, prob, log=True):
        """
        Update the probability for the given sample. This may cause the object
        to stop being the valid probability distribution - the user must
        ensure that they update the sample probabilities such that all samples
        have probabilities between 0 and 1 and that all probabilities sum to
        one.

        :param sample: the sample for which to update the probability
        :type sample: any
        :param prob: the new probability
        :type prob: float
        :param log: is the probability already logged
        :type log: bool
        """
        index = self._sample_dict.get(sample)
        assert index is not None
        if self._logs:
            self._data[index] = prob if log else math.log(prob, 2)
        else:
            self._data[index] = 2 ** prob if log else prob
|
| 1657 |
+
|
| 1658 |
+
|
| 1659 |
+
##/////////////////////////////////////////////////////
|
| 1660 |
+
## Kneser-Ney Probability Distribution
|
| 1661 |
+
##//////////////////////////////////////////////////////
|
| 1662 |
+
|
| 1663 |
+
# This method for calculating probabilities was introduced in 1995 by Reinhard
|
| 1664 |
+
# Kneser and Hermann Ney. It was meant to improve the accuracy of language
|
| 1665 |
+
# models that use backing-off to deal with sparse data. The authors propose two
|
| 1666 |
+
# ways of doing so: a marginal distribution constraint on the back-off
|
| 1667 |
+
# distribution and a leave-one-out distribution. For a start, the first one is
|
| 1668 |
+
# implemented as a class below.
|
| 1669 |
+
#
|
| 1670 |
+
# The idea behind a back-off n-gram model is that we have a series of
|
| 1671 |
+
# frequency distributions for our n-grams so that in case we have not seen a
|
| 1672 |
+
# given n-gram during training (and as a result have a 0 probability for it) we
|
| 1673 |
+
# can 'back off' (hence the name!) and try testing whether we've seen the
|
| 1674 |
+
# n-1-gram part of the n-gram in training.
|
| 1675 |
+
#
|
| 1676 |
+
# The novelty of Kneser and Ney's approach was that they decided to fiddle
|
| 1677 |
+
# around with the way this latter, backed off probability was being calculated
|
| 1678 |
+
# whereas their peers seemed to focus on the primary probability.
|
| 1679 |
+
#
|
| 1680 |
+
# The implementation below uses one of the techniques described in their paper
|
| 1681 |
+
# titled "Improved backing-off for n-gram language modeling." In the same paper
|
| 1682 |
+
# another technique is introduced to attempt to smooth the back-off
|
| 1683 |
+
# distribution as well as the primary one. There is also a much-cited
|
| 1684 |
+
# modification of this method proposed by Chen and Goodman.
|
| 1685 |
+
#
|
| 1686 |
+
# In order for the implementation of Kneser-Ney to be more efficient, some
|
| 1687 |
+
# changes have been made to the original algorithm. Namely, the calculation of
|
| 1688 |
+
# the normalizing function gamma has been significantly simplified and
|
| 1689 |
+
# combined slightly differently with beta. None of these changes affect the
|
| 1690 |
+
# nature of the algorithm, but instead aim to cut out unnecessary calculations
|
| 1691 |
+
# and take advantage of storing and retrieving information in dictionaries
|
| 1692 |
+
# where possible.
|
| 1693 |
+
|
| 1694 |
+
|
| 1695 |
+
class KneserNeyProbDist(ProbDistI):
    """
    Kneser-Ney estimate of a probability distribution. This is a version of
    back-off that counts how likely an n-gram is provided the n-1-gram had
    been seen in training. Extends the ProbDistI interface, requires a trigram
    FreqDist instance to train on. Optionally, a different from default discount
    value can be specified. The default discount is set to 0.75.

    """

    def __init__(self, freqdist, bins=None, discount=0.75):
        """
        :param freqdist: The trigram frequency distribution upon which to base
            the estimation
        :type freqdist: FreqDist
        :param bins: Included for compatibility with nltk.tag.hmm
        :type bins: int or float
        :param discount: The discount applied when retrieving counts of
            trigrams
        :type discount: float (preferred, but can be set to int)
        """

        if not bins:
            self._bins = freqdist.B()
        else:
            self._bins = bins
        self._D = discount

        # cache for probability calculation
        self._cache = {}

        # internal bigram and trigram frequency distributions
        self._bigrams = defaultdict(int)
        self._trigrams = freqdist

        # helper dictionaries used to calculate probabilities
        self._wordtypes_after = defaultdict(float)
        self._trigrams_contain = defaultdict(float)
        self._wordtypes_before = defaultdict(float)
        for w0, w1, w2 in freqdist:
            self._bigrams[(w0, w1)] += freqdist[(w0, w1, w2)]
            self._wordtypes_after[(w0, w1)] += 1
            self._trigrams_contain[w1] += 1
            self._wordtypes_before[(w1, w2)] += 1

    def prob(self, trigram):
        """
        Return the discounted probability of *trigram*, backing off to the
        word-type statistics when the exact trigram was not seen in training.

        :param trigram: an iterable of exactly three items (w0, w1, w2)
        :rtype: float
        """
        # sample must be a triple
        if len(trigram) != 3:
            raise ValueError("Expected an iterable with 3 members.")
        trigram = tuple(trigram)
        w0, w1, w2 = trigram

        if trigram in self._cache:
            return self._cache[trigram]
        else:
            # if the sample trigram was seen during training
            if trigram in self._trigrams:
                prob = (self._trigrams[trigram] - self.discount()) / self._bigrams[
                    (w0, w1)
                ]

            # else if the 'rougher' environment was seen during training
            elif (w0, w1) in self._bigrams and (w1, w2) in self._wordtypes_before:
                aftr = self._wordtypes_after[(w0, w1)]
                bfr = self._wordtypes_before[(w1, w2)]

                # the probability left over from alphas
                leftover_prob = (aftr * self.discount()) / self._bigrams[(w0, w1)]

                # the beta (including normalization)
                beta = bfr / (self._trigrams_contain[w1] - aftr)

                prob = leftover_prob * beta

            # else the sample was completely unseen during training
            else:
                prob = 0.0

            self._cache[trigram] = prob
            return prob

    def discount(self):
        """
        Return the value by which counts are discounted. By default set to 0.75.

        :rtype: float
        """
        return self._D

    def set_discount(self, discount):
        """
        Set the value by which counts are discounted to the value of discount.

        :param discount: the new value to discount counts by
        :type discount: float (preferred, but int possible)
        :rtype: None
        """
        self._D = discount

    def samples(self):
        return self._trigrams.keys()

    def max(self):
        return self._trigrams.max()

    def __repr__(self):
        """
        Return a string representation of this ProbDist

        :rtype: str
        """
        # Bug fix: the closing ">" was missing from the representation string.
        return f"<KneserNeyProbDist based on {self._trigrams.N()} trigrams>"
|
| 1807 |
+
|
| 1808 |
+
|
| 1809 |
+
##//////////////////////////////////////////////////////
|
| 1810 |
+
## Probability Distribution Operations
|
| 1811 |
+
##//////////////////////////////////////////////////////
|
| 1812 |
+
|
| 1813 |
+
|
| 1814 |
+
def log_likelihood(test_pdist, actual_pdist):
    """Return the log likelihood (base 2) of ``test_pdist`` weighted by ``actual_pdist``."""
    if not isinstance(test_pdist, ProbDistI) or not isinstance(actual_pdist, ProbDistI):
        raise ValueError("expected a ProbDist.")
    # Is this right?
    total = 0.0
    for s in actual_pdist:
        total += actual_pdist.prob(s) * math.log(test_pdist.prob(s), 2)
    return total
|
| 1821 |
+
|
| 1822 |
+
|
| 1823 |
+
def entropy(pdist):
    """Return the Shannon entropy (in bits) of ``pdist`` over its samples."""
    total = 0.0
    for s in pdist.samples():
        p = pdist.prob(s)
        total += p * math.log(p, 2)
    return -total
|
| 1826 |
+
|
| 1827 |
+
|
| 1828 |
+
##//////////////////////////////////////////////////////
|
| 1829 |
+
## Conditional Distributions
|
| 1830 |
+
##//////////////////////////////////////////////////////
|
| 1831 |
+
|
| 1832 |
+
|
| 1833 |
+
class ConditionalFreqDist(defaultdict):
    """
    A collection of frequency distributions for a single experiment
    run under different conditions. Conditional frequency
    distributions are used to record the number of times each sample
    occurred, given the condition under which the experiment was run.
    For example, a conditional frequency distribution could be used to
    record the frequency of each word (type) in a document, given its
    length. Formally, a conditional frequency distribution can be
    defined as a function that maps from each condition to the
    FreqDist for the experiment under that condition.

    Conditional frequency distributions are typically constructed by
    repeatedly running an experiment under a variety of conditions,
    and incrementing the sample outcome counts for the appropriate
    conditions. For example, the following code will produce a
    conditional frequency distribution that encodes how often each
    word type occurs, given the length of that word type:

    >>> from nltk.probability import ConditionalFreqDist
    >>> from nltk.tokenize import word_tokenize
    >>> sent = "the the the dog dog some other words that we do not care about"
    >>> cfdist = ConditionalFreqDist()
    >>> for word in word_tokenize(sent):
    ...     condition = len(word)
    ...     cfdist[condition][word] += 1

    An equivalent way to do this is with the initializer:

    >>> cfdist = ConditionalFreqDist((len(word), word) for word in word_tokenize(sent))

    The frequency distribution for each condition is accessed using
    the indexing operator:

    >>> cfdist[3]
    FreqDist({'the': 3, 'dog': 2, 'not': 1})
    >>> cfdist[3].freq('the')
    0.5
    >>> cfdist[3]['dog']
    2

    When the indexing operator is used to access the frequency
    distribution for a condition that has not been accessed before,
    ``ConditionalFreqDist`` creates a new empty FreqDist for that
    condition.

    """

    def __init__(self, cond_samples=None):
        """
        Construct a new empty conditional frequency distribution. In
        particular, the count for every sample, under every condition,
        is zero.

        :param cond_samples: The samples to initialize the conditional
            frequency distribution with
        :type cond_samples: Sequence of (condition, sample) tuples
        """
        defaultdict.__init__(self, FreqDist)

        if cond_samples:
            for (cond, sample) in cond_samples:
                self[cond][sample] += 1

    def __reduce__(self):
        # Support pickling: rebuild from (condition, FreqDist) pairs.
        kv_pairs = ((cond, self[cond]) for cond in self.conditions())
        return (self.__class__, (), None, None, kv_pairs)

    def conditions(self):
        """
        Return a list of the conditions that have been accessed for
        this ``ConditionalFreqDist``. Use the indexing operator to
        access the frequency distribution for a given condition.
        Note that the frequency distributions for some conditions
        may contain zero sample outcomes.

        :rtype: list
        """
        return list(self.keys())

    def N(self):
        """
        Return the total number of sample outcomes that have been
        recorded by this ``ConditionalFreqDist``.

        :rtype: int
        """
        return sum(fdist.N() for fdist in self.values())

    def plot(
        self,
        *args,
        samples=None,
        title="",
        cumulative=False,
        percents=False,
        conditions=None,
        show=True,
        **kwargs,
    ):
        """
        Plot the given samples from the conditional frequency distribution.
        For a cumulative plot, specify cumulative=True. Additional ``*args`` and
        ``**kwargs`` are passed to matplotlib's plot function.
        (Requires Matplotlib to be installed.)

        :param samples: The samples to plot
        :type samples: list
        :param title: The title for the graph
        :type title: str
        :param cumulative: Whether the plot is cumulative. (default = False)
        :type cumulative: bool
        :param percents: Whether the plot uses percents instead of counts. (default = False)
        :type percents: bool
        :param conditions: The conditions to plot (default is all)
        :type conditions: list
        :param show: Whether to show the plot, or only return the ax.
        :type show: bool
        """
        try:
            import matplotlib.pyplot as plt  # import statement fix
        except ImportError as e:
            raise ValueError(
                "The plot function requires matplotlib to be installed."
                "See https://matplotlib.org/"
            ) from e

        if not conditions:
            conditions = self.conditions()
        else:
            conditions = [c for c in conditions if c in self]
        if not samples:
            samples = sorted({v for c in conditions for v in self[c]})
        if "linewidth" not in kwargs:
            kwargs["linewidth"] = 2
        ax = plt.gca()
        if conditions:
            freqs = []
            for condition in conditions:
                if cumulative:
                    # freqs should be a list of list where each sub list will be a frequency of a condition
                    freq = list(self[condition]._cumulative_frequencies(samples))
                else:
                    freq = [self[condition][sample] for sample in samples]

                if percents:
                    freq = [f / self[condition].N() * 100 for f in freq]

                freqs.append(freq)

            if cumulative:
                ylabel = "Cumulative "
                legend_loc = "lower right"
            else:
                ylabel = ""
                legend_loc = "upper right"

            if percents:
                ylabel += "Percents"
            else:
                ylabel += "Counts"

            # Idiom fix: use enumerate instead of a manually incremented index.
            for i, freq in enumerate(freqs):
                kwargs["label"] = conditions[i]  # label for each condition
                ax.plot(freq, *args, **kwargs)
            ax.legend(loc=legend_loc)
            ax.grid(True, color="silver")
            ax.set_xticks(range(len(samples)))
            ax.set_xticklabels([str(s) for s in samples], rotation=90)
            if title:
                ax.set_title(title)
            ax.set_xlabel("Samples")
            ax.set_ylabel(ylabel)

        if show:
            plt.show()

        return ax

    def tabulate(self, *args, **kwargs):
        """
        Tabulate the given samples from the conditional frequency distribution.

        :param samples: The samples to plot
        :type samples: list
        :param conditions: The conditions to plot (default is all)
        :type conditions: list
        :param cumulative: A flag to specify whether the freqs are cumulative (default = False)
        :type title: bool
        """

        cumulative = _get_kwarg(kwargs, "cumulative", False)
        conditions = _get_kwarg(kwargs, "conditions", sorted(self.conditions()))
        samples = _get_kwarg(
            kwargs,
            "samples",
            sorted({v for c in conditions if c in self for v in self[c]}),
        )  # this computation could be wasted

        width = max(len("%s" % s) for s in samples)
        freqs = {}
        for c in conditions:
            if cumulative:
                freqs[c] = list(self[c]._cumulative_frequencies(samples))
            else:
                freqs[c] = [self[c][sample] for sample in samples]
            width = max(width, max(len("%d" % f) for f in freqs[c]))

        condition_size = max(len("%s" % c) for c in conditions)
        print(" " * condition_size, end=" ")
        for s in samples:
            print("%*s" % (width, s), end=" ")
        print()
        for c in conditions:
            print("%*s" % (condition_size, c), end=" ")
            for f in freqs[c]:
                print("%*d" % (width, f), end=" ")
            print()

    # Mathematical operators

    def __add__(self, other):
        """
        Add counts from two ConditionalFreqDists.
        """
        if not isinstance(other, ConditionalFreqDist):
            return NotImplemented
        result = self.copy()
        for cond in other.conditions():
            result[cond] += other[cond]
        return result

    def __sub__(self, other):
        """
        Subtract count, but keep only results with positive counts.
        """
        if not isinstance(other, ConditionalFreqDist):
            return NotImplemented
        result = self.copy()
        for cond in other.conditions():
            result[cond] -= other[cond]
            if not result[cond]:
                del result[cond]
        return result

    def __or__(self, other):
        """
        Union is the maximum of value in either of the input counters.
        """
        if not isinstance(other, ConditionalFreqDist):
            return NotImplemented
        result = self.copy()
        for cond in other.conditions():
            result[cond] |= other[cond]
        return result

    def __and__(self, other):
        """
        Intersection is the minimum of corresponding counts.
        """
        if not isinstance(other, ConditionalFreqDist):
            return NotImplemented
        result = ConditionalFreqDist()
        for cond in self.conditions():
            newfreqdist = self[cond] & other[cond]
            if newfreqdist:
                result[cond] = newfreqdist
        return result

    # @total_ordering doesn't work here, since the class inherits from a builtin class
    def __le__(self, other):
        if not isinstance(other, ConditionalFreqDist):
            raise_unorderable_types("<=", self, other)
        return set(self.conditions()).issubset(other.conditions()) and all(
            self[c] <= other[c] for c in self.conditions()
        )

    def __lt__(self, other):
        if not isinstance(other, ConditionalFreqDist):
            raise_unorderable_types("<", self, other)
        return self <= other and self != other

    def __ge__(self, other):
        if not isinstance(other, ConditionalFreqDist):
            raise_unorderable_types(">=", self, other)
        return other <= self

    def __gt__(self, other):
        if not isinstance(other, ConditionalFreqDist):
            raise_unorderable_types(">", self, other)
        return other < self

    def deepcopy(self):
        from copy import deepcopy

        return deepcopy(self)

    # ``copy`` must be a deep copy so that +=/-= on a copy's FreqDists
    # cannot mutate the original's.
    copy = deepcopy

    def __repr__(self):
        """
        Return a string representation of this ``ConditionalFreqDist``.

        :rtype: str
        """
        return "<ConditionalFreqDist with %d conditions>" % len(self)
|
| 2141 |
+
|
| 2142 |
+
|
| 2143 |
+
class ConditionalProbDistI(dict, metaclass=ABCMeta):
    """
    A collection of probability distributions for a single experiment
    run under different conditions. Conditional probability
    distributions are used to estimate the likelihood of each sample,
    given the condition under which the experiment was run. For
    example, a conditional probability distribution could be used to
    estimate the probability of each word type in a document, given
    the length of the word type. Formally, a conditional probability
    distribution can be defined as a function that maps from each
    condition to the ``ProbDist`` for the experiment under that
    condition.
    """

    @abstractmethod
    def __init__(self):
        """
        Classes inheriting from ConditionalProbDistI should implement __init__.
        """

    def conditions(self):
        """
        Return a list of the conditions that are represented by
        this ``ConditionalProbDist``. Use the indexing operator to
        access the probability distribution for a given condition.

        :rtype: list
        """
        # The mapping's keys are the conditions themselves.
        return [condition for condition in self.keys()]

    def __repr__(self):
        """
        Return a string representation of this ``ConditionalProbDist``.

        :rtype: str
        """
        return f"<{type(self).__name__} with {len(self)} conditions>"
|
| 2180 |
+
|
| 2181 |
+
|
| 2182 |
+
class ConditionalProbDist(ConditionalProbDistI):
|
| 2183 |
+
"""
|
| 2184 |
+
A conditional probability distribution modeling the experiments
|
| 2185 |
+
that were used to generate a conditional frequency distribution.
|
| 2186 |
+
A ConditionalProbDist is constructed from a
|
| 2187 |
+
``ConditionalFreqDist`` and a ``ProbDist`` factory:
|
| 2188 |
+
|
| 2189 |
+
- The ``ConditionalFreqDist`` specifies the frequency
|
| 2190 |
+
distribution for each condition.
|
| 2191 |
+
- The ``ProbDist`` factory is a function that takes a
|
| 2192 |
+
condition's frequency distribution, and returns its
|
| 2193 |
+
probability distribution. A ``ProbDist`` class's name (such as
|
| 2194 |
+
``MLEProbDist`` or ``HeldoutProbDist``) can be used to specify
|
| 2195 |
+
that class's constructor.
|
| 2196 |
+
|
| 2197 |
+
The first argument to the ``ProbDist`` factory is the frequency
|
| 2198 |
+
distribution that it should model; and the remaining arguments are
|
| 2199 |
+
specified by the ``factory_args`` parameter to the
|
| 2200 |
+
``ConditionalProbDist`` constructor. For example, the following
|
| 2201 |
+
code constructs a ``ConditionalProbDist``, where the probability
|
| 2202 |
+
distribution for each condition is an ``ELEProbDist`` with 10 bins:
|
| 2203 |
+
|
| 2204 |
+
>>> from nltk.corpus import brown
|
| 2205 |
+
>>> from nltk.probability import ConditionalFreqDist
|
| 2206 |
+
>>> from nltk.probability import ConditionalProbDist, ELEProbDist
|
| 2207 |
+
>>> cfdist = ConditionalFreqDist(brown.tagged_words()[:5000])
|
| 2208 |
+
>>> cpdist = ConditionalProbDist(cfdist, ELEProbDist, 10)
|
| 2209 |
+
>>> cpdist['passed'].max()
|
| 2210 |
+
'VBD'
|
| 2211 |
+
>>> cpdist['passed'].prob('VBD') #doctest: +ELLIPSIS
|
| 2212 |
+
0.423...
|
| 2213 |
+
|
| 2214 |
+
"""
|
| 2215 |
+
|
| 2216 |
+
def __init__(self, cfdist, probdist_factory, *factory_args, **factory_kw_args):
|
| 2217 |
+
"""
|
| 2218 |
+
Construct a new conditional probability distribution, based on
|
| 2219 |
+
the given conditional frequency distribution and ``ProbDist``
|
| 2220 |
+
factory.
|
| 2221 |
+
|
| 2222 |
+
:type cfdist: ConditionalFreqDist
|
| 2223 |
+
:param cfdist: The ``ConditionalFreqDist`` specifying the
|
| 2224 |
+
frequency distribution for each condition.
|
| 2225 |
+
:type probdist_factory: class or function
|
| 2226 |
+
:param probdist_factory: The function or class that maps
|
| 2227 |
+
a condition's frequency distribution to its probability
|
| 2228 |
+
distribution. The function is called with the frequency
|
| 2229 |
+
distribution as its first argument,
|
| 2230 |
+
``factory_args`` as its remaining arguments, and
|
| 2231 |
+
``factory_kw_args`` as keyword arguments.
|
| 2232 |
+
:type factory_args: (any)
|
| 2233 |
+
:param factory_args: Extra arguments for ``probdist_factory``.
|
| 2234 |
+
These arguments are usually used to specify extra
|
| 2235 |
+
properties for the probability distributions of individual
|
| 2236 |
+
conditions, such as the number of bins they contain.
|
| 2237 |
+
:type factory_kw_args: (any)
|
| 2238 |
+
:param factory_kw_args: Extra keyword arguments for ``probdist_factory``.
|
| 2239 |
+
"""
|
| 2240 |
+
self._probdist_factory = probdist_factory
|
| 2241 |
+
self._factory_args = factory_args
|
| 2242 |
+
self._factory_kw_args = factory_kw_args
|
| 2243 |
+
|
| 2244 |
+
for condition in cfdist:
|
| 2245 |
+
self[condition] = probdist_factory(
|
| 2246 |
+
cfdist[condition], *factory_args, **factory_kw_args
|
| 2247 |
+
)
|
| 2248 |
+
|
| 2249 |
+
def __missing__(self, key):
|
| 2250 |
+
self[key] = self._probdist_factory(
|
| 2251 |
+
FreqDist(), *self._factory_args, **self._factory_kw_args
|
| 2252 |
+
)
|
| 2253 |
+
return self[key]
|
| 2254 |
+
|
| 2255 |
+
|
| 2256 |
+
class DictionaryConditionalProbDist(ConditionalProbDistI):
|
| 2257 |
+
"""
|
| 2258 |
+
An alternative ConditionalProbDist that simply wraps a dictionary of
|
| 2259 |
+
ProbDists rather than creating these from FreqDists.
|
| 2260 |
+
"""
|
| 2261 |
+
|
| 2262 |
+
def __init__(self, probdist_dict):
|
| 2263 |
+
"""
|
| 2264 |
+
:param probdist_dict: a dictionary containing the probdists indexed
|
| 2265 |
+
by the conditions
|
| 2266 |
+
:type probdist_dict: dict any -> probdist
|
| 2267 |
+
"""
|
| 2268 |
+
self.update(probdist_dict)
|
| 2269 |
+
|
| 2270 |
+
def __missing__(self, key):
|
| 2271 |
+
self[key] = DictionaryProbDist()
|
| 2272 |
+
return self[key]
|
| 2273 |
+
|
| 2274 |
+
|
| 2275 |
+
##//////////////////////////////////////////////////////
|
| 2276 |
+
## Adding in log-space.
|
| 2277 |
+
##//////////////////////////////////////////////////////
|
| 2278 |
+
|
| 2279 |
+
# If the difference is bigger than this, then just take the bigger one:
|
| 2280 |
+
_ADD_LOGS_MAX_DIFF = math.log(1e-30, 2)
|
| 2281 |
+
|
| 2282 |
+
|
| 2283 |
+
def add_logs(logx, logy):
|
| 2284 |
+
"""
|
| 2285 |
+
Given two numbers ``logx`` = *log(x)* and ``logy`` = *log(y)*, return
|
| 2286 |
+
*log(x+y)*. Conceptually, this is the same as returning
|
| 2287 |
+
``log(2**(logx)+2**(logy))``, but the actual implementation
|
| 2288 |
+
avoids overflow errors that could result from direct computation.
|
| 2289 |
+
"""
|
| 2290 |
+
if logx < logy + _ADD_LOGS_MAX_DIFF:
|
| 2291 |
+
return logy
|
| 2292 |
+
if logy < logx + _ADD_LOGS_MAX_DIFF:
|
| 2293 |
+
return logx
|
| 2294 |
+
base = min(logx, logy)
|
| 2295 |
+
return base + math.log(2 ** (logx - base) + 2 ** (logy - base), 2)
|
| 2296 |
+
|
| 2297 |
+
|
| 2298 |
+
def sum_logs(logs):
|
| 2299 |
+
return reduce(add_logs, logs[1:], logs[0]) if len(logs) != 0 else _NINF
|
| 2300 |
+
|
| 2301 |
+
|
| 2302 |
+
##//////////////////////////////////////////////////////
|
| 2303 |
+
## Probabilistic Mix-in
|
| 2304 |
+
##//////////////////////////////////////////////////////
|
| 2305 |
+
|
| 2306 |
+
|
| 2307 |
+
class ProbabilisticMixIn:
|
| 2308 |
+
"""
|
| 2309 |
+
A mix-in class to associate probabilities with other classes
|
| 2310 |
+
(trees, rules, etc.). To use the ``ProbabilisticMixIn`` class,
|
| 2311 |
+
define a new class that derives from an existing class and from
|
| 2312 |
+
ProbabilisticMixIn. You will need to define a new constructor for
|
| 2313 |
+
the new class, which explicitly calls the constructors of both its
|
| 2314 |
+
parent classes. For example:
|
| 2315 |
+
|
| 2316 |
+
>>> from nltk.probability import ProbabilisticMixIn
|
| 2317 |
+
>>> class A:
|
| 2318 |
+
... def __init__(self, x, y): self.data = (x,y)
|
| 2319 |
+
...
|
| 2320 |
+
>>> class ProbabilisticA(A, ProbabilisticMixIn):
|
| 2321 |
+
... def __init__(self, x, y, **prob_kwarg):
|
| 2322 |
+
... A.__init__(self, x, y)
|
| 2323 |
+
... ProbabilisticMixIn.__init__(self, **prob_kwarg)
|
| 2324 |
+
|
| 2325 |
+
See the documentation for the ProbabilisticMixIn
|
| 2326 |
+
``constructor<__init__>`` for information about the arguments it
|
| 2327 |
+
expects.
|
| 2328 |
+
|
| 2329 |
+
You should generally also redefine the string representation
|
| 2330 |
+
methods, the comparison methods, and the hashing method.
|
| 2331 |
+
"""
|
| 2332 |
+
|
| 2333 |
+
def __init__(self, **kwargs):
|
| 2334 |
+
"""
|
| 2335 |
+
Initialize this object's probability. This initializer should
|
| 2336 |
+
be called by subclass constructors. ``prob`` should generally be
|
| 2337 |
+
the first argument for those constructors.
|
| 2338 |
+
|
| 2339 |
+
:param prob: The probability associated with the object.
|
| 2340 |
+
:type prob: float
|
| 2341 |
+
:param logprob: The log of the probability associated with
|
| 2342 |
+
the object.
|
| 2343 |
+
:type logprob: float
|
| 2344 |
+
"""
|
| 2345 |
+
if "prob" in kwargs:
|
| 2346 |
+
if "logprob" in kwargs:
|
| 2347 |
+
raise TypeError("Must specify either prob or logprob " "(not both)")
|
| 2348 |
+
else:
|
| 2349 |
+
ProbabilisticMixIn.set_prob(self, kwargs["prob"])
|
| 2350 |
+
elif "logprob" in kwargs:
|
| 2351 |
+
ProbabilisticMixIn.set_logprob(self, kwargs["logprob"])
|
| 2352 |
+
else:
|
| 2353 |
+
self.__prob = self.__logprob = None
|
| 2354 |
+
|
| 2355 |
+
def set_prob(self, prob):
|
| 2356 |
+
"""
|
| 2357 |
+
Set the probability associated with this object to ``prob``.
|
| 2358 |
+
|
| 2359 |
+
:param prob: The new probability
|
| 2360 |
+
:type prob: float
|
| 2361 |
+
"""
|
| 2362 |
+
self.__prob = prob
|
| 2363 |
+
self.__logprob = None
|
| 2364 |
+
|
| 2365 |
+
def set_logprob(self, logprob):
|
| 2366 |
+
"""
|
| 2367 |
+
Set the log probability associated with this object to
|
| 2368 |
+
``logprob``. I.e., set the probability associated with this
|
| 2369 |
+
object to ``2**(logprob)``.
|
| 2370 |
+
|
| 2371 |
+
:param logprob: The new log probability
|
| 2372 |
+
:type logprob: float
|
| 2373 |
+
"""
|
| 2374 |
+
self.__logprob = logprob
|
| 2375 |
+
self.__prob = None
|
| 2376 |
+
|
| 2377 |
+
def prob(self):
|
| 2378 |
+
"""
|
| 2379 |
+
Return the probability associated with this object.
|
| 2380 |
+
|
| 2381 |
+
:rtype: float
|
| 2382 |
+
"""
|
| 2383 |
+
if self.__prob is None:
|
| 2384 |
+
if self.__logprob is None:
|
| 2385 |
+
return None
|
| 2386 |
+
self.__prob = 2 ** (self.__logprob)
|
| 2387 |
+
return self.__prob
|
| 2388 |
+
|
| 2389 |
+
def logprob(self):
|
| 2390 |
+
"""
|
| 2391 |
+
Return ``log(p)``, where ``p`` is the probability associated
|
| 2392 |
+
with this object.
|
| 2393 |
+
|
| 2394 |
+
:rtype: float
|
| 2395 |
+
"""
|
| 2396 |
+
if self.__logprob is None:
|
| 2397 |
+
if self.__prob is None:
|
| 2398 |
+
return None
|
| 2399 |
+
self.__logprob = math.log(self.__prob, 2)
|
| 2400 |
+
return self.__logprob
|
| 2401 |
+
|
| 2402 |
+
|
| 2403 |
+
class ImmutableProbabilisticMixIn(ProbabilisticMixIn):
|
| 2404 |
+
def set_prob(self, prob):
|
| 2405 |
+
raise ValueError("%s is immutable" % self.__class__.__name__)
|
| 2406 |
+
|
| 2407 |
+
def set_logprob(self, prob):
|
| 2408 |
+
raise ValueError("%s is immutable" % self.__class__.__name__)
|
| 2409 |
+
|
| 2410 |
+
|
| 2411 |
+
## Helper function for processing keyword arguments
|
| 2412 |
+
|
| 2413 |
+
|
| 2414 |
+
def _get_kwarg(kwargs, key, default):
|
| 2415 |
+
if key in kwargs:
|
| 2416 |
+
arg = kwargs[key]
|
| 2417 |
+
del kwargs[key]
|
| 2418 |
+
else:
|
| 2419 |
+
arg = default
|
| 2420 |
+
return arg
|
| 2421 |
+
|
| 2422 |
+
|
| 2423 |
+
##//////////////////////////////////////////////////////
|
| 2424 |
+
## Demonstration
|
| 2425 |
+
##//////////////////////////////////////////////////////
|
| 2426 |
+
|
| 2427 |
+
|
| 2428 |
+
def _create_rand_fdist(numsamples, numoutcomes):
|
| 2429 |
+
"""
|
| 2430 |
+
Create a new frequency distribution, with random samples. The
|
| 2431 |
+
samples are numbers from 1 to ``numsamples``, and are generated by
|
| 2432 |
+
summing two numbers, each of which has a uniform distribution.
|
| 2433 |
+
"""
|
| 2434 |
+
|
| 2435 |
+
fdist = FreqDist()
|
| 2436 |
+
for x in range(numoutcomes):
|
| 2437 |
+
y = random.randint(1, (1 + numsamples) // 2) + random.randint(
|
| 2438 |
+
0, numsamples // 2
|
| 2439 |
+
)
|
| 2440 |
+
fdist[y] += 1
|
| 2441 |
+
return fdist
|
| 2442 |
+
|
| 2443 |
+
|
| 2444 |
+
def _create_sum_pdist(numsamples):
|
| 2445 |
+
"""
|
| 2446 |
+
Return the true probability distribution for the experiment
|
| 2447 |
+
``_create_rand_fdist(numsamples, x)``.
|
| 2448 |
+
"""
|
| 2449 |
+
fdist = FreqDist()
|
| 2450 |
+
for x in range(1, (1 + numsamples) // 2 + 1):
|
| 2451 |
+
for y in range(0, numsamples // 2 + 1):
|
| 2452 |
+
fdist[x + y] += 1
|
| 2453 |
+
return MLEProbDist(fdist)
|
| 2454 |
+
|
| 2455 |
+
|
| 2456 |
+
def demo(numsamples=6, numoutcomes=500):
|
| 2457 |
+
"""
|
| 2458 |
+
A demonstration of frequency distributions and probability
|
| 2459 |
+
distributions. This demonstration creates three frequency
|
| 2460 |
+
distributions with, and uses them to sample a random process with
|
| 2461 |
+
``numsamples`` samples. Each frequency distribution is sampled
|
| 2462 |
+
``numoutcomes`` times. These three frequency distributions are
|
| 2463 |
+
then used to build six probability distributions. Finally, the
|
| 2464 |
+
probability estimates of these distributions are compared to the
|
| 2465 |
+
actual probability of each sample.
|
| 2466 |
+
|
| 2467 |
+
:type numsamples: int
|
| 2468 |
+
:param numsamples: The number of samples to use in each demo
|
| 2469 |
+
frequency distributions.
|
| 2470 |
+
:type numoutcomes: int
|
| 2471 |
+
:param numoutcomes: The total number of outcomes for each
|
| 2472 |
+
demo frequency distribution. These outcomes are divided into
|
| 2473 |
+
``numsamples`` bins.
|
| 2474 |
+
:rtype: None
|
| 2475 |
+
"""
|
| 2476 |
+
|
| 2477 |
+
# Randomly sample a stochastic process three times.
|
| 2478 |
+
fdist1 = _create_rand_fdist(numsamples, numoutcomes)
|
| 2479 |
+
fdist2 = _create_rand_fdist(numsamples, numoutcomes)
|
| 2480 |
+
fdist3 = _create_rand_fdist(numsamples, numoutcomes)
|
| 2481 |
+
|
| 2482 |
+
# Use our samples to create probability distributions.
|
| 2483 |
+
pdists = [
|
| 2484 |
+
MLEProbDist(fdist1),
|
| 2485 |
+
LidstoneProbDist(fdist1, 0.5, numsamples),
|
| 2486 |
+
HeldoutProbDist(fdist1, fdist2, numsamples),
|
| 2487 |
+
HeldoutProbDist(fdist2, fdist1, numsamples),
|
| 2488 |
+
CrossValidationProbDist([fdist1, fdist2, fdist3], numsamples),
|
| 2489 |
+
SimpleGoodTuringProbDist(fdist1),
|
| 2490 |
+
SimpleGoodTuringProbDist(fdist1, 7),
|
| 2491 |
+
_create_sum_pdist(numsamples),
|
| 2492 |
+
]
|
| 2493 |
+
|
| 2494 |
+
# Find the probability of each sample.
|
| 2495 |
+
vals = []
|
| 2496 |
+
for n in range(1, numsamples + 1):
|
| 2497 |
+
vals.append(tuple([n, fdist1.freq(n)] + [pdist.prob(n) for pdist in pdists]))
|
| 2498 |
+
|
| 2499 |
+
# Print the results in a formatted table.
|
| 2500 |
+
print(
|
| 2501 |
+
"%d samples (1-%d); %d outcomes were sampled for each FreqDist"
|
| 2502 |
+
% (numsamples, numsamples, numoutcomes)
|
| 2503 |
+
)
|
| 2504 |
+
print("=" * 9 * (len(pdists) + 2))
|
| 2505 |
+
FORMATSTR = " FreqDist " + "%8s " * (len(pdists) - 1) + "| Actual"
|
| 2506 |
+
print(FORMATSTR % tuple(repr(pdist)[1:9] for pdist in pdists[:-1]))
|
| 2507 |
+
print("-" * 9 * (len(pdists) + 2))
|
| 2508 |
+
FORMATSTR = "%3d %8.6f " + "%8.6f " * (len(pdists) - 1) + "| %8.6f"
|
| 2509 |
+
for val in vals:
|
| 2510 |
+
print(FORMATSTR % val)
|
| 2511 |
+
|
| 2512 |
+
# Print the totals for each column (should all be 1.0)
|
| 2513 |
+
zvals = list(zip(*vals))
|
| 2514 |
+
sums = [sum(val) for val in zvals[1:]]
|
| 2515 |
+
print("-" * 9 * (len(pdists) + 2))
|
| 2516 |
+
FORMATSTR = "Total " + "%8.6f " * (len(pdists)) + "| %8.6f"
|
| 2517 |
+
print(FORMATSTR % tuple(sums))
|
| 2518 |
+
print("=" * 9 * (len(pdists) + 2))
|
| 2519 |
+
|
| 2520 |
+
# Display the distributions themselves, if they're short enough.
|
| 2521 |
+
if len("%s" % fdist1) < 70:
|
| 2522 |
+
print(" fdist1: %s" % fdist1)
|
| 2523 |
+
print(" fdist2: %s" % fdist2)
|
| 2524 |
+
print(" fdist3: %s" % fdist3)
|
| 2525 |
+
print()
|
| 2526 |
+
|
| 2527 |
+
print("Generating:")
|
| 2528 |
+
for pdist in pdists:
|
| 2529 |
+
fdist = FreqDist(pdist.generate() for i in range(5000))
|
| 2530 |
+
print("{:>20} {}".format(pdist.__class__.__name__[:20], ("%s" % fdist)[:55]))
|
| 2531 |
+
print()
|
| 2532 |
+
|
| 2533 |
+
|
| 2534 |
+
def gt_demo():
|
| 2535 |
+
from nltk import corpus
|
| 2536 |
+
|
| 2537 |
+
emma_words = corpus.gutenberg.words("austen-emma.txt")
|
| 2538 |
+
fd = FreqDist(emma_words)
|
| 2539 |
+
sgt = SimpleGoodTuringProbDist(fd)
|
| 2540 |
+
print("{:>18} {:>8} {:>14}".format("word", "frequency", "SimpleGoodTuring"))
|
| 2541 |
+
fd_keys_sorted = (
|
| 2542 |
+
key for key, value in sorted(fd.items(), key=lambda item: item[1], reverse=True)
|
| 2543 |
+
)
|
| 2544 |
+
for key in fd_keys_sorted:
|
| 2545 |
+
print("%18s %8d %14e" % (key, fd[key], sgt.prob(key)))
|
| 2546 |
+
|
| 2547 |
+
|
| 2548 |
+
if __name__ == "__main__":
|
| 2549 |
+
demo(6, 10)
|
| 2550 |
+
demo(5, 5000)
|
| 2551 |
+
gt_demo()
|
| 2552 |
+
|
| 2553 |
+
__all__ = [
|
| 2554 |
+
"ConditionalFreqDist",
|
| 2555 |
+
"ConditionalProbDist",
|
| 2556 |
+
"ConditionalProbDistI",
|
| 2557 |
+
"CrossValidationProbDist",
|
| 2558 |
+
"DictionaryConditionalProbDist",
|
| 2559 |
+
"DictionaryProbDist",
|
| 2560 |
+
"ELEProbDist",
|
| 2561 |
+
"FreqDist",
|
| 2562 |
+
"SimpleGoodTuringProbDist",
|
| 2563 |
+
"HeldoutProbDist",
|
| 2564 |
+
"ImmutableProbabilisticMixIn",
|
| 2565 |
+
"LaplaceProbDist",
|
| 2566 |
+
"LidstoneProbDist",
|
| 2567 |
+
"MLEProbDist",
|
| 2568 |
+
"MutableProbDist",
|
| 2569 |
+
"KneserNeyProbDist",
|
| 2570 |
+
"ProbDistI",
|
| 2571 |
+
"ProbabilisticMixIn",
|
| 2572 |
+
"UniformProbDist",
|
| 2573 |
+
"WittenBellProbDist",
|
| 2574 |
+
"add_logs",
|
| 2575 |
+
"log_likelihood",
|
| 2576 |
+
"sum_logs",
|
| 2577 |
+
"entropy",
|
| 2578 |
+
]
|
lib/python3.10/site-packages/nltk/test/bleu.doctest
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
==========
|
| 2 |
+
BLEU tests
|
| 3 |
+
==========
|
| 4 |
+
|
| 5 |
+
>>> from nltk.translate import bleu
|
| 6 |
+
|
| 7 |
+
If the candidate has no alignment to any of the references, the BLEU score is 0.
|
| 8 |
+
|
| 9 |
+
>>> bleu(
|
| 10 |
+
... ['The candidate has no alignment to any of the references'.split()],
|
| 11 |
+
... 'John loves Mary'.split(),
|
| 12 |
+
... (1,),
|
| 13 |
+
... )
|
| 14 |
+
0
|
| 15 |
+
|
| 16 |
+
This is an implementation of the smoothing techniques
|
| 17 |
+
for segment-level BLEU scores that was presented in
|
| 18 |
+
Boxing Chen and Collin Cherry (2014) A Systematic Comparison of
|
| 19 |
+
Smoothing Techniques for Sentence-Level BLEU. In WMT14.
|
| 20 |
+
http://acl2014.org/acl2014/W14-33/pdf/W14-3346.pdf
|
| 21 |
+
>>> from nltk.translate.bleu_score import sentence_bleu,SmoothingFunction
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
>>> sentence_bleu(
|
| 25 |
+
... ['It is a place of quiet contemplation .'.split()],
|
| 26 |
+
... 'It is .'.split(),
|
| 27 |
+
... smoothing_function=SmoothingFunction().method4,
|
| 28 |
+
... )*100
|
| 29 |
+
4.4267...
|
lib/python3.10/site-packages/nltk/test/ccg.doctest
ADDED
|
@@ -0,0 +1,376 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
| 2 |
+
.. For license information, see LICENSE.TXT
|
| 3 |
+
|
| 4 |
+
==============================
|
| 5 |
+
Combinatory Categorial Grammar
|
| 6 |
+
==============================
|
| 7 |
+
|
| 8 |
+
Relative Clauses
|
| 9 |
+
----------------
|
| 10 |
+
|
| 11 |
+
>>> from nltk.ccg import chart, lexicon
|
| 12 |
+
|
| 13 |
+
Construct a lexicon:
|
| 14 |
+
|
| 15 |
+
>>> lex = lexicon.fromstring('''
|
| 16 |
+
... :- S, NP, N, VP
|
| 17 |
+
...
|
| 18 |
+
... Det :: NP/N
|
| 19 |
+
... Pro :: NP
|
| 20 |
+
... Modal :: S\\NP/VP
|
| 21 |
+
...
|
| 22 |
+
... TV :: VP/NP
|
| 23 |
+
... DTV :: TV/NP
|
| 24 |
+
...
|
| 25 |
+
... the => Det
|
| 26 |
+
...
|
| 27 |
+
... that => Det
|
| 28 |
+
... that => NP
|
| 29 |
+
...
|
| 30 |
+
... I => Pro
|
| 31 |
+
... you => Pro
|
| 32 |
+
... we => Pro
|
| 33 |
+
...
|
| 34 |
+
... chef => N
|
| 35 |
+
... cake => N
|
| 36 |
+
... children => N
|
| 37 |
+
... dough => N
|
| 38 |
+
...
|
| 39 |
+
... will => Modal
|
| 40 |
+
... should => Modal
|
| 41 |
+
... might => Modal
|
| 42 |
+
... must => Modal
|
| 43 |
+
...
|
| 44 |
+
... and => var\\.,var/.,var
|
| 45 |
+
...
|
| 46 |
+
... to => VP[to]/VP
|
| 47 |
+
...
|
| 48 |
+
... without => (VP\\VP)/VP[ing]
|
| 49 |
+
...
|
| 50 |
+
... be => TV
|
| 51 |
+
... cook => TV
|
| 52 |
+
... eat => TV
|
| 53 |
+
...
|
| 54 |
+
... cooking => VP[ing]/NP
|
| 55 |
+
...
|
| 56 |
+
... give => DTV
|
| 57 |
+
...
|
| 58 |
+
... is => (S\\NP)/NP
|
| 59 |
+
... prefer => (S\\NP)/NP
|
| 60 |
+
...
|
| 61 |
+
... which => (N\\N)/(S/NP)
|
| 62 |
+
...
|
| 63 |
+
... persuade => (VP/VP[to])/NP
|
| 64 |
+
... ''')
|
| 65 |
+
|
| 66 |
+
>>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
|
| 67 |
+
>>> for parse in parser.parse("you prefer that cake".split()):
|
| 68 |
+
... chart.printCCGDerivation(parse)
|
| 69 |
+
... break
|
| 70 |
+
...
|
| 71 |
+
you prefer that cake
|
| 72 |
+
NP ((S\NP)/NP) (NP/N) N
|
| 73 |
+
-------------->
|
| 74 |
+
NP
|
| 75 |
+
--------------------------->
|
| 76 |
+
(S\NP)
|
| 77 |
+
--------------------------------<
|
| 78 |
+
S
|
| 79 |
+
|
| 80 |
+
>>> for parse in parser.parse("that is the cake which you prefer".split()):
|
| 81 |
+
... chart.printCCGDerivation(parse)
|
| 82 |
+
... break
|
| 83 |
+
...
|
| 84 |
+
that is the cake which you prefer
|
| 85 |
+
NP ((S\NP)/NP) (NP/N) N ((N\N)/(S/NP)) NP ((S\NP)/NP)
|
| 86 |
+
----->T
|
| 87 |
+
(S/(S\NP))
|
| 88 |
+
------------------>B
|
| 89 |
+
(S/NP)
|
| 90 |
+
---------------------------------->
|
| 91 |
+
(N\N)
|
| 92 |
+
----------------------------------------<
|
| 93 |
+
N
|
| 94 |
+
------------------------------------------------>
|
| 95 |
+
NP
|
| 96 |
+
------------------------------------------------------------->
|
| 97 |
+
(S\NP)
|
| 98 |
+
-------------------------------------------------------------------<
|
| 99 |
+
S
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
Some other sentences to try:
|
| 103 |
+
"that is the cake which we will persuade the chef to cook"
|
| 104 |
+
"that is the cake which we will persuade the chef to give the children"
|
| 105 |
+
|
| 106 |
+
>>> sent = "that is the dough which you will eat without cooking".split()
|
| 107 |
+
>>> nosub_parser = chart.CCGChartParser(lex, chart.ApplicationRuleSet +
|
| 108 |
+
... chart.CompositionRuleSet + chart.TypeRaiseRuleSet)
|
| 109 |
+
|
| 110 |
+
Without Substitution (no output)
|
| 111 |
+
|
| 112 |
+
>>> for parse in nosub_parser.parse(sent):
|
| 113 |
+
... chart.printCCGDerivation(parse)
|
| 114 |
+
|
| 115 |
+
With Substitution:
|
| 116 |
+
|
| 117 |
+
>>> for parse in parser.parse(sent):
|
| 118 |
+
... chart.printCCGDerivation(parse)
|
| 119 |
+
... break
|
| 120 |
+
...
|
| 121 |
+
that is the dough which you will eat without cooking
|
| 122 |
+
NP ((S\NP)/NP) (NP/N) N ((N\N)/(S/NP)) NP ((S\NP)/VP) (VP/NP) ((VP\VP)/VP['ing']) (VP['ing']/NP)
|
| 123 |
+
----->T
|
| 124 |
+
(S/(S\NP))
|
| 125 |
+
------------------------------------->B
|
| 126 |
+
((VP\VP)/NP)
|
| 127 |
+
----------------------------------------------<Sx
|
| 128 |
+
(VP/NP)
|
| 129 |
+
----------------------------------------------------------->B
|
| 130 |
+
((S\NP)/NP)
|
| 131 |
+
---------------------------------------------------------------->B
|
| 132 |
+
(S/NP)
|
| 133 |
+
-------------------------------------------------------------------------------->
|
| 134 |
+
(N\N)
|
| 135 |
+
---------------------------------------------------------------------------------------<
|
| 136 |
+
N
|
| 137 |
+
----------------------------------------------------------------------------------------------->
|
| 138 |
+
NP
|
| 139 |
+
------------------------------------------------------------------------------------------------------------>
|
| 140 |
+
(S\NP)
|
| 141 |
+
------------------------------------------------------------------------------------------------------------------<
|
| 142 |
+
S
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
Conjunction
|
| 146 |
+
-----------
|
| 147 |
+
|
| 148 |
+
>>> from nltk.ccg.chart import CCGChartParser, ApplicationRuleSet, CompositionRuleSet
|
| 149 |
+
>>> from nltk.ccg.chart import SubstitutionRuleSet, TypeRaiseRuleSet, printCCGDerivation
|
| 150 |
+
>>> from nltk.ccg import lexicon
|
| 151 |
+
|
| 152 |
+
Lexicons for the tests:
|
| 153 |
+
|
| 154 |
+
>>> test1_lex = '''
|
| 155 |
+
... :- S,N,NP,VP
|
| 156 |
+
... I => NP
|
| 157 |
+
... you => NP
|
| 158 |
+
... will => S\\NP/VP
|
| 159 |
+
... cook => VP/NP
|
| 160 |
+
... which => (N\\N)/(S/NP)
|
| 161 |
+
... and => var\\.,var/.,var
|
| 162 |
+
... might => S\\NP/VP
|
| 163 |
+
... eat => VP/NP
|
| 164 |
+
... the => NP/N
|
| 165 |
+
... mushrooms => N
|
| 166 |
+
... parsnips => N'''
|
| 167 |
+
>>> test2_lex = '''
|
| 168 |
+
... :- N, S, NP, VP
|
| 169 |
+
... articles => N
|
| 170 |
+
... the => NP/N
|
| 171 |
+
... and => var\\.,var/.,var
|
| 172 |
+
... which => (N\\N)/(S/NP)
|
| 173 |
+
... I => NP
|
| 174 |
+
... anyone => NP
|
| 175 |
+
... will => (S/VP)\\NP
|
| 176 |
+
... file => VP/NP
|
| 177 |
+
... without => (VP\\VP)/VP[ing]
|
| 178 |
+
... forget => VP/NP
|
| 179 |
+
... reading => VP[ing]/NP
|
| 180 |
+
... '''
|
| 181 |
+
|
| 182 |
+
Tests handling of conjunctions.
|
| 183 |
+
Note that while the two derivations are different, they are semantically equivalent.
|
| 184 |
+
|
| 185 |
+
>>> lex = lexicon.fromstring(test1_lex)
|
| 186 |
+
>>> parser = CCGChartParser(lex, ApplicationRuleSet + CompositionRuleSet + SubstitutionRuleSet)
|
| 187 |
+
>>> for parse in parser.parse("I will cook and might eat the mushrooms and parsnips".split()):
|
| 188 |
+
... printCCGDerivation(parse)
|
| 189 |
+
I will cook and might eat the mushrooms and parsnips
|
| 190 |
+
NP ((S\NP)/VP) (VP/NP) ((_var0\.,_var0)/.,_var0) ((S\NP)/VP) (VP/NP) (NP/N) N ((_var0\.,_var0)/.,_var0) N
|
| 191 |
+
---------------------->B
|
| 192 |
+
((S\NP)/NP)
|
| 193 |
+
---------------------->B
|
| 194 |
+
((S\NP)/NP)
|
| 195 |
+
------------------------------------------------->
|
| 196 |
+
(((S\NP)/NP)\.,((S\NP)/NP))
|
| 197 |
+
-----------------------------------------------------------------------<
|
| 198 |
+
((S\NP)/NP)
|
| 199 |
+
------------------------------------->
|
| 200 |
+
(N\.,N)
|
| 201 |
+
------------------------------------------------<
|
| 202 |
+
N
|
| 203 |
+
-------------------------------------------------------->
|
| 204 |
+
NP
|
| 205 |
+
------------------------------------------------------------------------------------------------------------------------------->
|
| 206 |
+
(S\NP)
|
| 207 |
+
-----------------------------------------------------------------------------------------------------------------------------------<
|
| 208 |
+
S
|
| 209 |
+
I will cook and might eat the mushrooms and parsnips
|
| 210 |
+
NP ((S\NP)/VP) (VP/NP) ((_var0\.,_var0)/.,_var0) ((S\NP)/VP) (VP/NP) (NP/N) N ((_var0\.,_var0)/.,_var0) N
|
| 211 |
+
---------------------->B
|
| 212 |
+
((S\NP)/NP)
|
| 213 |
+
---------------------->B
|
| 214 |
+
((S\NP)/NP)
|
| 215 |
+
------------------------------------------------->
|
| 216 |
+
(((S\NP)/NP)\.,((S\NP)/NP))
|
| 217 |
+
-----------------------------------------------------------------------<
|
| 218 |
+
((S\NP)/NP)
|
| 219 |
+
------------------------------------------------------------------------------->B
|
| 220 |
+
((S\NP)/N)
|
| 221 |
+
------------------------------------->
|
| 222 |
+
(N\.,N)
|
| 223 |
+
------------------------------------------------<
|
| 224 |
+
N
|
| 225 |
+
------------------------------------------------------------------------------------------------------------------------------->
|
| 226 |
+
(S\NP)
|
| 227 |
+
-----------------------------------------------------------------------------------------------------------------------------------<
|
| 228 |
+
S
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
Tests handling subject extraction.
|
| 232 |
+
Interesting to point that the two parses are clearly semantically different.
|
| 233 |
+
|
| 234 |
+
>>> lex = lexicon.fromstring(test2_lex)
|
| 235 |
+
>>> parser = CCGChartParser(lex, ApplicationRuleSet + CompositionRuleSet + SubstitutionRuleSet)
|
| 236 |
+
>>> for parse in parser.parse("articles which I will file and forget without reading".split()):
|
| 237 |
+
... printCCGDerivation(parse)
|
| 238 |
+
articles which I will file and forget without reading
|
| 239 |
+
N ((N\N)/(S/NP)) NP ((S/VP)\NP) (VP/NP) ((_var0\.,_var0)/.,_var0) (VP/NP) ((VP\VP)/VP['ing']) (VP['ing']/NP)
|
| 240 |
+
-----------------<
|
| 241 |
+
(S/VP)
|
| 242 |
+
------------------------------------->B
|
| 243 |
+
((VP\VP)/NP)
|
| 244 |
+
----------------------------------------------<Sx
|
| 245 |
+
(VP/NP)
|
| 246 |
+
------------------------------------------------------------------------->
|
| 247 |
+
((VP/NP)\.,(VP/NP))
|
| 248 |
+
----------------------------------------------------------------------------------<
|
| 249 |
+
(VP/NP)
|
| 250 |
+
--------------------------------------------------------------------------------------------------->B
|
| 251 |
+
(S/NP)
|
| 252 |
+
------------------------------------------------------------------------------------------------------------------->
|
| 253 |
+
(N\N)
|
| 254 |
+
-----------------------------------------------------------------------------------------------------------------------------<
|
| 255 |
+
N
|
| 256 |
+
articles which I will file and forget without reading
|
| 257 |
+
N ((N\N)/(S/NP)) NP ((S/VP)\NP) (VP/NP) ((_var0\.,_var0)/.,_var0) (VP/NP) ((VP\VP)/VP['ing']) (VP['ing']/NP)
|
| 258 |
+
-----------------<
|
| 259 |
+
(S/VP)
|
| 260 |
+
------------------------------------>
|
| 261 |
+
((VP/NP)\.,(VP/NP))
|
| 262 |
+
---------------------------------------------<
|
| 263 |
+
(VP/NP)
|
| 264 |
+
------------------------------------->B
|
| 265 |
+
((VP\VP)/NP)
|
| 266 |
+
----------------------------------------------------------------------------------<Sx
|
| 267 |
+
(VP/NP)
|
| 268 |
+
--------------------------------------------------------------------------------------------------->B
|
| 269 |
+
(S/NP)
|
| 270 |
+
------------------------------------------------------------------------------------------------------------------->
|
| 271 |
+
(N\N)
|
| 272 |
+
-----------------------------------------------------------------------------------------------------------------------------<
|
| 273 |
+
N
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
Unicode support
|
| 277 |
+
---------------
|
| 278 |
+
|
| 279 |
+
Unicode words are supported.
|
| 280 |
+
|
| 281 |
+
>>> from nltk.ccg import chart, lexicon
|
| 282 |
+
|
| 283 |
+
Lexicons for the tests:
|
| 284 |
+
|
| 285 |
+
>>> lex = lexicon.fromstring('''
|
| 286 |
+
... :- S, N, NP, PP
|
| 287 |
+
...
|
| 288 |
+
... AdjI :: N\\N
|
| 289 |
+
... AdjD :: N/N
|
| 290 |
+
... AdvD :: S/S
|
| 291 |
+
... AdvI :: S\\S
|
| 292 |
+
... Det :: NP/N
|
| 293 |
+
... PrepNPCompl :: PP/NP
|
| 294 |
+
... PrepNAdjN :: S\\S/N
|
| 295 |
+
... PrepNAdjNP :: S\\S/NP
|
| 296 |
+
... VPNP :: S\\NP/NP
|
| 297 |
+
... VPPP :: S\\NP/PP
|
| 298 |
+
... VPser :: S\\NP/AdjI
|
| 299 |
+
...
|
| 300 |
+
... auto => N
|
| 301 |
+
... bebidas => N
|
| 302 |
+
... cine => N
|
| 303 |
+
... ley => N
|
| 304 |
+
... libro => N
|
| 305 |
+
... ministro => N
|
| 306 |
+
... panadería => N
|
| 307 |
+
... presidente => N
|
| 308 |
+
... super => N
|
| 309 |
+
...
|
| 310 |
+
... el => Det
|
| 311 |
+
... la => Det
|
| 312 |
+
... las => Det
|
| 313 |
+
... un => Det
|
| 314 |
+
...
|
| 315 |
+
... Ana => NP
|
| 316 |
+
... Pablo => NP
|
| 317 |
+
...
|
| 318 |
+
... y => var\\.,var/.,var
|
| 319 |
+
...
|
| 320 |
+
... pero => (S/NP)\\(S/NP)/(S/NP)
|
| 321 |
+
...
|
| 322 |
+
... anunció => VPNP
|
| 323 |
+
... compró => VPNP
|
| 324 |
+
... cree => S\\NP/S[dep]
|
| 325 |
+
... desmintió => VPNP
|
| 326 |
+
... lee => VPNP
|
| 327 |
+
... fueron => VPPP
|
| 328 |
+
...
|
| 329 |
+
... es => VPser
|
| 330 |
+
...
|
| 331 |
+
... interesante => AdjD
|
| 332 |
+
... interesante => AdjI
|
| 333 |
+
... nueva => AdjD
|
| 334 |
+
... nueva => AdjI
|
| 335 |
+
...
|
| 336 |
+
... a => PrepNPCompl
|
| 337 |
+
... en => PrepNAdjN
|
| 338 |
+
... en => PrepNAdjNP
|
| 339 |
+
...
|
| 340 |
+
... ayer => AdvI
|
| 341 |
+
...
|
| 342 |
+
... que => (NP\\NP)/(S/NP)
|
| 343 |
+
... que => S[dep]/S
|
| 344 |
+
... ''')
|
| 345 |
+
|
| 346 |
+
>>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
|
| 347 |
+
>>> for parse in parser.parse(u"el ministro anunció pero el presidente desmintió la nueva ley".split()):
|
| 348 |
+
... printCCGDerivation(parse) # doctest: +SKIP
|
| 349 |
+
... # it fails on python2.7 because of the unicode problem explained in https://github.com/nltk/nltk/pull/1354
|
| 350 |
+
... break
|
| 351 |
+
el ministro anunció pero el presidente desmintió la nueva ley
|
| 352 |
+
(NP/N) N ((S\NP)/NP) (((S/NP)\(S/NP))/(S/NP)) (NP/N) N ((S\NP)/NP) (NP/N) (N/N) N
|
| 353 |
+
------------------>
|
| 354 |
+
NP
|
| 355 |
+
------------------>T
|
| 356 |
+
(S/(S\NP))
|
| 357 |
+
-------------------->
|
| 358 |
+
NP
|
| 359 |
+
-------------------->T
|
| 360 |
+
(S/(S\NP))
|
| 361 |
+
--------------------------------->B
|
| 362 |
+
(S/NP)
|
| 363 |
+
----------------------------------------------------------->
|
| 364 |
+
((S/NP)\(S/NP))
|
| 365 |
+
------------>
|
| 366 |
+
N
|
| 367 |
+
-------------------->
|
| 368 |
+
NP
|
| 369 |
+
--------------------<T
|
| 370 |
+
(S\(S/NP))
|
| 371 |
+
-------------------------------------------------------------------------------<B
|
| 372 |
+
(S\(S/NP))
|
| 373 |
+
--------------------------------------------------------------------------------------------<B
|
| 374 |
+
(S/NP)
|
| 375 |
+
-------------------------------------------------------------------------------------------------------------->
|
| 376 |
+
S
|
lib/python3.10/site-packages/nltk/test/chat80.doctest
ADDED
|
@@ -0,0 +1,232 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
| 2 |
+
.. For license information, see LICENSE.TXT
|
| 3 |
+
|
| 4 |
+
=======
|
| 5 |
+
Chat-80
|
| 6 |
+
=======
|
| 7 |
+
|
| 8 |
+
Chat-80 was a natural language system which allowed the user to
|
| 9 |
+
interrogate a Prolog knowledge base in the domain of world
|
| 10 |
+
geography. It was developed in the early '80s by Warren and Pereira; see
|
| 11 |
+
`<https://aclanthology.org/J82-3002.pdf>`_ for a description and
|
| 12 |
+
`<http://www.cis.upenn.edu/~pereira/oldies.html>`_ for the source
|
| 13 |
+
files.
|
| 14 |
+
|
| 15 |
+
The ``chat80`` module contains functions to extract data from the Chat-80
|
| 16 |
+
relation files ('the world database'), and convert them into a format
|
| 17 |
+
that can be incorporated in the FOL models of
|
| 18 |
+
``nltk.sem.evaluate``. The code assumes that the Prolog
|
| 19 |
+
input files are available in the NLTK corpora directory.
|
| 20 |
+
|
| 21 |
+
The Chat-80 World Database consists of the following files::
|
| 22 |
+
|
| 23 |
+
world0.pl
|
| 24 |
+
rivers.pl
|
| 25 |
+
cities.pl
|
| 26 |
+
countries.pl
|
| 27 |
+
contain.pl
|
| 28 |
+
borders.pl
|
| 29 |
+
|
| 30 |
+
This module uses a slightly modified version of ``world0.pl``, in which
|
| 31 |
+
a set of Prolog rules have been omitted. The modified file is named
|
| 32 |
+
``world1.pl``. Currently, the file ``rivers.pl`` is not read in, since
|
| 33 |
+
it uses a list rather than a string in the second field.
|
| 34 |
+
|
| 35 |
+
Reading Chat-80 Files
|
| 36 |
+
=====================
|
| 37 |
+
|
| 38 |
+
Chat-80 relations are like tables in a relational database. The
|
| 39 |
+
relation acts as the name of the table; the first argument acts as the
|
| 40 |
+
'primary key'; and subsequent arguments are further fields in the
|
| 41 |
+
table. In general, the name of the table provides a label for a unary
|
| 42 |
+
predicate whose extension is all the primary keys. For example,
|
| 43 |
+
relations in ``cities.pl`` are of the following form::
|
| 44 |
+
|
| 45 |
+
'city(athens,greece,1368).'
|
| 46 |
+
|
| 47 |
+
Here, ``'athens'`` is the key, and will be mapped to a member of the
|
| 48 |
+
unary predicate *city*.
|
| 49 |
+
|
| 50 |
+
By analogy with NLTK corpora, ``chat80`` defines a number of 'items'
|
| 51 |
+
which correspond to the relations.
|
| 52 |
+
|
| 53 |
+
>>> from nltk.sem import chat80
|
| 54 |
+
>>> print(chat80.items)
|
| 55 |
+
('borders', 'circle_of_lat', 'circle_of_long', 'city', ...)
|
| 56 |
+
|
| 57 |
+
The fields in the table are mapped to binary predicates. The first
|
| 58 |
+
argument of the predicate is the primary key, while the second
|
| 59 |
+
argument is the data in the relevant field. Thus, in the above
|
| 60 |
+
example, the third field is mapped to the binary predicate
|
| 61 |
+
*population_of*, whose extension is a set of pairs such as
|
| 62 |
+
``'(athens, 1368)'``.
|
| 63 |
+
|
| 64 |
+
An exception to this general framework is required by the relations in
|
| 65 |
+
the files ``borders.pl`` and ``contains.pl``. These contain facts of the
|
| 66 |
+
following form::
|
| 67 |
+
|
| 68 |
+
'borders(albania,greece).'
|
| 69 |
+
|
| 70 |
+
'contains0(africa,central_africa).'
|
| 71 |
+
|
| 72 |
+
We do not want to form a unary concept out of the element in
|
| 73 |
+
the first field of these records, and we want the label of the binary
|
| 74 |
+
relation just to be ``'border'``/``'contain'`` respectively.
|
| 75 |
+
|
| 76 |
+
In order to drive the extraction process, we use 'relation metadata bundles'
|
| 77 |
+
which are Python dictionaries such as the following::
|
| 78 |
+
|
| 79 |
+
city = {'label': 'city',
|
| 80 |
+
'closures': [],
|
| 81 |
+
'schema': ['city', 'country', 'population'],
|
| 82 |
+
'filename': 'cities.pl'}
|
| 83 |
+
|
| 84 |
+
According to this, the file ``city['filename']`` contains a list of
|
| 85 |
+
relational tuples (or more accurately, the corresponding strings in
|
| 86 |
+
Prolog form) whose predicate symbol is ``city['label']`` and whose
|
| 87 |
+
relational schema is ``city['schema']``. The notion of a ``closure`` is
|
| 88 |
+
discussed in the next section.
|
| 89 |
+
|
| 90 |
+
Concepts
|
| 91 |
+
========
|
| 92 |
+
In order to encapsulate the results of the extraction, a class of
|
| 93 |
+
``Concept``\ s is introduced. A ``Concept`` object has a number of
|
| 94 |
+
attributes, in particular a ``prefLabel``, an arity and ``extension``.
|
| 95 |
+
|
| 96 |
+
>>> c1 = chat80.Concept('dog', arity=1, extension=set(['d1', 'd2']))
|
| 97 |
+
>>> print(c1)
|
| 98 |
+
Label = 'dog'
|
| 99 |
+
Arity = 1
|
| 100 |
+
Extension = ['d1', 'd2']
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
The ``extension`` attribute makes it easier to inspect the output of
|
| 105 |
+
the extraction.
|
| 106 |
+
|
| 107 |
+
>>> schema = ['city', 'country', 'population']
|
| 108 |
+
>>> concepts = chat80.clause2concepts('cities.pl', 'city', schema)
|
| 109 |
+
>>> concepts
|
| 110 |
+
[Concept('city'), Concept('country_of'), Concept('population_of')]
|
| 111 |
+
>>> for c in concepts:
|
| 112 |
+
... print("%s:\n\t%s" % (c.prefLabel, c.extension[:4]))
|
| 113 |
+
city:
|
| 114 |
+
['athens', 'bangkok', 'barcelona', 'berlin']
|
| 115 |
+
country_of:
|
| 116 |
+
[('athens', 'greece'), ('bangkok', 'thailand'), ('barcelona', 'spain'), ('berlin', 'east_germany')]
|
| 117 |
+
population_of:
|
| 118 |
+
[('athens', '1368'), ('bangkok', '1178'), ('barcelona', '1280'), ('berlin', '3481')]
|
| 119 |
+
|
| 120 |
+
In addition, the ``extension`` can be further
|
| 121 |
+
processed: in the case of the ``'border'`` relation, we check that the
|
| 122 |
+
relation is **symmetric**, and in the case of the ``'contain'``
|
| 123 |
+
relation, we carry out the **transitive closure**. The closure
|
| 124 |
+
properties associated with a concept are indicated in the relation
|
| 125 |
+
metadata, as indicated earlier.
|
| 126 |
+
|
| 127 |
+
>>> borders = set([('a1', 'a2'), ('a2', 'a3')])
|
| 128 |
+
>>> c2 = chat80.Concept('borders', arity=2, extension=borders)
|
| 129 |
+
>>> print(c2)
|
| 130 |
+
Label = 'borders'
|
| 131 |
+
Arity = 2
|
| 132 |
+
Extension = [('a1', 'a2'), ('a2', 'a3')]
|
| 133 |
+
>>> c3 = chat80.Concept('borders', arity=2, closures=['symmetric'], extension=borders)
|
| 134 |
+
>>> c3.close()
|
| 135 |
+
>>> print(c3)
|
| 136 |
+
Label = 'borders'
|
| 137 |
+
Arity = 2
|
| 138 |
+
Extension = [('a1', 'a2'), ('a2', 'a1'), ('a2', 'a3'), ('a3', 'a2')]
|
| 139 |
+
|
| 140 |
+
The ``extension`` of a ``Concept`` object is then incorporated into a
|
| 141 |
+
``Valuation`` object.
|
| 142 |
+
|
| 143 |
+
Persistence
|
| 144 |
+
===========
|
| 145 |
+
The functions ``val_dump`` and ``val_load`` are provided to allow a
|
| 146 |
+
valuation to be stored in a persistent database and re-loaded, rather
|
| 147 |
+
than having to be re-computed each time.
|
| 148 |
+
|
| 149 |
+
Individuals and Lexical Items
|
| 150 |
+
=============================
|
| 151 |
+
As well as deriving relations from the Chat-80 data, we also create a
|
| 152 |
+
set of individual constants, one for each entity in the domain. The
|
| 153 |
+
individual constants are string-identical to the entities. For
|
| 154 |
+
example, given a data item such as ``'zloty'``, we add to the valuation
|
| 155 |
+
a pair ``('zloty', 'zloty')``. In order to parse English sentences that
|
| 156 |
+
refer to these entities, we also create a lexical item such as the
|
| 157 |
+
following for each individual constant::
|
| 158 |
+
|
| 159 |
+
PropN[num=sg, sem=<\P.(P zloty)>] -> 'Zloty'
|
| 160 |
+
|
| 161 |
+
The set of rules is written to the file ``chat_pnames.fcfg`` in the
|
| 162 |
+
current directory.
|
| 163 |
+
|
| 164 |
+
SQL Query
|
| 165 |
+
=========
|
| 166 |
+
|
| 167 |
+
The ``city`` relation is also available in RDB form and can be queried
|
| 168 |
+
using SQL statements.
|
| 169 |
+
|
| 170 |
+
>>> import nltk
|
| 171 |
+
>>> q = "SELECT City, Population FROM city_table WHERE Country = 'china' and Population > 1000"
|
| 172 |
+
>>> for answer in chat80.sql_query('corpora/city_database/city.db', q):
|
| 173 |
+
... print("%-10s %4s" % answer)
|
| 174 |
+
canton 1496
|
| 175 |
+
chungking 1100
|
| 176 |
+
mukden 1551
|
| 177 |
+
peking 2031
|
| 178 |
+
shanghai 5407
|
| 179 |
+
tientsin 1795
|
| 180 |
+
|
| 181 |
+
The (deliberately naive) grammar ``sql.fcfg`` translates from English
|
| 182 |
+
to SQL:
|
| 183 |
+
|
| 184 |
+
>>> nltk.data.show_cfg('grammars/book_grammars/sql0.fcfg')
|
| 185 |
+
% start S
|
| 186 |
+
S[SEM=(?np + WHERE + ?vp)] -> NP[SEM=?np] VP[SEM=?vp]
|
| 187 |
+
VP[SEM=(?v + ?pp)] -> IV[SEM=?v] PP[SEM=?pp]
|
| 188 |
+
VP[SEM=(?v + ?ap)] -> IV[SEM=?v] AP[SEM=?ap]
|
| 189 |
+
NP[SEM=(?det + ?n)] -> Det[SEM=?det] N[SEM=?n]
|
| 190 |
+
PP[SEM=(?p + ?np)] -> P[SEM=?p] NP[SEM=?np]
|
| 191 |
+
AP[SEM=?pp] -> A[SEM=?a] PP[SEM=?pp]
|
| 192 |
+
NP[SEM='Country="greece"'] -> 'Greece'
|
| 193 |
+
NP[SEM='Country="china"'] -> 'China'
|
| 194 |
+
Det[SEM='SELECT'] -> 'Which' | 'What'
|
| 195 |
+
N[SEM='City FROM city_table'] -> 'cities'
|
| 196 |
+
IV[SEM=''] -> 'are'
|
| 197 |
+
A[SEM=''] -> 'located'
|
| 198 |
+
P[SEM=''] -> 'in'
|
| 199 |
+
|
| 200 |
+
Given this grammar, we can express, and then execute, queries in English.
|
| 201 |
+
|
| 202 |
+
>>> cp = nltk.parse.load_parser('grammars/book_grammars/sql0.fcfg')
|
| 203 |
+
>>> query = 'What cities are in China'
|
| 204 |
+
>>> for tree in cp.parse(query.split()):
|
| 205 |
+
... answer = tree.label()['SEM']
|
| 206 |
+
... q = " ".join(answer)
|
| 207 |
+
... print(q)
|
| 208 |
+
...
|
| 209 |
+
SELECT City FROM city_table WHERE Country="china"
|
| 210 |
+
|
| 211 |
+
>>> rows = chat80.sql_query('corpora/city_database/city.db', q)
|
| 212 |
+
>>> for r in rows: print("%s" % r, end=' ')
|
| 213 |
+
canton chungking dairen harbin kowloon mukden peking shanghai sian tientsin
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
Using Valuations
|
| 217 |
+
-----------------
|
| 218 |
+
|
| 219 |
+
In order to convert such an extension into a valuation, we use the
|
| 220 |
+
``make_valuation()`` method; setting ``read=True`` creates and returns
|
| 221 |
+
a new ``Valuation`` object which contains the results.
|
| 222 |
+
|
| 223 |
+
>>> val = chat80.make_valuation(concepts, read=True)
|
| 224 |
+
>>> 'calcutta' in val['city']
|
| 225 |
+
True
|
| 226 |
+
>>> [town for (town, country) in val['country_of'] if country == 'india']
|
| 227 |
+
['bombay', 'calcutta', 'delhi', 'hyderabad', 'madras']
|
| 228 |
+
>>> dom = val.domain
|
| 229 |
+
>>> g = nltk.sem.Assignment(dom)
|
| 230 |
+
>>> m = nltk.sem.Model(dom, val)
|
| 231 |
+
>>> m.evaluate(r'population_of(jakarta, 533)', g)
|
| 232 |
+
True
|
lib/python3.10/site-packages/nltk/test/childes_fixt.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
def setup_module():
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
import nltk.data
|
| 5 |
+
|
| 6 |
+
try:
|
| 7 |
+
nltk.data.find("corpora/childes/data-xml/Eng-USA-MOR/")
|
| 8 |
+
except LookupError as e:
|
| 9 |
+
pytest.skip(
|
| 10 |
+
"The CHILDES corpus is not found. "
|
| 11 |
+
"It should be manually downloaded and saved/unpacked "
|
| 12 |
+
"to [NLTK_Data_Dir]/corpora/childes/"
|
| 13 |
+
)
|
lib/python3.10/site-packages/nltk/test/classify_fixt.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# most of classify.doctest requires numpy
|
| 2 |
+
def setup_module():
|
| 3 |
+
import pytest
|
| 4 |
+
|
| 5 |
+
pytest.importorskip("numpy")
|
lib/python3.10/site-packages/nltk/test/concordance.doctest
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. Copyright (C) 2001-2016 NLTK Project
|
| 2 |
+
.. For license information, see LICENSE.TXT
|
| 3 |
+
|
| 4 |
+
==================================
|
| 5 |
+
Concordance Example
|
| 6 |
+
==================================
|
| 7 |
+
|
| 8 |
+
A concordance view shows us every occurrence of a given
|
| 9 |
+
word, together with some context. Here we look up the word monstrous
|
| 10 |
+
in Moby Dick by entering text1 followed by a period, then the term
|
| 11 |
+
concordance, and then placing "monstrous" in parentheses:
|
| 12 |
+
|
| 13 |
+
>>> from nltk.corpus import gutenberg
|
| 14 |
+
>>> from nltk.text import Text
|
| 15 |
+
>>> corpus = gutenberg.words('melville-moby_dick.txt')
|
| 16 |
+
>>> text = Text(corpus)
|
| 17 |
+
|
| 18 |
+
>>> text.concordance("monstrous")
|
| 19 |
+
Displaying 11 of 11 matches:
|
| 20 |
+
ong the former , one was of a most monstrous size . ... This came towards us ,
|
| 21 |
+
ON OF THE PSALMS . " Touching that monstrous bulk of the whale or ork we have r
|
| 22 |
+
ll over with a heathenish array of monstrous clubs and spears . Some were thick
|
| 23 |
+
d as you gazed , and wondered what monstrous cannibal and savage could ever hav
|
| 24 |
+
that has survived the flood ; most monstrous and most mountainous ! That Himmal
|
| 25 |
+
they might scout at Moby Dick as a monstrous fable , or still worse and more de
|
| 26 |
+
th of Radney .'" CHAPTER 55 Of the Monstrous Pictures of Whales . I shall ere l
|
| 27 |
+
ing Scenes . In connexion with the monstrous pictures of whales , I am strongly
|
| 28 |
+
ere to enter upon those still more monstrous stories of them which are to be fo
|
| 29 |
+
ght have been rummaged out of this monstrous cabinet there is no telling . But
|
| 30 |
+
of Whale - Bones ; for Whales of a monstrous size are oftentimes cast up dead u
|
| 31 |
+
|
| 32 |
+
>>> text.concordance("monstrous")
|
| 33 |
+
Displaying 11 of 11 matches:
|
| 34 |
+
ong the former , one was of a most monstrous size . ... This came towards us ,
|
| 35 |
+
ON OF THE PSALMS . " Touching that monstrous bulk of the whale or ork we have r
|
| 36 |
+
ll over with a heathenish array of monstrous clubs and spears . Some were thick
|
| 37 |
+
...
|
| 38 |
+
|
| 39 |
+
We can also search for a multi-word phrase by passing a list of strings:
|
| 40 |
+
|
| 41 |
+
>>> text.concordance(["monstrous", "size"])
|
| 42 |
+
Displaying 2 of 2 matches:
|
| 43 |
+
the former , one was of a most monstrous size . ... This came towards us , op
|
| 44 |
+
Whale - Bones ; for Whales of a monstrous size are oftentimes cast up dead upo
|
| 45 |
+
|
| 46 |
+
=================================
|
| 47 |
+
Concordance List
|
| 48 |
+
=================================
|
| 49 |
+
|
| 50 |
+
Often we need to store the results of concordance for further usage.
|
| 51 |
+
To do so, call the concordance function with the stdout argument set
|
| 52 |
+
to false:
|
| 53 |
+
|
| 54 |
+
>>> from nltk.corpus import gutenberg
|
| 55 |
+
>>> from nltk.text import Text
|
| 56 |
+
>>> corpus = gutenberg.words('melville-moby_dick.txt')
|
| 57 |
+
>>> text = Text(corpus)
|
| 58 |
+
>>> con_list = text.concordance_list("monstrous")
|
| 59 |
+
>>> con_list[2].line
|
| 60 |
+
'll over with a heathenish array of monstrous clubs and spears . Some were thick'
|
| 61 |
+
>>> len(con_list)
|
| 62 |
+
11
|
| 63 |
+
|
| 64 |
+
=================================
|
| 65 |
+
Patching Issue #2088
|
| 66 |
+
=================================
|
| 67 |
+
|
| 68 |
+
Patching https://github.com/nltk/nltk/issues/2088
|
| 69 |
+
The left slice of the left context should be clipped to 0 if the `i-context` < 0.
|
| 70 |
+
|
| 71 |
+
>>> from nltk import Text, word_tokenize
|
| 72 |
+
>>> jane_eyre = 'Chapter 1\nTHERE was no possibility of taking a walk that day. We had been wandering, indeed, in the leafless shrubbery an hour in the morning; but since dinner (Mrs. Reed, when there was no company, dined early) the cold winter wind had brought with it clouds so sombre, and a rain so penetrating, that further outdoor exercise was now out of the question.'
|
| 73 |
+
>>> text = Text(word_tokenize(jane_eyre))
|
| 74 |
+
>>> text.concordance_list('taking')[0].left
|
| 75 |
+
['Chapter', '1', 'THERE', 'was', 'no', 'possibility', 'of']
|
lib/python3.10/site-packages/nltk/test/corpus.doctest
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
lib/python3.10/site-packages/nltk/test/data.doctest
ADDED
|
@@ -0,0 +1,387 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
| 2 |
+
.. For license information, see LICENSE.TXT
|
| 3 |
+
|
| 4 |
+
=========================================
|
| 5 |
+
Loading Resources From the Data Package
|
| 6 |
+
=========================================
|
| 7 |
+
|
| 8 |
+
>>> import nltk.data
|
| 9 |
+
|
| 10 |
+
Overview
|
| 11 |
+
~~~~~~~~
|
| 12 |
+
The `nltk.data` module contains functions that can be used to load
|
| 13 |
+
NLTK resource files, such as corpora, grammars, and saved processing
|
| 14 |
+
objects.
|
| 15 |
+
|
| 16 |
+
Loading Data Files
|
| 17 |
+
~~~~~~~~~~~~~~~~~~
|
| 18 |
+
Resources are loaded using the function `nltk.data.load()`, which
|
| 19 |
+
takes as its first argument a URL specifying what file should be
|
| 20 |
+
loaded. The ``nltk:`` protocol loads files from the NLTK data
|
| 21 |
+
distribution:
|
| 22 |
+
|
| 23 |
+
>>> tokenizer = nltk.data.load('nltk:tokenizers/punkt/english.pickle')
|
| 24 |
+
>>> tokenizer.tokenize('Hello. This is a test. It works!')
|
| 25 |
+
['Hello.', 'This is a test.', 'It works!']
|
| 26 |
+
|
| 27 |
+
It is important to note that there should be no space following the
|
| 28 |
+
colon (':') in the URL; 'nltk: tokenizers/punkt/english.pickle' will
|
| 29 |
+
not work!
|
| 30 |
+
|
| 31 |
+
The ``nltk:`` protocol is used by default if no protocol is specified:
|
| 32 |
+
|
| 33 |
+
>>> nltk.data.load('tokenizers/punkt/english.pickle')
|
| 34 |
+
<nltk.tokenize.punkt.PunktSentenceTokenizer object at ...>
|
| 35 |
+
|
| 36 |
+
But it is also possible to load resources from ``http:``, ``ftp:``,
|
| 37 |
+
and ``file:`` URLs:
|
| 38 |
+
|
| 39 |
+
>>> # Load a grammar from the NLTK webpage.
|
| 40 |
+
>>> cfg = nltk.data.load('https://raw.githubusercontent.com/nltk/nltk/develop/nltk/test/toy.cfg')
|
| 41 |
+
>>> print(cfg) # doctest: +ELLIPSIS
|
| 42 |
+
Grammar with 14 productions (start state = S)
|
| 43 |
+
S -> NP VP
|
| 44 |
+
PP -> P NP
|
| 45 |
+
...
|
| 46 |
+
P -> 'on'
|
| 47 |
+
P -> 'in'
|
| 48 |
+
|
| 49 |
+
>>> # Load a grammar using an absolute path.
|
| 50 |
+
>>> url = 'file:%s' % nltk.data.find('grammars/sample_grammars/toy.cfg')
|
| 51 |
+
>>> url.replace('\\', '/')
|
| 52 |
+
'file:...toy.cfg'
|
| 53 |
+
>>> print(nltk.data.load(url))
|
| 54 |
+
Grammar with 14 productions (start state = S)
|
| 55 |
+
S -> NP VP
|
| 56 |
+
PP -> P NP
|
| 57 |
+
...
|
| 58 |
+
P -> 'on'
|
| 59 |
+
P -> 'in'
|
| 60 |
+
|
| 61 |
+
The second argument to the `nltk.data.load()` function specifies the
|
| 62 |
+
file format, which determines how the file's contents are processed
|
| 63 |
+
before they are returned by ``load()``. The formats that are
|
| 64 |
+
currently supported by the data module are described by the dictionary
|
| 65 |
+
`nltk.data.FORMATS`:
|
| 66 |
+
|
| 67 |
+
>>> for format, descr in sorted(nltk.data.FORMATS.items()):
|
| 68 |
+
... print('{0:<7} {1:}'.format(format, descr))
|
| 69 |
+
cfg A context free grammar.
|
| 70 |
+
fcfg A feature CFG.
|
| 71 |
+
fol A list of first order logic expressions, parsed with
|
| 72 |
+
nltk.sem.logic.Expression.fromstring.
|
| 73 |
+
json A serialized python object, stored using the json module.
|
| 74 |
+
logic A list of first order logic expressions, parsed with
|
| 75 |
+
nltk.sem.logic.LogicParser. Requires an additional logic_parser
|
| 76 |
+
parameter
|
| 77 |
+
pcfg A probabilistic CFG.
|
| 78 |
+
pickle A serialized python object, stored using the pickle
|
| 79 |
+
module.
|
| 80 |
+
raw The raw (byte string) contents of a file.
|
| 81 |
+
text The raw (unicode string) contents of a file.
|
| 82 |
+
val A semantic valuation, parsed by
|
| 83 |
+
nltk.sem.Valuation.fromstring.
|
| 84 |
+
yaml A serialized python object, stored using the yaml module.
|
| 85 |
+
|
| 86 |
+
`nltk.data.load()` will raise a ValueError if a bad format name is
|
| 87 |
+
specified:
|
| 88 |
+
|
| 89 |
+
>>> nltk.data.load('grammars/sample_grammars/toy.cfg', 'bar')
|
| 90 |
+
Traceback (most recent call last):
|
| 91 |
+
. . .
|
| 92 |
+
ValueError: Unknown format type!
|
| 93 |
+
|
| 94 |
+
By default, the ``"auto"`` format is used, which chooses a format
|
| 95 |
+
based on the filename's extension. The mapping from file extensions
|
| 96 |
+
to format names is specified by `nltk.data.AUTO_FORMATS`:
|
| 97 |
+
|
| 98 |
+
>>> for ext, format in sorted(nltk.data.AUTO_FORMATS.items()):
|
| 99 |
+
... print('.%-7s -> %s' % (ext, format))
|
| 100 |
+
.cfg -> cfg
|
| 101 |
+
.fcfg -> fcfg
|
| 102 |
+
.fol -> fol
|
| 103 |
+
.json -> json
|
| 104 |
+
.logic -> logic
|
| 105 |
+
.pcfg -> pcfg
|
| 106 |
+
.pickle -> pickle
|
| 107 |
+
.text -> text
|
| 108 |
+
.txt -> text
|
| 109 |
+
.val -> val
|
| 110 |
+
.yaml -> yaml
|
| 111 |
+
|
| 112 |
+
If `nltk.data.load()` is unable to determine the format based on the
|
| 113 |
+
filename's extension, it will raise a ValueError:
|
| 114 |
+
|
| 115 |
+
>>> nltk.data.load('foo.bar')
|
| 116 |
+
Traceback (most recent call last):
|
| 117 |
+
. . .
|
| 118 |
+
ValueError: Could not determine format for foo.bar based on its file
|
| 119 |
+
extension; use the "format" argument to specify the format explicitly.
|
| 120 |
+
|
| 121 |
+
Note that by explicitly specifying the ``format`` argument, you can
|
| 122 |
+
override the load method's default processing behavior. For example,
|
| 123 |
+
to get the raw contents of any file, simply use ``format="raw"``:
|
| 124 |
+
|
| 125 |
+
>>> s = nltk.data.load('grammars/sample_grammars/toy.cfg', 'text')
|
| 126 |
+
>>> print(s)
|
| 127 |
+
S -> NP VP
|
| 128 |
+
PP -> P NP
|
| 129 |
+
NP -> Det N | NP PP
|
| 130 |
+
VP -> V NP | VP PP
|
| 131 |
+
...
|
| 132 |
+
|
| 133 |
+
Making Local Copies
|
| 134 |
+
~~~~~~~~~~~~~~~~~~~
|
| 135 |
+
.. This will not be visible in the html output: create a tempdir to
|
| 136 |
+
play in.
|
| 137 |
+
>>> import tempfile, os
|
| 138 |
+
>>> tempdir = tempfile.mkdtemp()
|
| 139 |
+
>>> old_dir = os.path.abspath('.')
|
| 140 |
+
>>> os.chdir(tempdir)
|
| 141 |
+
|
| 142 |
+
The function `nltk.data.retrieve()` copies a given resource to a local
|
| 143 |
+
file. This can be useful, for example, if you want to edit one of the
|
| 144 |
+
sample grammars.
|
| 145 |
+
|
| 146 |
+
>>> nltk.data.retrieve('grammars/sample_grammars/toy.cfg')
|
| 147 |
+
Retrieving 'nltk:grammars/sample_grammars/toy.cfg', saving to 'toy.cfg'
|
| 148 |
+
|
| 149 |
+
>>> # Simulate editing the grammar.
|
| 150 |
+
>>> with open('toy.cfg') as inp:
|
| 151 |
+
... s = inp.read().replace('NP', 'DP')
|
| 152 |
+
>>> with open('toy.cfg', 'w') as out:
|
| 153 |
+
... _bytes_written = out.write(s)
|
| 154 |
+
|
| 155 |
+
>>> # Load the edited grammar, & display it.
|
| 156 |
+
>>> cfg = nltk.data.load('file:///' + os.path.abspath('toy.cfg'))
|
| 157 |
+
>>> print(cfg)
|
| 158 |
+
Grammar with 14 productions (start state = S)
|
| 159 |
+
S -> DP VP
|
| 160 |
+
PP -> P DP
|
| 161 |
+
...
|
| 162 |
+
P -> 'on'
|
| 163 |
+
P -> 'in'
|
| 164 |
+
|
| 165 |
+
The second argument to `nltk.data.retrieve()` specifies the filename
|
| 166 |
+
for the new copy of the file. By default, the source file's filename
|
| 167 |
+
is used.
|
| 168 |
+
|
| 169 |
+
>>> nltk.data.retrieve('grammars/sample_grammars/toy.cfg', 'mytoy.cfg')
|
| 170 |
+
Retrieving 'nltk:grammars/sample_grammars/toy.cfg', saving to 'mytoy.cfg'
|
| 171 |
+
>>> os.path.isfile('./mytoy.cfg')
|
| 172 |
+
True
|
| 173 |
+
>>> nltk.data.retrieve('grammars/sample_grammars/np.fcfg')
|
| 174 |
+
Retrieving 'nltk:grammars/sample_grammars/np.fcfg', saving to 'np.fcfg'
|
| 175 |
+
>>> os.path.isfile('./np.fcfg')
|
| 176 |
+
True
|
| 177 |
+
|
| 178 |
+
If a file with the specified (or default) filename already exists in
|
| 179 |
+
the current directory, then `nltk.data.retrieve()` will raise a
|
| 180 |
+
ValueError exception. It will *not* overwrite the file:
|
| 181 |
+
|
| 182 |
+
>>> os.path.isfile('./toy.cfg')
|
| 183 |
+
True
|
| 184 |
+
>>> nltk.data.retrieve('grammars/sample_grammars/toy.cfg')
|
| 185 |
+
Traceback (most recent call last):
|
| 186 |
+
. . .
|
| 187 |
+
ValueError: File '...toy.cfg' already exists!
|
| 188 |
+
|
| 189 |
+
.. This will not be visible in the html output: clean up the tempdir.
|
| 190 |
+
>>> os.chdir(old_dir)
|
| 191 |
+
>>> for f in os.listdir(tempdir):
|
| 192 |
+
... os.remove(os.path.join(tempdir, f))
|
| 193 |
+
>>> os.rmdir(tempdir)
|
| 194 |
+
|
| 195 |
+
Finding Files in the NLTK Data Package
|
| 196 |
+
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 197 |
+
The `nltk.data.find()` function searches the NLTK data package for a
|
| 198 |
+
given file, and returns a pointer to that file. This pointer can
|
| 199 |
+
either be a `FileSystemPathPointer` (whose `path` attribute gives the
|
| 200 |
+
absolute path of the file); or a `ZipFilePathPointer`, specifying a
|
| 201 |
+
zipfile and the name of an entry within that zipfile. Both pointer
|
| 202 |
+
types define the `open()` method, which can be used to read the string
|
| 203 |
+
contents of the file.
|
| 204 |
+
|
| 205 |
+
>>> path = nltk.data.find('corpora/abc/rural.txt')
|
| 206 |
+
>>> str(path)
|
| 207 |
+
'...rural.txt'
|
| 208 |
+
>>> print(path.open().read(60).decode())
|
| 209 |
+
PM denies knowledge of AWB kickbacks
|
| 210 |
+
The Prime Minister has
|
| 211 |
+
|
| 212 |
+
Alternatively, the `nltk.data.load()` function can be used with the
|
| 213 |
+
keyword argument ``format="raw"``:
|
| 214 |
+
|
| 215 |
+
>>> s = nltk.data.load('corpora/abc/rural.txt', format='raw')[:60]
|
| 216 |
+
>>> print(s.decode())
|
| 217 |
+
PM denies knowledge of AWB kickbacks
|
| 218 |
+
The Prime Minister has
|
| 219 |
+
|
| 220 |
+
Alternatively, you can use the keyword argument ``format="text"``:
|
| 221 |
+
|
| 222 |
+
>>> s = nltk.data.load('corpora/abc/rural.txt', format='text')[:60]
|
| 223 |
+
>>> print(s)
|
| 224 |
+
PM denies knowledge of AWB kickbacks
|
| 225 |
+
The Prime Minister has
|
| 226 |
+
|
| 227 |
+
Resource Caching
|
| 228 |
+
~~~~~~~~~~~~~~~~
|
| 229 |
+
|
| 230 |
+
NLTK uses a weakref dictionary to maintain a cache of resources that
|
| 231 |
+
have been loaded. If you load a resource that is already stored in
|
| 232 |
+
the cache, then the cached copy will be returned. This behavior can
|
| 233 |
+
be seen by the trace output generated when verbose=True:
|
| 234 |
+
|
| 235 |
+
>>> feat0 = nltk.data.load('grammars/book_grammars/feat0.fcfg', verbose=True)
|
| 236 |
+
<<Loading nltk:grammars/book_grammars/feat0.fcfg>>
|
| 237 |
+
>>> feat0 = nltk.data.load('grammars/book_grammars/feat0.fcfg', verbose=True)
|
| 238 |
+
<<Using cached copy of nltk:grammars/book_grammars/feat0.fcfg>>
|
| 239 |
+
|
| 240 |
+
If you wish to load a resource from its source, bypassing the cache,
|
| 241 |
+
use the ``cache=False`` argument to `nltk.data.load()`. This can be
|
| 242 |
+
useful, for example, if the resource is loaded from a local file, and
|
| 243 |
+
you are actively editing that file:
|
| 244 |
+
|
| 245 |
+
>>> feat0 = nltk.data.load('grammars/book_grammars/feat0.fcfg',cache=False,verbose=True)
|
| 246 |
+
<<Loading nltk:grammars/book_grammars/feat0.fcfg>>
|
| 247 |
+
|
| 248 |
+
The cache *no longer* uses weak references. A resource will not be
|
| 249 |
+
automatically expunged from the cache when no more objects are using
|
| 250 |
+
it. In the following example, when we clear the variable ``feat0``,
|
| 251 |
+
the reference count for the feature grammar object drops to zero.
|
| 252 |
+
However, the object remains cached:
|
| 253 |
+
|
| 254 |
+
>>> del feat0
|
| 255 |
+
>>> feat0 = nltk.data.load('grammars/book_grammars/feat0.fcfg',
|
| 256 |
+
... verbose=True)
|
| 257 |
+
<<Using cached copy of nltk:grammars/book_grammars/feat0.fcfg>>
|
| 258 |
+
|
| 259 |
+
You can clear the entire contents of the cache, using
|
| 260 |
+
`nltk.data.clear_cache()`:
|
| 261 |
+
|
| 262 |
+
>>> nltk.data.clear_cache()
|
| 263 |
+
|
| 264 |
+
Retrieving other Data Sources
|
| 265 |
+
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 266 |
+
>>> formulas = nltk.data.load('grammars/book_grammars/background.fol')
|
| 267 |
+
>>> for f in formulas: print(str(f))
|
| 268 |
+
all x.(boxerdog(x) -> dog(x))
|
| 269 |
+
all x.(boxer(x) -> person(x))
|
| 270 |
+
all x.-(dog(x) & person(x))
|
| 271 |
+
all x.(married(x) <-> exists y.marry(x,y))
|
| 272 |
+
all x.(bark(x) -> dog(x))
|
| 273 |
+
all x y.(marry(x,y) -> (person(x) & person(y)))
|
| 274 |
+
-(Vincent = Mia)
|
| 275 |
+
-(Vincent = Fido)
|
| 276 |
+
-(Mia = Fido)
|
| 277 |
+
|
| 278 |
+
Regression Tests
|
| 279 |
+
~~~~~~~~~~~~~~~~
|
| 280 |
+
Create a temp dir for tests that write files:
|
| 281 |
+
|
| 282 |
+
>>> import tempfile, os
|
| 283 |
+
>>> tempdir = tempfile.mkdtemp()
|
| 284 |
+
>>> old_dir = os.path.abspath('.')
|
| 285 |
+
>>> os.chdir(tempdir)
|
| 286 |
+
|
| 287 |
+
The `retrieve()` function accepts all url types:
|
| 288 |
+
|
| 289 |
+
>>> urls = ['https://raw.githubusercontent.com/nltk/nltk/develop/nltk/test/toy.cfg',
|
| 290 |
+
... 'file:%s' % nltk.data.find('grammars/sample_grammars/toy.cfg'),
|
| 291 |
+
... 'nltk:grammars/sample_grammars/toy.cfg',
|
| 292 |
+
... 'grammars/sample_grammars/toy.cfg']
|
| 293 |
+
>>> for i, url in enumerate(urls):
|
| 294 |
+
... nltk.data.retrieve(url, 'toy-%d.cfg' % i)
|
| 295 |
+
Retrieving 'https://raw.githubusercontent.com/nltk/nltk/develop/nltk/test/toy.cfg', saving to 'toy-0.cfg'
|
| 296 |
+
Retrieving 'file:...toy.cfg', saving to 'toy-1.cfg'
|
| 297 |
+
Retrieving 'nltk:grammars/sample_grammars/toy.cfg', saving to 'toy-2.cfg'
|
| 298 |
+
Retrieving 'nltk:grammars/sample_grammars/toy.cfg', saving to 'toy-3.cfg'
|
| 299 |
+
|
| 300 |
+
Clean up the temp dir:
|
| 301 |
+
|
| 302 |
+
>>> os.chdir(old_dir)
|
| 303 |
+
>>> for f in os.listdir(tempdir):
|
| 304 |
+
... os.remove(os.path.join(tempdir, f))
|
| 305 |
+
>>> os.rmdir(tempdir)
|
| 306 |
+
|
| 307 |
+
Lazy Loader
|
| 308 |
+
-----------
|
| 309 |
+
A lazy loader is a wrapper object that defers loading a resource until
|
| 310 |
+
it is accessed or used in any way. This is mainly intended for
|
| 311 |
+
internal use by NLTK's corpus readers.
|
| 312 |
+
|
| 313 |
+
>>> # Create a lazy loader for toy.cfg.
|
| 314 |
+
>>> ll = nltk.data.LazyLoader('grammars/sample_grammars/toy.cfg')
|
| 315 |
+
|
| 316 |
+
>>> # Show that it's not loaded yet:
|
| 317 |
+
>>> object.__repr__(ll)
|
| 318 |
+
'<nltk.data.LazyLoader object at ...>'
|
| 319 |
+
|
| 320 |
+
>>> # printing it is enough to cause it to be loaded:
|
| 321 |
+
>>> print(ll)
|
| 322 |
+
<Grammar with 14 productions>
|
| 323 |
+
|
| 324 |
+
>>> # Show that it's now been loaded:
|
| 325 |
+
>>> object.__repr__(ll)
|
| 326 |
+
'<nltk.grammar.CFG object at ...>'
|
| 327 |
+
|
| 328 |
+
|
| 329 |
+
>>> # Test that accessing an attribute also loads it:
|
| 330 |
+
>>> ll = nltk.data.LazyLoader('grammars/sample_grammars/toy.cfg')
|
| 331 |
+
>>> ll.start()
|
| 332 |
+
S
|
| 333 |
+
>>> object.__repr__(ll)
|
| 334 |
+
'<nltk.grammar.CFG object at ...>'
|
| 335 |
+
|
| 336 |
+
Buffered Gzip Reading and Writing
|
| 337 |
+
---------------------------------
|
| 338 |
+
Write performance to gzip-compressed is extremely poor when the files become large.
|
| 339 |
+
File creation can become a bottleneck in those cases.
|
| 340 |
+
|
| 341 |
+
Read performance from large gzipped pickle files was improved in data.py by
|
| 342 |
+
buffering the reads. A similar fix can be applied to writes by buffering
|
| 343 |
+
the writes to a StringIO object first.
|
| 344 |
+
|
| 345 |
+
This is mainly intended for internal use. The test simply tests that reading
|
| 346 |
+
and writing work as intended and does not test how much improvement buffering
|
| 347 |
+
provides.
|
| 348 |
+
|
| 349 |
+
>>> from io import StringIO
|
| 350 |
+
>>> test = nltk.data.BufferedGzipFile('testbuf.gz', 'wb', size=2**10)
|
| 351 |
+
>>> ans = []
|
| 352 |
+
>>> for i in range(10000):
|
| 353 |
+
... ans.append(str(i).encode('ascii'))
|
| 354 |
+
... test.write(str(i).encode('ascii'))
|
| 355 |
+
>>> test.close()
|
| 356 |
+
>>> test = nltk.data.BufferedGzipFile('testbuf.gz', 'rb')
|
| 357 |
+
>>> test.read() == b''.join(ans)
|
| 358 |
+
True
|
| 359 |
+
>>> test.close()
|
| 360 |
+
>>> import os
|
| 361 |
+
>>> os.unlink('testbuf.gz')
|
| 362 |
+
|
| 363 |
+
JSON Encoding and Decoding
|
| 364 |
+
--------------------------
|
| 365 |
+
JSON serialization is used instead of pickle for some classes.
|
| 366 |
+
|
| 367 |
+
>>> from nltk import jsontags
|
| 368 |
+
>>> from nltk.jsontags import JSONTaggedEncoder, JSONTaggedDecoder, register_tag
|
| 369 |
+
>>> @jsontags.register_tag
|
| 370 |
+
... class JSONSerializable:
|
| 371 |
+
... json_tag = 'JSONSerializable'
|
| 372 |
+
...
|
| 373 |
+
... def __init__(self, n):
|
| 374 |
+
... self.n = n
|
| 375 |
+
...
|
| 376 |
+
... def encode_json_obj(self):
|
| 377 |
+
... return self.n
|
| 378 |
+
...
|
| 379 |
+
... @classmethod
|
| 380 |
+
... def decode_json_obj(cls, obj):
|
| 381 |
+
... n = obj
|
| 382 |
+
... return cls(n)
|
| 383 |
+
...
|
| 384 |
+
>>> JSONTaggedEncoder().encode(JSONSerializable(1))
|
| 385 |
+
'{"!JSONSerializable": 1}'
|
| 386 |
+
>>> JSONTaggedDecoder().decode('{"!JSONSerializable": 1}').n
|
| 387 |
+
1
|
lib/python3.10/site-packages/nltk/test/dependency.doctest
ADDED
|
@@ -0,0 +1,241 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
| 2 |
+
.. For license information, see LICENSE.TXT
|
| 3 |
+
|
| 4 |
+
===================
|
| 5 |
+
Dependency Grammars
|
| 6 |
+
===================
|
| 7 |
+
|
| 8 |
+
>>> from nltk.grammar import DependencyGrammar
|
| 9 |
+
>>> from nltk.parse import (
|
| 10 |
+
... DependencyGraph,
|
| 11 |
+
... ProjectiveDependencyParser,
|
| 12 |
+
... NonprojectiveDependencyParser,
|
| 13 |
+
... )
|
| 14 |
+
|
| 15 |
+
CoNLL Data
|
| 16 |
+
----------
|
| 17 |
+
|
| 18 |
+
>>> treebank_data = """Pierre NNP 2 NMOD
|
| 19 |
+
... Vinken NNP 8 SUB
|
| 20 |
+
... , , 2 P
|
| 21 |
+
... 61 CD 5 NMOD
|
| 22 |
+
... years NNS 6 AMOD
|
| 23 |
+
... old JJ 2 NMOD
|
| 24 |
+
... , , 2 P
|
| 25 |
+
... will MD 0 ROOT
|
| 26 |
+
... join VB 8 VC
|
| 27 |
+
... the DT 11 NMOD
|
| 28 |
+
... board NN 9 OBJ
|
| 29 |
+
... as IN 9 VMOD
|
| 30 |
+
... a DT 15 NMOD
|
| 31 |
+
... nonexecutive JJ 15 NMOD
|
| 32 |
+
... director NN 12 PMOD
|
| 33 |
+
... Nov. NNP 9 VMOD
|
| 34 |
+
... 29 CD 16 NMOD
|
| 35 |
+
... . . 9 VMOD
|
| 36 |
+
... """
|
| 37 |
+
|
| 38 |
+
>>> dg = DependencyGraph(treebank_data)
|
| 39 |
+
>>> dg.tree().pprint()
|
| 40 |
+
(will
|
| 41 |
+
(Vinken Pierre , (old (years 61)) ,)
|
| 42 |
+
(join (board the) (as (director a nonexecutive)) (Nov. 29) .))
|
| 43 |
+
>>> for head, rel, dep in dg.triples():
|
| 44 |
+
... print(
|
| 45 |
+
... '({h[0]}, {h[1]}), {r}, ({d[0]}, {d[1]})'
|
| 46 |
+
... .format(h=head, r=rel, d=dep)
|
| 47 |
+
... )
|
| 48 |
+
(will, MD), SUB, (Vinken, NNP)
|
| 49 |
+
(Vinken, NNP), NMOD, (Pierre, NNP)
|
| 50 |
+
(Vinken, NNP), P, (,, ,)
|
| 51 |
+
(Vinken, NNP), NMOD, (old, JJ)
|
| 52 |
+
(old, JJ), AMOD, (years, NNS)
|
| 53 |
+
(years, NNS), NMOD, (61, CD)
|
| 54 |
+
(Vinken, NNP), P, (,, ,)
|
| 55 |
+
(will, MD), VC, (join, VB)
|
| 56 |
+
(join, VB), OBJ, (board, NN)
|
| 57 |
+
(board, NN), NMOD, (the, DT)
|
| 58 |
+
(join, VB), VMOD, (as, IN)
|
| 59 |
+
(as, IN), PMOD, (director, NN)
|
| 60 |
+
(director, NN), NMOD, (a, DT)
|
| 61 |
+
(director, NN), NMOD, (nonexecutive, JJ)
|
| 62 |
+
(join, VB), VMOD, (Nov., NNP)
|
| 63 |
+
(Nov., NNP), NMOD, (29, CD)
|
| 64 |
+
(join, VB), VMOD, (., .)
|
| 65 |
+
|
| 66 |
+
Using a custom cell extractor.
|
| 67 |
+
|
| 68 |
+
>>> def custom_extractor(cells):
|
| 69 |
+
... _, tag, head, rel = cells
|
| 70 |
+
... return 'spam', 'spam', tag, tag, '', head, rel
|
| 71 |
+
>>> dg = DependencyGraph(treebank_data, cell_extractor=custom_extractor)
|
| 72 |
+
>>> dg.tree().pprint()
|
| 73 |
+
(spam
|
| 74 |
+
(spam spam spam (spam (spam spam)) spam)
|
| 75 |
+
(spam (spam spam) (spam (spam spam spam)) (spam spam) spam))
|
| 76 |
+
|
| 77 |
+
Custom cell extractors can take in and return an index.
|
| 78 |
+
|
| 79 |
+
>>> def custom_extractor(cells, index):
|
| 80 |
+
... word, tag, head, rel = cells
|
| 81 |
+
... return (index, '{}-{}'.format(word, index), word,
|
| 82 |
+
... tag, tag, '', head, rel)
|
| 83 |
+
>>> dg = DependencyGraph(treebank_data, cell_extractor=custom_extractor)
|
| 84 |
+
>>> dg.tree().pprint()
|
| 85 |
+
(will-8
|
| 86 |
+
(Vinken-2 Pierre-1 ,-3 (old-6 (years-5 61-4)) ,-7)
|
| 87 |
+
(join-9
|
| 88 |
+
(board-11 the-10)
|
| 89 |
+
(as-12 (director-15 a-13 nonexecutive-14))
|
| 90 |
+
(Nov.-16 29-17)
|
| 91 |
+
.-18))
|
| 92 |
+
|
| 93 |
+
Using the dependency-parsed version of the Penn Treebank corpus sample.
|
| 94 |
+
|
| 95 |
+
>>> from nltk.corpus import dependency_treebank
|
| 96 |
+
>>> t = dependency_treebank.parsed_sents()[0]
|
| 97 |
+
>>> print(t.to_conll(3))
|
| 98 |
+
Pierre NNP 2
|
| 99 |
+
Vinken NNP 8
|
| 100 |
+
, , 2
|
| 101 |
+
61 CD 5
|
| 102 |
+
years NNS 6
|
| 103 |
+
old JJ 2
|
| 104 |
+
, , 2
|
| 105 |
+
will MD 0
|
| 106 |
+
join VB 8
|
| 107 |
+
the DT 11
|
| 108 |
+
board NN 9
|
| 109 |
+
as IN 9
|
| 110 |
+
a DT 15
|
| 111 |
+
nonexecutive JJ 15
|
| 112 |
+
director NN 12
|
| 113 |
+
Nov. NNP 9
|
| 114 |
+
29 CD 16
|
| 115 |
+
. . 8
|
| 116 |
+
|
| 117 |
+
Using the output of zpar (like Malt-TAB but with zero-based indexing)
|
| 118 |
+
|
| 119 |
+
>>> zpar_data = """
|
| 120 |
+
... Pierre NNP 1 NMOD
|
| 121 |
+
... Vinken NNP 7 SUB
|
| 122 |
+
... , , 1 P
|
| 123 |
+
... 61 CD 4 NMOD
|
| 124 |
+
... years NNS 5 AMOD
|
| 125 |
+
... old JJ 1 NMOD
|
| 126 |
+
... , , 1 P
|
| 127 |
+
... will MD -1 ROOT
|
| 128 |
+
... join VB 7 VC
|
| 129 |
+
... the DT 10 NMOD
|
| 130 |
+
... board NN 8 OBJ
|
| 131 |
+
... as IN 8 VMOD
|
| 132 |
+
... a DT 14 NMOD
|
| 133 |
+
... nonexecutive JJ 14 NMOD
|
| 134 |
+
... director NN 11 PMOD
|
| 135 |
+
... Nov. NNP 8 VMOD
|
| 136 |
+
... 29 CD 15 NMOD
|
| 137 |
+
... . . 7 P
|
| 138 |
+
... """
|
| 139 |
+
|
| 140 |
+
>>> zdg = DependencyGraph(zpar_data, zero_based=True)
|
| 141 |
+
>>> print(zdg.tree())
|
| 142 |
+
(will
|
| 143 |
+
(Vinken Pierre , (old (years 61)) ,)
|
| 144 |
+
(join (board the) (as (director a nonexecutive)) (Nov. 29))
|
| 145 |
+
.)
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
Projective Dependency Parsing
|
| 149 |
+
-----------------------------
|
| 150 |
+
|
| 151 |
+
>>> grammar = DependencyGrammar.fromstring("""
|
| 152 |
+
... 'fell' -> 'price' | 'stock'
|
| 153 |
+
... 'price' -> 'of' 'the'
|
| 154 |
+
... 'of' -> 'stock'
|
| 155 |
+
... 'stock' -> 'the'
|
| 156 |
+
... """)
|
| 157 |
+
>>> print(grammar)
|
| 158 |
+
Dependency grammar with 5 productions
|
| 159 |
+
'fell' -> 'price'
|
| 160 |
+
'fell' -> 'stock'
|
| 161 |
+
'price' -> 'of' 'the'
|
| 162 |
+
'of' -> 'stock'
|
| 163 |
+
'stock' -> 'the'
|
| 164 |
+
|
| 165 |
+
>>> dp = ProjectiveDependencyParser(grammar)
|
| 166 |
+
>>> for t in sorted(dp.parse(['the', 'price', 'of', 'the', 'stock', 'fell'])):
|
| 167 |
+
... print(t)
|
| 168 |
+
(fell (price the (of (stock the))))
|
| 169 |
+
(fell (price the of) (stock the))
|
| 170 |
+
(fell (price the of the) stock)
|
| 171 |
+
|
| 172 |
+
Non-Projective Dependency Parsing
|
| 173 |
+
---------------------------------
|
| 174 |
+
|
| 175 |
+
>>> grammar = DependencyGrammar.fromstring("""
|
| 176 |
+
... 'taught' -> 'play' | 'man'
|
| 177 |
+
... 'man' -> 'the'
|
| 178 |
+
... 'play' -> 'golf' | 'dog' | 'to'
|
| 179 |
+
... 'dog' -> 'his'
|
| 180 |
+
... """)
|
| 181 |
+
>>> print(grammar)
|
| 182 |
+
Dependency grammar with 7 productions
|
| 183 |
+
'taught' -> 'play'
|
| 184 |
+
'taught' -> 'man'
|
| 185 |
+
'man' -> 'the'
|
| 186 |
+
'play' -> 'golf'
|
| 187 |
+
'play' -> 'dog'
|
| 188 |
+
'play' -> 'to'
|
| 189 |
+
'dog' -> 'his'
|
| 190 |
+
|
| 191 |
+
>>> dp = NonprojectiveDependencyParser(grammar)
|
| 192 |
+
>>> g, = dp.parse(['the', 'man', 'taught', 'his', 'dog', 'to', 'play', 'golf'])
|
| 193 |
+
|
| 194 |
+
>>> print(g.root['word'])
|
| 195 |
+
taught
|
| 196 |
+
|
| 197 |
+
>>> for _, node in sorted(g.nodes.items()):
|
| 198 |
+
... if node['word'] is not None:
|
| 199 |
+
... print('{address} {word}: {d}'.format(d=node['deps'][''], **node))
|
| 200 |
+
1 the: []
|
| 201 |
+
2 man: [1]
|
| 202 |
+
3 taught: [2, 7]
|
| 203 |
+
4 his: []
|
| 204 |
+
5 dog: [4]
|
| 205 |
+
6 to: []
|
| 206 |
+
7 play: [5, 6, 8]
|
| 207 |
+
8 golf: []
|
| 208 |
+
|
| 209 |
+
>>> print(g.tree())
|
| 210 |
+
(taught (man the) (play (dog his) to golf))
|
| 211 |
+
|
| 212 |
+
Integration with MALT parser
|
| 213 |
+
============================
|
| 214 |
+
|
| 215 |
+
In case the top relation is different from the default, we can set it. In case
|
| 216 |
+
of MALT parser, it's set to `'null'`.
|
| 217 |
+
|
| 218 |
+
>>> dg_str = """1 I _ NN NN _ 2 nn _ _
|
| 219 |
+
... 2 shot _ NN NN _ 0 null _ _
|
| 220 |
+
... 3 an _ AT AT _ 2 dep _ _
|
| 221 |
+
... 4 elephant _ NN NN _ 7 nn _ _
|
| 222 |
+
... 5 in _ NN NN _ 7 nn _ _
|
| 223 |
+
... 6 my _ NN NN _ 7 nn _ _
|
| 224 |
+
... 7 pajamas _ NNS NNS _ 3 dobj _ _
|
| 225 |
+
... """
|
| 226 |
+
>>> dg = DependencyGraph(dg_str, top_relation_label='null')
|
| 227 |
+
|
| 228 |
+
>>> len(dg.nodes)
|
| 229 |
+
8
|
| 230 |
+
|
| 231 |
+
>>> dg.root['word'], dg.root['address']
|
| 232 |
+
('shot', 2)
|
| 233 |
+
|
| 234 |
+
>>> print(dg.to_conll(10))
|
| 235 |
+
1 I _ NN NN _ 2 nn _ _
|
| 236 |
+
2 shot _ NN NN _ 0 null _ _
|
| 237 |
+
3 an _ AT AT _ 2 dep _ _
|
| 238 |
+
4 elephant _ NN NN _ 7 nn _ _
|
| 239 |
+
5 in _ NN NN _ 7 nn _ _
|
| 240 |
+
6 my _ NN NN _ 7 nn _ _
|
| 241 |
+
7 pajamas _ NNS NNS _ 3 dobj _ _
|
lib/python3.10/site-packages/nltk/test/discourse.doctest
ADDED
|
@@ -0,0 +1,552 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
| 2 |
+
.. For license information, see LICENSE.TXT
|
| 3 |
+
|
| 4 |
+
==================
|
| 5 |
+
Discourse Checking
|
| 6 |
+
==================
|
| 7 |
+
|
| 8 |
+
>>> from nltk import *
|
| 9 |
+
>>> from nltk.sem import logic
|
| 10 |
+
>>> logic._counter._value = 0
|
| 11 |
+
|
| 12 |
+
Setup
|
| 13 |
+
=====
|
| 14 |
+
|
| 15 |
+
>>> from nltk.test.childes_fixt import setup_module
|
| 16 |
+
>>> setup_module()
|
| 17 |
+
|
| 18 |
+
Introduction
|
| 19 |
+
============
|
| 20 |
+
|
| 21 |
+
The NLTK discourse module makes it possible to test consistency and
|
| 22 |
+
redundancy of simple discourses, using theorem-proving and
|
| 23 |
+
model-building from `nltk.inference`.
|
| 24 |
+
|
| 25 |
+
The ``DiscourseTester`` constructor takes a list of sentences as a
|
| 26 |
+
parameter.
|
| 27 |
+
|
| 28 |
+
>>> dt = DiscourseTester(['a boxer walks', 'every boxer chases a girl'])
|
| 29 |
+
|
| 30 |
+
The ``DiscourseTester`` parses each sentence into a list of logical
|
| 31 |
+
forms. Once we have created ``DiscourseTester`` object, we can
|
| 32 |
+
inspect various properties of the discourse. First off, we might want
|
| 33 |
+
to double-check what sentences are currently stored as the discourse.
|
| 34 |
+
|
| 35 |
+
>>> dt.sentences()
|
| 36 |
+
s0: a boxer walks
|
| 37 |
+
s1: every boxer chases a girl
|
| 38 |
+
|
| 39 |
+
As you will see, each sentence receives an identifier `s`\ :subscript:`i`.
|
| 40 |
+
We might also want to check what grammar the ``DiscourseTester`` is
|
| 41 |
+
using (by default, ``book_grammars/discourse.fcfg``):
|
| 42 |
+
|
| 43 |
+
>>> dt.grammar()
|
| 44 |
+
% start S
|
| 45 |
+
# Grammar Rules
|
| 46 |
+
S[SEM = <app(?subj,?vp)>] -> NP[NUM=?n,SEM=?subj] VP[NUM=?n,SEM=?vp]
|
| 47 |
+
NP[NUM=?n,SEM=<app(?det,?nom)> ] -> Det[NUM=?n,SEM=?det] Nom[NUM=?n,SEM=?nom]
|
| 48 |
+
NP[LOC=?l,NUM=?n,SEM=?np] -> PropN[LOC=?l,NUM=?n,SEM=?np]
|
| 49 |
+
...
|
| 50 |
+
|
| 51 |
+
A different grammar can be invoked by using the optional ``gramfile``
|
| 52 |
+
parameter when a ``DiscourseTester`` object is created.
|
| 53 |
+
|
| 54 |
+
Readings and Threads
|
| 55 |
+
====================
|
| 56 |
+
|
| 57 |
+
Depending on
|
| 58 |
+
the grammar used, we may find some sentences have more than one
|
| 59 |
+
logical form. To check this, use the ``readings()`` method. Given a
|
| 60 |
+
sentence identifier of the form `s`\ :subscript:`i`, each reading of
|
| 61 |
+
that sentence is given an identifier `s`\ :sub:`i`-`r`\ :sub:`j`.
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
>>> dt.readings()
|
| 65 |
+
<BLANKLINE>
|
| 66 |
+
s0 readings:
|
| 67 |
+
<BLANKLINE>
|
| 68 |
+
s0-r0: exists z1.(boxer(z1) & walk(z1))
|
| 69 |
+
s0-r1: exists z1.(boxerdog(z1) & walk(z1))
|
| 70 |
+
<BLANKLINE>
|
| 71 |
+
s1 readings:
|
| 72 |
+
<BLANKLINE>
|
| 73 |
+
s1-r0: all z2.(boxer(z2) -> exists z3.(girl(z3) & chase(z2,z3)))
|
| 74 |
+
s1-r1: all z1.(boxerdog(z1) -> exists z2.(girl(z2) & chase(z1,z2)))
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
In this case, the only source of ambiguity lies in the word *boxer*,
|
| 78 |
+
which receives two translations: ``boxer`` and ``boxerdog``. The
|
| 79 |
+
intention is that one of these corresponds to the ``person`` sense and
|
| 80 |
+
one to the ``dog`` sense. In principle, we would also expect to see a
|
| 81 |
+
quantifier scope ambiguity in ``s1``. However, the simple grammar we
|
| 82 |
+
are using, namely `sem4.fcfg <sem4.fcfg>`_, doesn't support quantifier
|
| 83 |
+
scope ambiguity.
|
| 84 |
+
|
| 85 |
+
We can also investigate the readings of a specific sentence:
|
| 86 |
+
|
| 87 |
+
>>> dt.readings('a boxer walks')
|
| 88 |
+
The sentence 'a boxer walks' has these readings:
|
| 89 |
+
exists x.(boxer(x) & walk(x))
|
| 90 |
+
exists x.(boxerdog(x) & walk(x))
|
| 91 |
+
|
| 92 |
+
Given that each sentence is two-ways ambiguous, we potentially have
|
| 93 |
+
four different discourse 'threads', taking all combinations of
|
| 94 |
+
readings. To see these, specify the ``threaded=True`` parameter on
|
| 95 |
+
the ``readings()`` method. Again, each thread is assigned an
|
| 96 |
+
identifier of the form `d`\ :sub:`i`. Following the identifier is a
|
| 97 |
+
list of the readings that constitute that thread.
|
| 98 |
+
|
| 99 |
+
>>> dt.readings(threaded=True)
|
| 100 |
+
d0: ['s0-r0', 's1-r0']
|
| 101 |
+
d1: ['s0-r0', 's1-r1']
|
| 102 |
+
d2: ['s0-r1', 's1-r0']
|
| 103 |
+
d3: ['s0-r1', 's1-r1']
|
| 104 |
+
|
| 105 |
+
Of course, this simple-minded approach doesn't scale: a discourse with, say, three
|
| 106 |
+
sentences, each of which has 3 readings, will generate 27 different
|
| 107 |
+
threads. It is an interesting exercise to consider how to manage
|
| 108 |
+
discourse ambiguity more efficiently.
|
| 109 |
+
|
| 110 |
+
Checking Consistency
|
| 111 |
+
====================
|
| 112 |
+
|
| 113 |
+
Now, we can check whether some or all of the discourse threads are
|
| 114 |
+
consistent, using the ``models()`` method. With no parameter, this
|
| 115 |
+
method will try to find a model for every discourse thread in the
|
| 116 |
+
current discourse. However, we can also specify just one thread, say ``d1``.
|
| 117 |
+
|
| 118 |
+
>>> dt.models('d1')
|
| 119 |
+
--------------------------------------------------------------------------------
|
| 120 |
+
Model for Discourse Thread d1
|
| 121 |
+
--------------------------------------------------------------------------------
|
| 122 |
+
% number = 1
|
| 123 |
+
% seconds = 0
|
| 124 |
+
<BLANKLINE>
|
| 125 |
+
% Interpretation of size 2
|
| 126 |
+
<BLANKLINE>
|
| 127 |
+
c1 = 0.
|
| 128 |
+
<BLANKLINE>
|
| 129 |
+
f1(0) = 0.
|
| 130 |
+
f1(1) = 0.
|
| 131 |
+
<BLANKLINE>
|
| 132 |
+
boxer(0).
|
| 133 |
+
- boxer(1).
|
| 134 |
+
<BLANKLINE>
|
| 135 |
+
- boxerdog(0).
|
| 136 |
+
- boxerdog(1).
|
| 137 |
+
<BLANKLINE>
|
| 138 |
+
- girl(0).
|
| 139 |
+
- girl(1).
|
| 140 |
+
<BLANKLINE>
|
| 141 |
+
walk(0).
|
| 142 |
+
- walk(1).
|
| 143 |
+
<BLANKLINE>
|
| 144 |
+
- chase(0,0).
|
| 145 |
+
- chase(0,1).
|
| 146 |
+
- chase(1,0).
|
| 147 |
+
- chase(1,1).
|
| 148 |
+
<BLANKLINE>
|
| 149 |
+
Consistent discourse: d1 ['s0-r0', 's1-r1']:
|
| 150 |
+
s0-r0: exists z1.(boxer(z1) & walk(z1))
|
| 151 |
+
s1-r1: all z1.(boxerdog(z1) -> exists z2.(girl(z2) & chase(z1,z2)))
|
| 152 |
+
<BLANKLINE>
|
| 153 |
+
|
| 154 |
+
There are various formats for rendering **Mace4** models --- here,
|
| 155 |
+
we have used the 'cooked' format (which is intended to be
|
| 156 |
+
human-readable). There are a number of points to note.
|
| 157 |
+
|
| 158 |
+
#. The entities in the domain are all treated as non-negative
|
| 159 |
+
integers. In this case, there are only two entities, ``0`` and
|
| 160 |
+
``1``.
|
| 161 |
+
|
| 162 |
+
#. The ``-`` symbol indicates negation. So ``0`` is the only
|
| 163 |
+
``boxerdog`` and the only thing that ``walk``\ s. Nothing is a
|
| 164 |
+
``boxer``, or a ``girl`` or in the ``chase`` relation. Thus the
|
| 165 |
+
universal sentence is vacuously true.
|
| 166 |
+
|
| 167 |
+
#. ``c1`` is an introduced constant that denotes ``0``.
|
| 168 |
+
|
| 169 |
+
#. ``f1`` is a Skolem function, but it plays no significant role in
|
| 170 |
+
this model.
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
We might want to now add another sentence to the discourse, and there
|
| 174 |
+
is method ``add_sentence()`` for doing just this.
|
| 175 |
+
|
| 176 |
+
>>> dt.add_sentence('John is a boxer')
|
| 177 |
+
>>> dt.sentences()
|
| 178 |
+
s0: a boxer walks
|
| 179 |
+
s1: every boxer chases a girl
|
| 180 |
+
s2: John is a boxer
|
| 181 |
+
|
| 182 |
+
We can now test all the properties as before; here, we just show a
|
| 183 |
+
couple of them.
|
| 184 |
+
|
| 185 |
+
>>> dt.readings()
|
| 186 |
+
<BLANKLINE>
|
| 187 |
+
s0 readings:
|
| 188 |
+
<BLANKLINE>
|
| 189 |
+
s0-r0: exists z1.(boxer(z1) & walk(z1))
|
| 190 |
+
s0-r1: exists z1.(boxerdog(z1) & walk(z1))
|
| 191 |
+
<BLANKLINE>
|
| 192 |
+
s1 readings:
|
| 193 |
+
<BLANKLINE>
|
| 194 |
+
s1-r0: all z1.(boxer(z1) -> exists z2.(girl(z2) & chase(z1,z2)))
|
| 195 |
+
s1-r1: all z1.(boxerdog(z1) -> exists z2.(girl(z2) & chase(z1,z2)))
|
| 196 |
+
<BLANKLINE>
|
| 197 |
+
s2 readings:
|
| 198 |
+
<BLANKLINE>
|
| 199 |
+
s2-r0: boxer(John)
|
| 200 |
+
s2-r1: boxerdog(John)
|
| 201 |
+
>>> dt.readings(threaded=True)
|
| 202 |
+
d0: ['s0-r0', 's1-r0', 's2-r0']
|
| 203 |
+
d1: ['s0-r0', 's1-r0', 's2-r1']
|
| 204 |
+
d2: ['s0-r0', 's1-r1', 's2-r0']
|
| 205 |
+
d3: ['s0-r0', 's1-r1', 's2-r1']
|
| 206 |
+
d4: ['s0-r1', 's1-r0', 's2-r0']
|
| 207 |
+
d5: ['s0-r1', 's1-r0', 's2-r1']
|
| 208 |
+
d6: ['s0-r1', 's1-r1', 's2-r0']
|
| 209 |
+
d7: ['s0-r1', 's1-r1', 's2-r1']
|
| 210 |
+
|
| 211 |
+
If you are interested in a particular thread, the ``expand_threads()``
|
| 212 |
+
method will remind you of what readings it consists of:
|
| 213 |
+
|
| 214 |
+
>>> thread = dt.expand_threads('d1')
|
| 215 |
+
>>> for rid, reading in thread:
|
| 216 |
+
... print(rid, str(reading.normalize()))
|
| 217 |
+
s0-r0 exists z1.(boxer(z1) & walk(z1))
|
| 218 |
+
s1-r0 all z1.(boxer(z1) -> exists z2.(girl(z2) & chase(z1,z2)))
|
| 219 |
+
s2-r1 boxerdog(John)
|
| 220 |
+
|
| 221 |
+
Suppose we have already defined a discourse, as follows:
|
| 222 |
+
|
| 223 |
+
>>> dt = DiscourseTester(['A student dances', 'Every student is a person'])
|
| 224 |
+
|
| 225 |
+
Now, when we add a new sentence, is it consistent with what we already
|
| 226 |
+
have? The `` consistchk=True`` parameter of ``add_sentence()`` allows
|
| 227 |
+
us to check:
|
| 228 |
+
|
| 229 |
+
>>> dt.add_sentence('No person dances', consistchk=True)
|
| 230 |
+
Inconsistent discourse: d0 ['s0-r0', 's1-r0', 's2-r0']:
|
| 231 |
+
s0-r0: exists z1.(student(z1) & dance(z1))
|
| 232 |
+
s1-r0: all z1.(student(z1) -> person(z1))
|
| 233 |
+
s2-r0: -exists z1.(person(z1) & dance(z1))
|
| 234 |
+
<BLANKLINE>
|
| 235 |
+
>>> dt.readings()
|
| 236 |
+
<BLANKLINE>
|
| 237 |
+
s0 readings:
|
| 238 |
+
<BLANKLINE>
|
| 239 |
+
s0-r0: exists z1.(student(z1) & dance(z1))
|
| 240 |
+
<BLANKLINE>
|
| 241 |
+
s1 readings:
|
| 242 |
+
<BLANKLINE>
|
| 243 |
+
s1-r0: all z1.(student(z1) -> person(z1))
|
| 244 |
+
<BLANKLINE>
|
| 245 |
+
s2 readings:
|
| 246 |
+
<BLANKLINE>
|
| 247 |
+
s2-r0: -exists z1.(person(z1) & dance(z1))
|
| 248 |
+
|
| 249 |
+
So let's retract the inconsistent sentence:
|
| 250 |
+
|
| 251 |
+
>>> dt.retract_sentence('No person dances', verbose=True)
|
| 252 |
+
Current sentences are
|
| 253 |
+
s0: A student dances
|
| 254 |
+
s1: Every student is a person
|
| 255 |
+
|
| 256 |
+
We can now verify that result is consistent.
|
| 257 |
+
|
| 258 |
+
>>> dt.models()
|
| 259 |
+
--------------------------------------------------------------------------------
|
| 260 |
+
Model for Discourse Thread d0
|
| 261 |
+
--------------------------------------------------------------------------------
|
| 262 |
+
% number = 1
|
| 263 |
+
% seconds = 0
|
| 264 |
+
<BLANKLINE>
|
| 265 |
+
% Interpretation of size 2
|
| 266 |
+
<BLANKLINE>
|
| 267 |
+
c1 = 0.
|
| 268 |
+
<BLANKLINE>
|
| 269 |
+
dance(0).
|
| 270 |
+
- dance(1).
|
| 271 |
+
<BLANKLINE>
|
| 272 |
+
person(0).
|
| 273 |
+
- person(1).
|
| 274 |
+
<BLANKLINE>
|
| 275 |
+
student(0).
|
| 276 |
+
- student(1).
|
| 277 |
+
<BLANKLINE>
|
| 278 |
+
Consistent discourse: d0 ['s0-r0', 's1-r0']:
|
| 279 |
+
s0-r0: exists z1.(student(z1) & dance(z1))
|
| 280 |
+
s1-r0: all z1.(student(z1) -> person(z1))
|
| 281 |
+
<BLANKLINE>
|
| 282 |
+
|
| 283 |
+
Checking Informativity
|
| 284 |
+
======================
|
| 285 |
+
|
| 286 |
+
Let's assume that we are still trying to extend the discourse *A
|
| 287 |
+
student dances.* *Every student is a person.* We add a new sentence,
|
| 288 |
+
but this time, we check whether it is informative with respect to what
|
| 289 |
+
has gone before.
|
| 290 |
+
|
| 291 |
+
>>> dt.add_sentence('A person dances', informchk=True)
|
| 292 |
+
Sentence 'A person dances' under reading 'exists x.(person(x) & dance(x))':
|
| 293 |
+
Not informative relative to thread 'd0'
|
| 294 |
+
|
| 295 |
+
In fact, we are just checking whether the new sentence is entailed by
|
| 296 |
+
the preceding discourse.
|
| 297 |
+
|
| 298 |
+
>>> dt.models()
|
| 299 |
+
--------------------------------------------------------------------------------
|
| 300 |
+
Model for Discourse Thread d0
|
| 301 |
+
--------------------------------------------------------------------------------
|
| 302 |
+
% number = 1
|
| 303 |
+
% seconds = 0
|
| 304 |
+
<BLANKLINE>
|
| 305 |
+
% Interpretation of size 2
|
| 306 |
+
<BLANKLINE>
|
| 307 |
+
c1 = 0.
|
| 308 |
+
<BLANKLINE>
|
| 309 |
+
c2 = 0.
|
| 310 |
+
<BLANKLINE>
|
| 311 |
+
dance(0).
|
| 312 |
+
- dance(1).
|
| 313 |
+
<BLANKLINE>
|
| 314 |
+
person(0).
|
| 315 |
+
- person(1).
|
| 316 |
+
<BLANKLINE>
|
| 317 |
+
student(0).
|
| 318 |
+
- student(1).
|
| 319 |
+
<BLANKLINE>
|
| 320 |
+
Consistent discourse: d0 ['s0-r0', 's1-r0', 's2-r0']:
|
| 321 |
+
s0-r0: exists z1.(student(z1) & dance(z1))
|
| 322 |
+
s1-r0: all z1.(student(z1) -> person(z1))
|
| 323 |
+
s2-r0: exists z1.(person(z1) & dance(z1))
|
| 324 |
+
<BLANKLINE>
|
| 325 |
+
|
| 326 |
+
|
| 327 |
+
|
| 328 |
+
Adding Background Knowledge
|
| 329 |
+
===========================
|
| 330 |
+
|
| 331 |
+
Let's build a new discourse, and look at the readings of the component sentences:
|
| 332 |
+
|
| 333 |
+
>>> dt = DiscourseTester(['Vincent is a boxer', 'Fido is a boxer', 'Vincent is married', 'Fido barks'])
|
| 334 |
+
>>> dt.readings()
|
| 335 |
+
<BLANKLINE>
|
| 336 |
+
s0 readings:
|
| 337 |
+
<BLANKLINE>
|
| 338 |
+
s0-r0: boxer(Vincent)
|
| 339 |
+
s0-r1: boxerdog(Vincent)
|
| 340 |
+
<BLANKLINE>
|
| 341 |
+
s1 readings:
|
| 342 |
+
<BLANKLINE>
|
| 343 |
+
s1-r0: boxer(Fido)
|
| 344 |
+
s1-r1: boxerdog(Fido)
|
| 345 |
+
<BLANKLINE>
|
| 346 |
+
s2 readings:
|
| 347 |
+
<BLANKLINE>
|
| 348 |
+
s2-r0: married(Vincent)
|
| 349 |
+
<BLANKLINE>
|
| 350 |
+
s3 readings:
|
| 351 |
+
<BLANKLINE>
|
| 352 |
+
s3-r0: bark(Fido)
|
| 353 |
+
|
| 354 |
+
This gives us a lot of threads:
|
| 355 |
+
|
| 356 |
+
>>> dt.readings(threaded=True)
|
| 357 |
+
d0: ['s0-r0', 's1-r0', 's2-r0', 's3-r0']
|
| 358 |
+
d1: ['s0-r0', 's1-r1', 's2-r0', 's3-r0']
|
| 359 |
+
d2: ['s0-r1', 's1-r0', 's2-r0', 's3-r0']
|
| 360 |
+
d3: ['s0-r1', 's1-r1', 's2-r0', 's3-r0']
|
| 361 |
+
|
| 362 |
+
|
| 363 |
+
We can eliminate some of the readings, and hence some of the threads,
|
| 364 |
+
by adding background information.
|
| 365 |
+
|
| 366 |
+
>>> import nltk.data
|
| 367 |
+
>>> bg = nltk.data.load('grammars/book_grammars/background.fol')
|
| 368 |
+
>>> dt.add_background(bg)
|
| 369 |
+
>>> dt.background()
|
| 370 |
+
all x.(boxerdog(x) -> dog(x))
|
| 371 |
+
all x.(boxer(x) -> person(x))
|
| 372 |
+
all x.-(dog(x) & person(x))
|
| 373 |
+
all x.(married(x) <-> exists y.marry(x,y))
|
| 374 |
+
all x.(bark(x) -> dog(x))
|
| 375 |
+
all x y.(marry(x,y) -> (person(x) & person(y)))
|
| 376 |
+
-(Vincent = Mia)
|
| 377 |
+
-(Vincent = Fido)
|
| 378 |
+
-(Mia = Fido)
|
| 379 |
+
|
| 380 |
+
The background information allows us to reject three of the threads as
|
| 381 |
+
inconsistent. To see what remains, use the ``filter=True`` parameter
|
| 382 |
+
on ``readings()``.
|
| 383 |
+
|
| 384 |
+
>>> dt.readings(filter=True)
|
| 385 |
+
d1: ['s0-r0', 's1-r1', 's2-r0', 's3-r0']
|
| 386 |
+
|
| 387 |
+
The ``models()`` method gives us more information about the surviving thread.
|
| 388 |
+
|
| 389 |
+
>>> dt.models()
|
| 390 |
+
--------------------------------------------------------------------------------
|
| 391 |
+
Model for Discourse Thread d0
|
| 392 |
+
--------------------------------------------------------------------------------
|
| 393 |
+
No model found!
|
| 394 |
+
<BLANKLINE>
|
| 395 |
+
--------------------------------------------------------------------------------
|
| 396 |
+
Model for Discourse Thread d1
|
| 397 |
+
--------------------------------------------------------------------------------
|
| 398 |
+
% number = 1
|
| 399 |
+
% seconds = 0
|
| 400 |
+
<BLANKLINE>
|
| 401 |
+
% Interpretation of size 3
|
| 402 |
+
<BLANKLINE>
|
| 403 |
+
Fido = 0.
|
| 404 |
+
<BLANKLINE>
|
| 405 |
+
Mia = 1.
|
| 406 |
+
<BLANKLINE>
|
| 407 |
+
Vincent = 2.
|
| 408 |
+
<BLANKLINE>
|
| 409 |
+
f1(0) = 0.
|
| 410 |
+
f1(1) = 0.
|
| 411 |
+
f1(2) = 2.
|
| 412 |
+
<BLANKLINE>
|
| 413 |
+
bark(0).
|
| 414 |
+
- bark(1).
|
| 415 |
+
- bark(2).
|
| 416 |
+
<BLANKLINE>
|
| 417 |
+
- boxer(0).
|
| 418 |
+
- boxer(1).
|
| 419 |
+
boxer(2).
|
| 420 |
+
<BLANKLINE>
|
| 421 |
+
boxerdog(0).
|
| 422 |
+
- boxerdog(1).
|
| 423 |
+
- boxerdog(2).
|
| 424 |
+
<BLANKLINE>
|
| 425 |
+
dog(0).
|
| 426 |
+
- dog(1).
|
| 427 |
+
- dog(2).
|
| 428 |
+
<BLANKLINE>
|
| 429 |
+
- married(0).
|
| 430 |
+
- married(1).
|
| 431 |
+
married(2).
|
| 432 |
+
<BLANKLINE>
|
| 433 |
+
- person(0).
|
| 434 |
+
- person(1).
|
| 435 |
+
person(2).
|
| 436 |
+
<BLANKLINE>
|
| 437 |
+
- marry(0,0).
|
| 438 |
+
- marry(0,1).
|
| 439 |
+
- marry(0,2).
|
| 440 |
+
- marry(1,0).
|
| 441 |
+
- marry(1,1).
|
| 442 |
+
- marry(1,2).
|
| 443 |
+
- marry(2,0).
|
| 444 |
+
- marry(2,1).
|
| 445 |
+
marry(2,2).
|
| 446 |
+
<BLANKLINE>
|
| 447 |
+
--------------------------------------------------------------------------------
|
| 448 |
+
Model for Discourse Thread d2
|
| 449 |
+
--------------------------------------------------------------------------------
|
| 450 |
+
No model found!
|
| 451 |
+
<BLANKLINE>
|
| 452 |
+
--------------------------------------------------------------------------------
|
| 453 |
+
Model for Discourse Thread d3
|
| 454 |
+
--------------------------------------------------------------------------------
|
| 455 |
+
No model found!
|
| 456 |
+
<BLANKLINE>
|
| 457 |
+
Inconsistent discourse: d0 ['s0-r0', 's1-r0', 's2-r0', 's3-r0']:
|
| 458 |
+
s0-r0: boxer(Vincent)
|
| 459 |
+
s1-r0: boxer(Fido)
|
| 460 |
+
s2-r0: married(Vincent)
|
| 461 |
+
s3-r0: bark(Fido)
|
| 462 |
+
<BLANKLINE>
|
| 463 |
+
Consistent discourse: d1 ['s0-r0', 's1-r1', 's2-r0', 's3-r0']:
|
| 464 |
+
s0-r0: boxer(Vincent)
|
| 465 |
+
s1-r1: boxerdog(Fido)
|
| 466 |
+
s2-r0: married(Vincent)
|
| 467 |
+
s3-r0: bark(Fido)
|
| 468 |
+
<BLANKLINE>
|
| 469 |
+
Inconsistent discourse: d2 ['s0-r1', 's1-r0', 's2-r0', 's3-r0']:
|
| 470 |
+
s0-r1: boxerdog(Vincent)
|
| 471 |
+
s1-r0: boxer(Fido)
|
| 472 |
+
s2-r0: married(Vincent)
|
| 473 |
+
s3-r0: bark(Fido)
|
| 474 |
+
<BLANKLINE>
|
| 475 |
+
Inconsistent discourse: d3 ['s0-r1', 's1-r1', 's2-r0', 's3-r0']:
|
| 476 |
+
s0-r1: boxerdog(Vincent)
|
| 477 |
+
s1-r1: boxerdog(Fido)
|
| 478 |
+
s2-r0: married(Vincent)
|
| 479 |
+
s3-r0: bark(Fido)
|
| 480 |
+
<BLANKLINE>
|
| 481 |
+
|
| 482 |
+
|
| 483 |
+
.. This will not be visible in the html output: create a tempdir to
|
| 484 |
+
play in.
|
| 485 |
+
>>> import tempfile, os
|
| 486 |
+
>>> tempdir = tempfile.mkdtemp()
|
| 487 |
+
>>> old_dir = os.path.abspath('.')
|
| 488 |
+
>>> os.chdir(tempdir)
|
| 489 |
+
|
| 490 |
+
In order to play around with your own version of background knowledge,
|
| 491 |
+
you might want to start off with a local copy of ``background.fol``:
|
| 492 |
+
|
| 493 |
+
>>> nltk.data.retrieve('grammars/book_grammars/background.fol')
|
| 494 |
+
Retrieving 'nltk:grammars/book_grammars/background.fol', saving to 'background.fol'
|
| 495 |
+
|
| 496 |
+
After you have modified the file, the ``load_fol()`` function will parse
|
| 497 |
+
the strings in the file into expressions of ``nltk.sem.logic``.
|
| 498 |
+
|
| 499 |
+
>>> from nltk.inference.discourse import load_fol
|
| 500 |
+
>>> mybg = load_fol(open('background.fol').read())
|
| 501 |
+
|
| 502 |
+
The result can be loaded as an argument of ``add_background()`` in the
|
| 503 |
+
manner shown earlier.
|
| 504 |
+
|
| 505 |
+
.. This will not be visible in the html output: clean up the tempdir.
|
| 506 |
+
>>> os.chdir(old_dir)
|
| 507 |
+
>>> for f in os.listdir(tempdir):
|
| 508 |
+
... os.remove(os.path.join(tempdir, f))
|
| 509 |
+
>>> os.rmdir(tempdir)
|
| 510 |
+
>>> nltk.data.clear_cache()
|
| 511 |
+
|
| 512 |
+
|
| 513 |
+
Regression Testing from book
|
| 514 |
+
============================
|
| 515 |
+
|
| 516 |
+
>>> logic._counter._value = 0
|
| 517 |
+
|
| 518 |
+
>>> from nltk.tag import RegexpTagger
|
| 519 |
+
>>> tagger = RegexpTagger(
|
| 520 |
+
... [('^(chases|runs)$', 'VB'),
|
| 521 |
+
... ('^(a)$', 'ex_quant'),
|
| 522 |
+
... ('^(every)$', 'univ_quant'),
|
| 523 |
+
... ('^(dog|boy)$', 'NN'),
|
| 524 |
+
... ('^(He)$', 'PRP')
|
| 525 |
+
... ])
|
| 526 |
+
>>> rc = DrtGlueReadingCommand(depparser=MaltParser(tagger=tagger))
|
| 527 |
+
>>> dt = DiscourseTester(map(str.split, ['Every dog chases a boy', 'He runs']), rc)
|
| 528 |
+
>>> dt.readings()
|
| 529 |
+
<BLANKLINE>
|
| 530 |
+
s0 readings:
|
| 531 |
+
<BLANKLINE>
|
| 532 |
+
s0-r0: ([z2],[boy(z2), (([z5],[dog(z5)]) -> ([],[chases(z5,z2)]))])
|
| 533 |
+
s0-r1: ([],[(([z1],[dog(z1)]) -> ([z2],[boy(z2), chases(z1,z2)]))])
|
| 534 |
+
<BLANKLINE>
|
| 535 |
+
s1 readings:
|
| 536 |
+
<BLANKLINE>
|
| 537 |
+
s1-r0: ([z1],[PRO(z1), runs(z1)])
|
| 538 |
+
>>> dt.readings(show_thread_readings=True)
|
| 539 |
+
d0: ['s0-r0', 's1-r0'] : ([z1,z2],[boy(z1), (([z3],[dog(z3)]) -> ([],[chases(z3,z1)])), (z2 = z1), runs(z2)])
|
| 540 |
+
d1: ['s0-r1', 's1-r0'] : INVALID: AnaphoraResolutionException
|
| 541 |
+
>>> dt.readings(filter=True, show_thread_readings=True)
|
| 542 |
+
d0: ['s0-r0', 's1-r0'] : ([z1,z3],[boy(z1), (([z2],[dog(z2)]) -> ([],[chases(z2,z1)])), (z3 = z1), runs(z3)])
|
| 543 |
+
|
| 544 |
+
>>> logic._counter._value = 0
|
| 545 |
+
|
| 546 |
+
>>> from nltk.parse import FeatureEarleyChartParser
|
| 547 |
+
>>> from nltk.sem.drt import DrtParser
|
| 548 |
+
>>> grammar = nltk.data.load('grammars/book_grammars/drt.fcfg', logic_parser=DrtParser())
|
| 549 |
+
>>> parser = FeatureEarleyChartParser(grammar, trace=0)
|
| 550 |
+
>>> trees = parser.parse('Angus owns a dog'.split())
|
| 551 |
+
>>> print(list(trees)[0].label()['SEM'].simplify().normalize())
|
| 552 |
+
([z1,z2],[Angus(z1), dog(z2), own(z1,z2)])
|
lib/python3.10/site-packages/nltk/test/featgram.doctest
ADDED
|
@@ -0,0 +1,610 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
| 2 |
+
.. For license information, see LICENSE.TXT
|
| 3 |
+
|
| 4 |
+
=========================
|
| 5 |
+
Feature Grammar Parsing
|
| 6 |
+
=========================
|
| 7 |
+
|
| 8 |
+
.. definitions from nltk_book/definitions.rst
|
| 9 |
+
|
| 10 |
+
.. role:: feat
|
| 11 |
+
:class: feature
|
| 12 |
+
.. role:: fval
|
| 13 |
+
:class: fval
|
| 14 |
+
.. |rarr| unicode:: U+2192 .. right arrow
|
| 15 |
+
.. |dot| unicode:: U+2022 .. bullet
|
| 16 |
+
.. |pi| unicode:: U+03C0
|
| 17 |
+
|
| 18 |
+
Grammars can be parsed from strings.
|
| 19 |
+
|
| 20 |
+
>>> import nltk
|
| 21 |
+
>>> from nltk import grammar, parse
|
| 22 |
+
>>> g = """
|
| 23 |
+
... % start DP
|
| 24 |
+
... DP[AGR=?a] -> D[AGR=?a] N[AGR=?a]
|
| 25 |
+
... D[AGR=[NUM='sg', PERS=3]] -> 'this' | 'that'
|
| 26 |
+
... D[AGR=[NUM='pl', PERS=3]] -> 'these' | 'those'
|
| 27 |
+
... D[AGR=[NUM='pl', PERS=1]] -> 'we'
|
| 28 |
+
... D[AGR=[PERS=2]] -> 'you'
|
| 29 |
+
... N[AGR=[NUM='sg', GND='m']] -> 'boy'
|
| 30 |
+
... N[AGR=[NUM='pl', GND='m']] -> 'boys'
|
| 31 |
+
... N[AGR=[NUM='sg', GND='f']] -> 'girl'
|
| 32 |
+
... N[AGR=[NUM='pl', GND='f']] -> 'girls'
|
| 33 |
+
... N[AGR=[NUM='sg']] -> 'student'
|
| 34 |
+
... N[AGR=[NUM='pl']] -> 'students'
|
| 35 |
+
... """
|
| 36 |
+
>>> grammar = grammar.FeatureGrammar.fromstring(g)
|
| 37 |
+
>>> tokens = 'these girls'.split()
|
| 38 |
+
>>> parser = parse.FeatureEarleyChartParser(grammar)
|
| 39 |
+
>>> trees = parser.parse(tokens)
|
| 40 |
+
>>> for tree in trees: print(tree)
|
| 41 |
+
(DP[AGR=[GND='f', NUM='pl', PERS=3]]
|
| 42 |
+
(D[AGR=[NUM='pl', PERS=3]] these)
|
| 43 |
+
(N[AGR=[GND='f', NUM='pl']] girls))
|
| 44 |
+
|
| 45 |
+
In general, when we are trying to develop even a very small grammar,
|
| 46 |
+
it is convenient to put the rules in a file where they can be edited,
|
| 47 |
+
tested and revised. Let's assume that we have saved feat0cfg as a file named
|
| 48 |
+
``'feat0.fcfg'`` and placed it in the NLTK ``data`` directory. We can
|
| 49 |
+
inspect it as follows:
|
| 50 |
+
|
| 51 |
+
>>> nltk.data.show_cfg('grammars/book_grammars/feat0.fcfg')
|
| 52 |
+
% start S
|
| 53 |
+
# ###################
|
| 54 |
+
# Grammar Productions
|
| 55 |
+
# ###################
|
| 56 |
+
# S expansion productions
|
| 57 |
+
S -> NP[NUM=?n] VP[NUM=?n]
|
| 58 |
+
# NP expansion productions
|
| 59 |
+
NP[NUM=?n] -> N[NUM=?n]
|
| 60 |
+
NP[NUM=?n] -> PropN[NUM=?n]
|
| 61 |
+
NP[NUM=?n] -> Det[NUM=?n] N[NUM=?n]
|
| 62 |
+
NP[NUM=pl] -> N[NUM=pl]
|
| 63 |
+
# VP expansion productions
|
| 64 |
+
VP[TENSE=?t, NUM=?n] -> IV[TENSE=?t, NUM=?n]
|
| 65 |
+
VP[TENSE=?t, NUM=?n] -> TV[TENSE=?t, NUM=?n] NP
|
| 66 |
+
# ###################
|
| 67 |
+
# Lexical Productions
|
| 68 |
+
# ###################
|
| 69 |
+
Det[NUM=sg] -> 'this' | 'every'
|
| 70 |
+
Det[NUM=pl] -> 'these' | 'all'
|
| 71 |
+
Det -> 'the' | 'some' | 'several'
|
| 72 |
+
PropN[NUM=sg]-> 'Kim' | 'Jody'
|
| 73 |
+
N[NUM=sg] -> 'dog' | 'girl' | 'car' | 'child'
|
| 74 |
+
N[NUM=pl] -> 'dogs' | 'girls' | 'cars' | 'children'
|
| 75 |
+
IV[TENSE=pres, NUM=sg] -> 'disappears' | 'walks'
|
| 76 |
+
TV[TENSE=pres, NUM=sg] -> 'sees' | 'likes'
|
| 77 |
+
IV[TENSE=pres, NUM=pl] -> 'disappear' | 'walk'
|
| 78 |
+
TV[TENSE=pres, NUM=pl] -> 'see' | 'like'
|
| 79 |
+
IV[TENSE=past] -> 'disappeared' | 'walked'
|
| 80 |
+
TV[TENSE=past] -> 'saw' | 'liked'
|
| 81 |
+
|
| 82 |
+
Assuming we have saved feat0cfg as a file named
|
| 83 |
+
``'feat0.fcfg'``, the function ``parse.load_parser`` allows us to
|
| 84 |
+
read the grammar into NLTK, ready for use in parsing.
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
>>> cp = parse.load_parser('grammars/book_grammars/feat0.fcfg', trace=1)
|
| 88 |
+
>>> sent = 'Kim likes children'
|
| 89 |
+
>>> tokens = sent.split()
|
| 90 |
+
>>> tokens
|
| 91 |
+
['Kim', 'likes', 'children']
|
| 92 |
+
>>> trees = cp.parse(tokens)
|
| 93 |
+
|.Kim .like.chil.|
|
| 94 |
+
|[----] . .| [0:1] 'Kim'
|
| 95 |
+
|. [----] .| [1:2] 'likes'
|
| 96 |
+
|. . [----]| [2:3] 'children'
|
| 97 |
+
|[----] . .| [0:1] PropN[NUM='sg'] -> 'Kim' *
|
| 98 |
+
|[----] . .| [0:1] NP[NUM='sg'] -> PropN[NUM='sg'] *
|
| 99 |
+
|[----> . .| [0:1] S[] -> NP[NUM=?n] * VP[NUM=?n] {?n: 'sg'}
|
| 100 |
+
|. [----] .| [1:2] TV[NUM='sg', TENSE='pres'] -> 'likes' *
|
| 101 |
+
|. [----> .| [1:2] VP[NUM=?n, TENSE=?t] -> TV[NUM=?n, TENSE=?t] * NP[] {?n: 'sg', ?t: 'pres'}
|
| 102 |
+
|. . [----]| [2:3] N[NUM='pl'] -> 'children' *
|
| 103 |
+
|. . [----]| [2:3] NP[NUM='pl'] -> N[NUM='pl'] *
|
| 104 |
+
|. . [---->| [2:3] S[] -> NP[NUM=?n] * VP[NUM=?n] {?n: 'pl'}
|
| 105 |
+
|. [---------]| [1:3] VP[NUM='sg', TENSE='pres'] -> TV[NUM='sg', TENSE='pres'] NP[] *
|
| 106 |
+
|[==============]| [0:3] S[] -> NP[NUM='sg'] VP[NUM='sg'] *
|
| 107 |
+
>>> for tree in trees: print(tree)
|
| 108 |
+
(S[]
|
| 109 |
+
(NP[NUM='sg'] (PropN[NUM='sg'] Kim))
|
| 110 |
+
(VP[NUM='sg', TENSE='pres']
|
| 111 |
+
(TV[NUM='sg', TENSE='pres'] likes)
|
| 112 |
+
(NP[NUM='pl'] (N[NUM='pl'] children))))
|
| 113 |
+
|
| 114 |
+
The parser works directly with
|
| 115 |
+
the underspecified productions given by the grammar. That is, the
|
| 116 |
+
Predictor rule does not attempt to compile out all admissible feature
|
| 117 |
+
combinations before trying to expand the non-terminals on the left hand
|
| 118 |
+
side of a production. However, when the Scanner matches an input word
|
| 119 |
+
against a lexical production that has been predicted, the new edge will
|
| 120 |
+
typically contain fully specified features; e.g., the edge
|
| 121 |
+
[PropN[`num`:feat: = `sg`:fval:] |rarr| 'Kim', (0, 1)]. Recall from
|
| 122 |
+
Chapter 8 that the Fundamental (or Completer) Rule in
|
| 123 |
+
standard CFGs is used to combine an incomplete edge that's expecting a
|
| 124 |
+
nonterminal *B* with a following, complete edge whose left hand side
|
| 125 |
+
matches *B*. In our current setting, rather than checking for a
|
| 126 |
+
complete match, we test whether the expected category *B* will
|
| 127 |
+
unify with the left hand side *B'* of a following complete
|
| 128 |
+
edge. We will explain in more detail in Section 9.2 how
|
| 129 |
+
unification works; for the moment, it is enough to know that as a
|
| 130 |
+
result of unification, any variable values of features in *B* will be
|
| 131 |
+
instantiated by constant values in the corresponding feature structure
|
| 132 |
+
in *B'*, and these instantiated values will be used in the new edge
|
| 133 |
+
added by the Completer. This instantiation can be seen, for example,
|
| 134 |
+
in the edge
|
| 135 |
+
[NP [`num`:feat:\ =\ `sg`:fval:] |rarr| PropN[`num`:feat:\ =\ `sg`:fval:] |dot|, (0, 1)]
|
| 136 |
+
in Example 9.2, where the feature `num`:feat: has been assigned the value `sg`:fval:.
|
| 137 |
+
|
| 138 |
+
Feature structures in NLTK are ... Atomic feature values can be strings or
|
| 139 |
+
integers.
|
| 140 |
+
|
| 141 |
+
>>> fs1 = nltk.FeatStruct(TENSE='past', NUM='sg')
|
| 142 |
+
>>> print(fs1)
|
| 143 |
+
[ NUM = 'sg' ]
|
| 144 |
+
[ TENSE = 'past' ]
|
| 145 |
+
|
| 146 |
+
We can think of a feature structure as being like a Python dictionary,
|
| 147 |
+
and access its values by indexing in the usual way.
|
| 148 |
+
|
| 149 |
+
>>> fs1 = nltk.FeatStruct(PER=3, NUM='pl', GND='fem')
|
| 150 |
+
>>> print(fs1['GND'])
|
| 151 |
+
fem
|
| 152 |
+
|
| 153 |
+
We can also define feature structures which have complex values, as
|
| 154 |
+
discussed earlier.
|
| 155 |
+
|
| 156 |
+
>>> fs2 = nltk.FeatStruct(POS='N', AGR=fs1)
|
| 157 |
+
>>> print(fs2)
|
| 158 |
+
[ [ GND = 'fem' ] ]
|
| 159 |
+
[ AGR = [ NUM = 'pl' ] ]
|
| 160 |
+
[ [ PER = 3 ] ]
|
| 161 |
+
[ ]
|
| 162 |
+
[ POS = 'N' ]
|
| 163 |
+
>>> print(fs2['AGR'])
|
| 164 |
+
[ GND = 'fem' ]
|
| 165 |
+
[ NUM = 'pl' ]
|
| 166 |
+
[ PER = 3 ]
|
| 167 |
+
>>> print(fs2['AGR']['PER'])
|
| 168 |
+
3
|
| 169 |
+
|
| 170 |
+
Feature structures can also be constructed using the ``parse()``
|
| 171 |
+
method of the ``nltk.FeatStruct`` class. Note that in this case, atomic
|
| 172 |
+
feature values do not need to be enclosed in quotes.
|
| 173 |
+
|
| 174 |
+
>>> f1 = nltk.FeatStruct("[NUMBER = sg]")
|
| 175 |
+
>>> f2 = nltk.FeatStruct("[PERSON = 3]")
|
| 176 |
+
>>> print(nltk.unify(f1, f2))
|
| 177 |
+
[ NUMBER = 'sg' ]
|
| 178 |
+
[ PERSON = 3 ]
|
| 179 |
+
|
| 180 |
+
>>> f1 = nltk.FeatStruct("[A = [B = b, D = d]]")
|
| 181 |
+
>>> f2 = nltk.FeatStruct("[A = [C = c, D = d]]")
|
| 182 |
+
>>> print(nltk.unify(f1, f2))
|
| 183 |
+
[ [ B = 'b' ] ]
|
| 184 |
+
[ A = [ C = 'c' ] ]
|
| 185 |
+
[ [ D = 'd' ] ]
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
Feature Structures as Graphs
|
| 189 |
+
----------------------------
|
| 190 |
+
|
| 191 |
+
Feature structures are not inherently tied to linguistic objects; they are
|
| 192 |
+
general purpose structures for representing knowledge. For example, we
|
| 193 |
+
could encode information about a person in a feature structure:
|
| 194 |
+
|
| 195 |
+
>>> person01 = nltk.FeatStruct("[NAME=Lee, TELNO='01 27 86 42 96',AGE=33]")
|
| 196 |
+
>>> print(person01)
|
| 197 |
+
[ AGE = 33 ]
|
| 198 |
+
[ NAME = 'Lee' ]
|
| 199 |
+
[ TELNO = '01 27 86 42 96' ]
|
| 200 |
+
|
| 201 |
+
There are a number of notations for representing reentrancy in
|
| 202 |
+
matrix-style representations of feature structures. In NLTK, we adopt
|
| 203 |
+
the following convention: the first occurrence of a shared feature structure
|
| 204 |
+
is prefixed with an integer in parentheses, such as ``(1)``, and any
|
| 205 |
+
subsequent reference to that structure uses the notation
|
| 206 |
+
``->(1)``, as shown below.
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
>>> fs = nltk.FeatStruct("""[NAME=Lee, ADDRESS=(1)[NUMBER=74, STREET='rue Pascal'],
|
| 210 |
+
... SPOUSE=[NAME=Kim, ADDRESS->(1)]]""")
|
| 211 |
+
>>> print(fs)
|
| 212 |
+
[ ADDRESS = (1) [ NUMBER = 74 ] ]
|
| 213 |
+
[ [ STREET = 'rue Pascal' ] ]
|
| 214 |
+
[ ]
|
| 215 |
+
[ NAME = 'Lee' ]
|
| 216 |
+
[ ]
|
| 217 |
+
[ SPOUSE = [ ADDRESS -> (1) ] ]
|
| 218 |
+
[ [ NAME = 'Kim' ] ]
|
| 219 |
+
|
| 220 |
+
There can be any number of tags within a single feature structure.
|
| 221 |
+
|
| 222 |
+
>>> fs3 = nltk.FeatStruct("[A=(1)[B=b], C=(2)[], D->(1), E->(2)]")
|
| 223 |
+
>>> print(fs3)
|
| 224 |
+
[ A = (1) [ B = 'b' ] ]
|
| 225 |
+
[ ]
|
| 226 |
+
[ C = (2) [] ]
|
| 227 |
+
[ ]
|
| 228 |
+
[ D -> (1) ]
|
| 229 |
+
[ E -> (2) ]
|
| 230 |
+
>>> fs1 = nltk.FeatStruct(NUMBER=74, STREET='rue Pascal')
|
| 231 |
+
>>> fs2 = nltk.FeatStruct(CITY='Paris')
|
| 232 |
+
>>> print(nltk.unify(fs1, fs2))
|
| 233 |
+
[ CITY = 'Paris' ]
|
| 234 |
+
[ NUMBER = 74 ]
|
| 235 |
+
[ STREET = 'rue Pascal' ]
|
| 236 |
+
|
| 237 |
+
Unification is symmetric:
|
| 238 |
+
|
| 239 |
+
>>> nltk.unify(fs1, fs2) == nltk.unify(fs2, fs1)
|
| 240 |
+
True
|
| 241 |
+
|
| 242 |
+
Unification is commutative:
|
| 243 |
+
|
| 244 |
+
>>> fs3 = nltk.FeatStruct(TELNO='01 27 86 42 96')
|
| 245 |
+
>>> nltk.unify(nltk.unify(fs1, fs2), fs3) == nltk.unify(fs1, nltk.unify(fs2, fs3))
|
| 246 |
+
True
|
| 247 |
+
|
| 248 |
+
Unification between *FS*:math:`_0` and *FS*:math:`_1` will fail if the
|
| 249 |
+
two feature structures share a path |pi|,
|
| 250 |
+
but the value of |pi| in *FS*:math:`_0` is a distinct
|
| 251 |
+
atom from the value of |pi| in *FS*:math:`_1`. In NLTK,
|
| 252 |
+
this is implemented by setting the result of unification to be
|
| 253 |
+
``None``.
|
| 254 |
+
|
| 255 |
+
>>> fs0 = nltk.FeatStruct(A='a')
|
| 256 |
+
>>> fs1 = nltk.FeatStruct(A='b')
|
| 257 |
+
>>> print(nltk.unify(fs0, fs1))
|
| 258 |
+
None
|
| 259 |
+
|
| 260 |
+
Now, if we look at how unification interacts with structure-sharing,
|
| 261 |
+
things become really interesting.
|
| 262 |
+
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
>>> fs0 = nltk.FeatStruct("""[NAME=Lee,
|
| 266 |
+
... ADDRESS=[NUMBER=74,
|
| 267 |
+
... STREET='rue Pascal'],
|
| 268 |
+
... SPOUSE= [NAME=Kim,
|
| 269 |
+
... ADDRESS=[NUMBER=74,
|
| 270 |
+
... STREET='rue Pascal']]]""")
|
| 271 |
+
>>> print(fs0)
|
| 272 |
+
[ ADDRESS = [ NUMBER = 74 ] ]
|
| 273 |
+
[ [ STREET = 'rue Pascal' ] ]
|
| 274 |
+
[ ]
|
| 275 |
+
[ NAME = 'Lee' ]
|
| 276 |
+
[ ]
|
| 277 |
+
[ [ ADDRESS = [ NUMBER = 74 ] ] ]
|
| 278 |
+
[ SPOUSE = [ [ STREET = 'rue Pascal' ] ] ]
|
| 279 |
+
[ [ ] ]
|
| 280 |
+
[ [ NAME = 'Kim' ] ]
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
>>> fs1 = nltk.FeatStruct("[SPOUSE=[ADDRESS=[CITY=Paris]]]")
|
| 284 |
+
>>> print(nltk.unify(fs0, fs1))
|
| 285 |
+
[ ADDRESS = [ NUMBER = 74 ] ]
|
| 286 |
+
[ [ STREET = 'rue Pascal' ] ]
|
| 287 |
+
[ ]
|
| 288 |
+
[ NAME = 'Lee' ]
|
| 289 |
+
[ ]
|
| 290 |
+
[ [ [ CITY = 'Paris' ] ] ]
|
| 291 |
+
[ [ ADDRESS = [ NUMBER = 74 ] ] ]
|
| 292 |
+
[ SPOUSE = [ [ STREET = 'rue Pascal' ] ] ]
|
| 293 |
+
[ [ ] ]
|
| 294 |
+
[ [ NAME = 'Kim' ] ]
|
| 295 |
+
|
| 296 |
+
>>> fs2 = nltk.FeatStruct("""[NAME=Lee, ADDRESS=(1)[NUMBER=74, STREET='rue Pascal'],
|
| 297 |
+
... SPOUSE=[NAME=Kim, ADDRESS->(1)]]""")
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
>>> print(fs2)
|
| 301 |
+
[ ADDRESS = (1) [ NUMBER = 74 ] ]
|
| 302 |
+
[ [ STREET = 'rue Pascal' ] ]
|
| 303 |
+
[ ]
|
| 304 |
+
[ NAME = 'Lee' ]
|
| 305 |
+
[ ]
|
| 306 |
+
[ SPOUSE = [ ADDRESS -> (1) ] ]
|
| 307 |
+
[ [ NAME = 'Kim' ] ]
|
| 308 |
+
|
| 309 |
+
|
| 310 |
+
>>> print(nltk.unify(fs2, fs1))
|
| 311 |
+
[ [ CITY = 'Paris' ] ]
|
| 312 |
+
[ ADDRESS = (1) [ NUMBER = 74 ] ]
|
| 313 |
+
[ [ STREET = 'rue Pascal' ] ]
|
| 314 |
+
[ ]
|
| 315 |
+
[ NAME = 'Lee' ]
|
| 316 |
+
[ ]
|
| 317 |
+
[ SPOUSE = [ ADDRESS -> (1) ] ]
|
| 318 |
+
[ [ NAME = 'Kim' ] ]
|
| 319 |
+
|
| 320 |
+
|
| 321 |
+
>>> fs1 = nltk.FeatStruct("[ADDRESS1=[NUMBER=74, STREET='rue Pascal']]")
|
| 322 |
+
>>> fs2 = nltk.FeatStruct("[ADDRESS1=?x, ADDRESS2=?x]")
|
| 323 |
+
>>> print(fs2)
|
| 324 |
+
[ ADDRESS1 = ?x ]
|
| 325 |
+
[ ADDRESS2 = ?x ]
|
| 326 |
+
>>> print(nltk.unify(fs1, fs2))
|
| 327 |
+
[ ADDRESS1 = (1) [ NUMBER = 74 ] ]
|
| 328 |
+
[ [ STREET = 'rue Pascal' ] ]
|
| 329 |
+
[ ]
|
| 330 |
+
[ ADDRESS2 -> (1) ]
|
| 331 |
+
|
| 332 |
+
|
| 333 |
+
|
| 334 |
+
|
| 335 |
+
>>> sent = 'who do you claim that you like'
|
| 336 |
+
>>> tokens = sent.split()
|
| 337 |
+
>>> cp = parse.load_parser('grammars/book_grammars/feat1.fcfg', trace=1)
|
| 338 |
+
>>> trees = cp.parse(tokens)
|
| 339 |
+
|.w.d.y.c.t.y.l.|
|
| 340 |
+
|[-] . . . . . .| [0:1] 'who'
|
| 341 |
+
|. [-] . . . . .| [1:2] 'do'
|
| 342 |
+
|. . [-] . . . .| [2:3] 'you'
|
| 343 |
+
|. . . [-] . . .| [3:4] 'claim'
|
| 344 |
+
|. . . . [-] . .| [4:5] 'that'
|
| 345 |
+
|. . . . . [-] .| [5:6] 'you'
|
| 346 |
+
|. . . . . . [-]| [6:7] 'like'
|
| 347 |
+
|# . . . . . . .| [0:0] NP[]/NP[] -> *
|
| 348 |
+
|. # . . . . . .| [1:1] NP[]/NP[] -> *
|
| 349 |
+
|. . # . . . . .| [2:2] NP[]/NP[] -> *
|
| 350 |
+
|. . . # . . . .| [3:3] NP[]/NP[] -> *
|
| 351 |
+
|. . . . # . . .| [4:4] NP[]/NP[] -> *
|
| 352 |
+
|. . . . . # . .| [5:5] NP[]/NP[] -> *
|
| 353 |
+
|. . . . . . # .| [6:6] NP[]/NP[] -> *
|
| 354 |
+
|. . . . . . . #| [7:7] NP[]/NP[] -> *
|
| 355 |
+
|[-] . . . . . .| [0:1] NP[+WH] -> 'who' *
|
| 356 |
+
|[-> . . . . . .| [0:1] S[-INV] -> NP[] * VP[] {}
|
| 357 |
+
|[-> . . . . . .| [0:1] S[-INV]/?x[] -> NP[] * VP[]/?x[] {}
|
| 358 |
+
|[-> . . . . . .| [0:1] S[-INV] -> NP[] * S[]/NP[] {}
|
| 359 |
+
|. [-] . . . . .| [1:2] V[+AUX] -> 'do' *
|
| 360 |
+
|. [-> . . . . .| [1:2] S[+INV] -> V[+AUX] * NP[] VP[] {}
|
| 361 |
+
|. [-> . . . . .| [1:2] S[+INV]/?x[] -> V[+AUX] * NP[] VP[]/?x[] {}
|
| 362 |
+
|. [-> . . . . .| [1:2] VP[] -> V[+AUX] * VP[] {}
|
| 363 |
+
|. [-> . . . . .| [1:2] VP[]/?x[] -> V[+AUX] * VP[]/?x[] {}
|
| 364 |
+
|. . [-] . . . .| [2:3] NP[-WH] -> 'you' *
|
| 365 |
+
|. . [-> . . . .| [2:3] S[-INV] -> NP[] * VP[] {}
|
| 366 |
+
|. . [-> . . . .| [2:3] S[-INV]/?x[] -> NP[] * VP[]/?x[] {}
|
| 367 |
+
|. . [-> . . . .| [2:3] S[-INV] -> NP[] * S[]/NP[] {}
|
| 368 |
+
|. [---> . . . .| [1:3] S[+INV] -> V[+AUX] NP[] * VP[] {}
|
| 369 |
+
|. [---> . . . .| [1:3] S[+INV]/?x[] -> V[+AUX] NP[] * VP[]/?x[] {}
|
| 370 |
+
|. . . [-] . . .| [3:4] V[-AUX, SUBCAT='clause'] -> 'claim' *
|
| 371 |
+
|. . . [-> . . .| [3:4] VP[] -> V[-AUX, SUBCAT='clause'] * SBar[] {}
|
| 372 |
+
|. . . [-> . . .| [3:4] VP[]/?x[] -> V[-AUX, SUBCAT='clause'] * SBar[]/?x[] {}
|
| 373 |
+
|. . . . [-] . .| [4:5] Comp[] -> 'that' *
|
| 374 |
+
|. . . . [-> . .| [4:5] SBar[] -> Comp[] * S[-INV] {}
|
| 375 |
+
|. . . . [-> . .| [4:5] SBar[]/?x[] -> Comp[] * S[-INV]/?x[] {}
|
| 376 |
+
|. . . . . [-] .| [5:6] NP[-WH] -> 'you' *
|
| 377 |
+
|. . . . . [-> .| [5:6] S[-INV] -> NP[] * VP[] {}
|
| 378 |
+
|. . . . . [-> .| [5:6] S[-INV]/?x[] -> NP[] * VP[]/?x[] {}
|
| 379 |
+
|. . . . . [-> .| [5:6] S[-INV] -> NP[] * S[]/NP[] {}
|
| 380 |
+
|. . . . . . [-]| [6:7] V[-AUX, SUBCAT='trans'] -> 'like' *
|
| 381 |
+
|. . . . . . [->| [6:7] VP[] -> V[-AUX, SUBCAT='trans'] * NP[] {}
|
| 382 |
+
|. . . . . . [->| [6:7] VP[]/?x[] -> V[-AUX, SUBCAT='trans'] * NP[]/?x[] {}
|
| 383 |
+
|. . . . . . [-]| [6:7] VP[]/NP[] -> V[-AUX, SUBCAT='trans'] NP[]/NP[] *
|
| 384 |
+
|. . . . . [---]| [5:7] S[-INV]/NP[] -> NP[] VP[]/NP[] *
|
| 385 |
+
|. . . . [-----]| [4:7] SBar[]/NP[] -> Comp[] S[-INV]/NP[] *
|
| 386 |
+
|. . . [-------]| [3:7] VP[]/NP[] -> V[-AUX, SUBCAT='clause'] SBar[]/NP[] *
|
| 387 |
+
|. . [---------]| [2:7] S[-INV]/NP[] -> NP[] VP[]/NP[] *
|
| 388 |
+
|. [-----------]| [1:7] S[+INV]/NP[] -> V[+AUX] NP[] VP[]/NP[] *
|
| 389 |
+
|[=============]| [0:7] S[-INV] -> NP[] S[]/NP[] *
|
| 390 |
+
|
| 391 |
+
>>> trees = list(trees)
|
| 392 |
+
>>> for tree in trees: print(tree)
|
| 393 |
+
(S[-INV]
|
| 394 |
+
(NP[+WH] who)
|
| 395 |
+
(S[+INV]/NP[]
|
| 396 |
+
(V[+AUX] do)
|
| 397 |
+
(NP[-WH] you)
|
| 398 |
+
(VP[]/NP[]
|
| 399 |
+
(V[-AUX, SUBCAT='clause'] claim)
|
| 400 |
+
(SBar[]/NP[]
|
| 401 |
+
(Comp[] that)
|
| 402 |
+
(S[-INV]/NP[]
|
| 403 |
+
(NP[-WH] you)
|
| 404 |
+
(VP[]/NP[] (V[-AUX, SUBCAT='trans'] like) (NP[]/NP[] )))))))
|
| 405 |
+
|
| 406 |
+
A different parser should give the same parse trees, but perhaps in a different order:
|
| 407 |
+
|
| 408 |
+
>>> cp2 = parse.load_parser('grammars/book_grammars/feat1.fcfg', trace=1,
|
| 409 |
+
... parser=parse.FeatureEarleyChartParser)
|
| 410 |
+
>>> trees2 = cp2.parse(tokens)
|
| 411 |
+
|.w.d.y.c.t.y.l.|
|
| 412 |
+
|[-] . . . . . .| [0:1] 'who'
|
| 413 |
+
|. [-] . . . . .| [1:2] 'do'
|
| 414 |
+
|. . [-] . . . .| [2:3] 'you'
|
| 415 |
+
|. . . [-] . . .| [3:4] 'claim'
|
| 416 |
+
|. . . . [-] . .| [4:5] 'that'
|
| 417 |
+
|. . . . . [-] .| [5:6] 'you'
|
| 418 |
+
|. . . . . . [-]| [6:7] 'like'
|
| 419 |
+
|> . . . . . . .| [0:0] S[-INV] -> * NP[] VP[] {}
|
| 420 |
+
|> . . . . . . .| [0:0] S[-INV]/?x[] -> * NP[] VP[]/?x[] {}
|
| 421 |
+
|> . . . . . . .| [0:0] S[-INV] -> * NP[] S[]/NP[] {}
|
| 422 |
+
|> . . . . . . .| [0:0] S[-INV] -> * Adv[+NEG] S[+INV] {}
|
| 423 |
+
|> . . . . . . .| [0:0] S[+INV] -> * V[+AUX] NP[] VP[] {}
|
| 424 |
+
|> . . . . . . .| [0:0] S[+INV]/?x[] -> * V[+AUX] NP[] VP[]/?x[] {}
|
| 425 |
+
|> . . . . . . .| [0:0] NP[+WH] -> * 'who' {}
|
| 426 |
+
|[-] . . . . . .| [0:1] NP[+WH] -> 'who' *
|
| 427 |
+
|[-> . . . . . .| [0:1] S[-INV] -> NP[] * VP[] {}
|
| 428 |
+
|[-> . . . . . .| [0:1] S[-INV]/?x[] -> NP[] * VP[]/?x[] {}
|
| 429 |
+
|[-> . . . . . .| [0:1] S[-INV] -> NP[] * S[]/NP[] {}
|
| 430 |
+
|. > . . . . . .| [1:1] S[-INV]/?x[] -> * NP[] VP[]/?x[] {}
|
| 431 |
+
|. > . . . . . .| [1:1] S[+INV]/?x[] -> * V[+AUX] NP[] VP[]/?x[] {}
|
| 432 |
+
|. > . . . . . .| [1:1] V[+AUX] -> * 'do' {}
|
| 433 |
+
|. > . . . . . .| [1:1] VP[]/?x[] -> * V[-AUX, SUBCAT='trans'] NP[]/?x[] {}
|
| 434 |
+
|. > . . . . . .| [1:1] VP[]/?x[] -> * V[-AUX, SUBCAT='clause'] SBar[]/?x[] {}
|
| 435 |
+
|. > . . . . . .| [1:1] VP[]/?x[] -> * V[+AUX] VP[]/?x[] {}
|
| 436 |
+
|. > . . . . . .| [1:1] VP[] -> * V[-AUX, SUBCAT='intrans'] {}
|
| 437 |
+
|. > . . . . . .| [1:1] VP[] -> * V[-AUX, SUBCAT='trans'] NP[] {}
|
| 438 |
+
|. > . . . . . .| [1:1] VP[] -> * V[-AUX, SUBCAT='clause'] SBar[] {}
|
| 439 |
+
|. > . . . . . .| [1:1] VP[] -> * V[+AUX] VP[] {}
|
| 440 |
+
|. [-] . . . . .| [1:2] V[+AUX] -> 'do' *
|
| 441 |
+
|. [-> . . . . .| [1:2] S[+INV]/?x[] -> V[+AUX] * NP[] VP[]/?x[] {}
|
| 442 |
+
|. [-> . . . . .| [1:2] VP[]/?x[] -> V[+AUX] * VP[]/?x[] {}
|
| 443 |
+
|. [-> . . . . .| [1:2] VP[] -> V[+AUX] * VP[] {}
|
| 444 |
+
|. . > . . . . .| [2:2] VP[] -> * V[-AUX, SUBCAT='intrans'] {}
|
| 445 |
+
|. . > . . . . .| [2:2] VP[] -> * V[-AUX, SUBCAT='trans'] NP[] {}
|
| 446 |
+
|. . > . . . . .| [2:2] VP[] -> * V[-AUX, SUBCAT='clause'] SBar[] {}
|
| 447 |
+
|. . > . . . . .| [2:2] VP[] -> * V[+AUX] VP[] {}
|
| 448 |
+
|. . > . . . . .| [2:2] VP[]/?x[] -> * V[-AUX, SUBCAT='trans'] NP[]/?x[] {}
|
| 449 |
+
|. . > . . . . .| [2:2] VP[]/?x[] -> * V[-AUX, SUBCAT='clause'] SBar[]/?x[] {}
|
| 450 |
+
|. . > . . . . .| [2:2] VP[]/?x[] -> * V[+AUX] VP[]/?x[] {}
|
| 451 |
+
|. . > . . . . .| [2:2] NP[-WH] -> * 'you' {}
|
| 452 |
+
|. . [-] . . . .| [2:3] NP[-WH] -> 'you' *
|
| 453 |
+
|. [---> . . . .| [1:3] S[+INV]/?x[] -> V[+AUX] NP[] * VP[]/?x[] {}
|
| 454 |
+
|. . . > . . . .| [3:3] VP[]/?x[] -> * V[-AUX, SUBCAT='trans'] NP[]/?x[] {}
|
| 455 |
+
|. . . > . . . .| [3:3] VP[]/?x[] -> * V[-AUX, SUBCAT='clause'] SBar[]/?x[] {}
|
| 456 |
+
|. . . > . . . .| [3:3] VP[]/?x[] -> * V[+AUX] VP[]/?x[] {}
|
| 457 |
+
|. . . > . . . .| [3:3] V[-AUX, SUBCAT='clause'] -> * 'claim' {}
|
| 458 |
+
|. . . [-] . . .| [3:4] V[-AUX, SUBCAT='clause'] -> 'claim' *
|
| 459 |
+
|. . . [-> . . .| [3:4] VP[]/?x[] -> V[-AUX, SUBCAT='clause'] * SBar[]/?x[] {}
|
| 460 |
+
|. . . . > . . .| [4:4] SBar[]/?x[] -> * Comp[] S[-INV]/?x[] {}
|
| 461 |
+
|. . . . > . . .| [4:4] Comp[] -> * 'that' {}
|
| 462 |
+
|. . . . [-] . .| [4:5] Comp[] -> 'that' *
|
| 463 |
+
|. . . . [-> . .| [4:5] SBar[]/?x[] -> Comp[] * S[-INV]/?x[] {}
|
| 464 |
+
|. . . . . > . .| [5:5] S[-INV]/?x[] -> * NP[] VP[]/?x[] {}
|
| 465 |
+
|. . . . . > . .| [5:5] NP[-WH] -> * 'you' {}
|
| 466 |
+
|. . . . . [-] .| [5:6] NP[-WH] -> 'you' *
|
| 467 |
+
|. . . . . [-> .| [5:6] S[-INV]/?x[] -> NP[] * VP[]/?x[] {}
|
| 468 |
+
|. . . . . . > .| [6:6] VP[]/?x[] -> * V[-AUX, SUBCAT='trans'] NP[]/?x[] {}
|
| 469 |
+
|. . . . . . > .| [6:6] VP[]/?x[] -> * V[-AUX, SUBCAT='clause'] SBar[]/?x[] {}
|
| 470 |
+
|. . . . . . > .| [6:6] VP[]/?x[] -> * V[+AUX] VP[]/?x[] {}
|
| 471 |
+
|. . . . . . > .| [6:6] V[-AUX, SUBCAT='trans'] -> * 'like' {}
|
| 472 |
+
|. . . . . . [-]| [6:7] V[-AUX, SUBCAT='trans'] -> 'like' *
|
| 473 |
+
|. . . . . . [->| [6:7] VP[]/?x[] -> V[-AUX, SUBCAT='trans'] * NP[]/?x[] {}
|
| 474 |
+
|. . . . . . . #| [7:7] NP[]/NP[] -> *
|
| 475 |
+
|. . . . . . [-]| [6:7] VP[]/NP[] -> V[-AUX, SUBCAT='trans'] NP[]/NP[] *
|
| 476 |
+
|. . . . . [---]| [5:7] S[-INV]/NP[] -> NP[] VP[]/NP[] *
|
| 477 |
+
|. . . . [-----]| [4:7] SBar[]/NP[] -> Comp[] S[-INV]/NP[] *
|
| 478 |
+
|. . . [-------]| [3:7] VP[]/NP[] -> V[-AUX, SUBCAT='clause'] SBar[]/NP[] *
|
| 479 |
+
|. [-----------]| [1:7] S[+INV]/NP[] -> V[+AUX] NP[] VP[]/NP[] *
|
| 480 |
+
|[=============]| [0:7] S[-INV] -> NP[] S[]/NP[] *
|
| 481 |
+
|
| 482 |
+
>>> sorted(trees) == sorted(trees2)
|
| 483 |
+
True
|
| 484 |
+
|
| 485 |
+
|
| 486 |
+
Let's load a German grammar:
|
| 487 |
+
|
| 488 |
+
>>> cp = parse.load_parser('grammars/book_grammars/german.fcfg', trace=0)
|
| 489 |
+
>>> sent = 'die Katze sieht den Hund'
|
| 490 |
+
>>> tokens = sent.split()
|
| 491 |
+
>>> trees = cp.parse(tokens)
|
| 492 |
+
>>> for tree in trees: print(tree)
|
| 493 |
+
(S[]
|
| 494 |
+
(NP[AGR=[GND='fem', NUM='sg', PER=3], CASE='nom']
|
| 495 |
+
(Det[AGR=[GND='fem', NUM='sg', PER=3], CASE='nom'] die)
|
| 496 |
+
(N[AGR=[GND='fem', NUM='sg', PER=3]] Katze))
|
| 497 |
+
(VP[AGR=[NUM='sg', PER=3]]
|
| 498 |
+
(TV[AGR=[NUM='sg', PER=3], OBJCASE='acc'] sieht)
|
| 499 |
+
(NP[AGR=[GND='masc', NUM='sg', PER=3], CASE='acc']
|
| 500 |
+
(Det[AGR=[GND='masc', NUM='sg', PER=3], CASE='acc'] den)
|
| 501 |
+
(N[AGR=[GND='masc', NUM='sg', PER=3]] Hund))))
|
| 502 |
+
|
| 503 |
+
Grammar with Binding Operators
|
| 504 |
+
------------------------------
|
| 505 |
+
The bindop.fcfg grammar is a semantic grammar that uses lambda
|
| 506 |
+
calculus. Each element has a core semantics, which is a single lambda
|
| 507 |
+
calculus expression; and a set of binding operators, which bind
|
| 508 |
+
variables.
|
| 509 |
+
|
| 510 |
+
In order to make the binding operators work right, they need to
|
| 511 |
+
instantiate their bound variable every time they are added to the
|
| 512 |
+
chart. To do this, we use a special subclass of `Chart`, called
|
| 513 |
+
`InstantiateVarsChart`.
|
| 514 |
+
|
| 515 |
+
>>> from nltk.parse.featurechart import InstantiateVarsChart
|
| 516 |
+
>>> cp = parse.load_parser('grammars/sample_grammars/bindop.fcfg', trace=1,
|
| 517 |
+
... chart_class=InstantiateVarsChart)
|
| 518 |
+
>>> print(cp.grammar())
|
| 519 |
+
Grammar with 15 productions (start state = S[])
|
| 520 |
+
S[SEM=[BO={?b1+?b2}, CORE=<?vp(?subj)>]] -> NP[SEM=[BO=?b1, CORE=?subj]] VP[SEM=[BO=?b2, CORE=?vp]]
|
| 521 |
+
VP[SEM=[BO={?b1+?b2}, CORE=<?v(?obj)>]] -> TV[SEM=[BO=?b1, CORE=?v]] NP[SEM=[BO=?b2, CORE=?obj]]
|
| 522 |
+
VP[SEM=?s] -> IV[SEM=?s]
|
| 523 |
+
NP[SEM=[BO={?b1+?b2+{bo(?det(?n),@x)}}, CORE=<@x>]] -> Det[SEM=[BO=?b1, CORE=?det]] N[SEM=[BO=?b2, CORE=?n]]
|
| 524 |
+
Det[SEM=[BO={/}, CORE=<\Q P.exists x.(Q(x) & P(x))>]] -> 'a'
|
| 525 |
+
N[SEM=[BO={/}, CORE=<dog>]] -> 'dog'
|
| 526 |
+
N[SEM=[BO={/}, CORE=<dog>]] -> 'cat'
|
| 527 |
+
N[SEM=[BO={/}, CORE=<dog>]] -> 'mouse'
|
| 528 |
+
IV[SEM=[BO={/}, CORE=<\x.bark(x)>]] -> 'barks'
|
| 529 |
+
IV[SEM=[BO={/}, CORE=<\x.bark(x)>]] -> 'eats'
|
| 530 |
+
IV[SEM=[BO={/}, CORE=<\x.bark(x)>]] -> 'walks'
|
| 531 |
+
TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] -> 'feeds'
|
| 532 |
+
TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] -> 'walks'
|
| 533 |
+
NP[SEM=[BO={bo(\P.P(John),@x)}, CORE=<@x>]] -> 'john'
|
| 534 |
+
NP[SEM=[BO={bo(\P.P(John),@x)}, CORE=<@x>]] -> 'alex'
|
| 535 |
+
|
| 536 |
+
A simple intransitive sentence:
|
| 537 |
+
|
| 538 |
+
>>> from nltk.sem import logic
|
| 539 |
+
>>> logic._counter._value = 100
|
| 540 |
+
|
| 541 |
+
>>> trees = cp.parse('john barks'.split())
|
| 542 |
+
|. john.barks.|
|
| 543 |
+
|[-----] .| [0:1] 'john'
|
| 544 |
+
|. [-----]| [1:2] 'barks'
|
| 545 |
+
|[-----] .| [0:1] NP[SEM=[BO={bo(\P.P(John),z101)}, CORE=<z101>]] -> 'john' *
|
| 546 |
+
|[-----> .| [0:1] S[SEM=[BO={?b1+?b2}, CORE=<?vp(?subj)>]] -> NP[SEM=[BO=?b1, CORE=?subj]] * VP[SEM=[BO=?b2, CORE=?vp]] {?b1: {bo(\P.P(John),z2)}, ?subj: <IndividualVariableExpression z2>}
|
| 547 |
+
|. [-----]| [1:2] IV[SEM=[BO={/}, CORE=<\x.bark(x)>]] -> 'barks' *
|
| 548 |
+
|. [-----]| [1:2] VP[SEM=[BO={/}, CORE=<\x.bark(x)>]] -> IV[SEM=[BO={/}, CORE=<\x.bark(x)>]] *
|
| 549 |
+
|[===========]| [0:2] S[SEM=[BO={bo(\P.P(John),z2)}, CORE=<bark(z2)>]] -> NP[SEM=[BO={bo(\P.P(John),z2)}, CORE=<z2>]] VP[SEM=[BO={/}, CORE=<\x.bark(x)>]] *
|
| 550 |
+
>>> for tree in trees: print(tree)
|
| 551 |
+
(S[SEM=[BO={bo(\P.P(John),z2)}, CORE=<bark(z2)>]]
|
| 552 |
+
(NP[SEM=[BO={bo(\P.P(John),z101)}, CORE=<z101>]] john)
|
| 553 |
+
(VP[SEM=[BO={/}, CORE=<\x.bark(x)>]]
|
| 554 |
+
(IV[SEM=[BO={/}, CORE=<\x.bark(x)>]] barks)))
|
| 555 |
+
|
| 556 |
+
A transitive sentence:
|
| 557 |
+
|
| 558 |
+
>>> trees = cp.parse('john feeds a dog'.split())
|
| 559 |
+
|.joh.fee. a .dog.|
|
| 560 |
+
|[---] . . .| [0:1] 'john'
|
| 561 |
+
|. [---] . .| [1:2] 'feeds'
|
| 562 |
+
|. . [---] .| [2:3] 'a'
|
| 563 |
+
|. . . [---]| [3:4] 'dog'
|
| 564 |
+
|[---] . . .| [0:1] NP[SEM=[BO={bo(\P.P(John),z102)}, CORE=<z102>]] -> 'john' *
|
| 565 |
+
|[---> . . .| [0:1] S[SEM=[BO={?b1+?b2}, CORE=<?vp(?subj)>]] -> NP[SEM=[BO=?b1, CORE=?subj]] * VP[SEM=[BO=?b2, CORE=?vp]] {?b1: {bo(\P.P(John),z2)}, ?subj: <IndividualVariableExpression z2>}
|
| 566 |
+
|. [---] . .| [1:2] TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] -> 'feeds' *
|
| 567 |
+
|. [---> . .| [1:2] VP[SEM=[BO={?b1+?b2}, CORE=<?v(?obj)>]] -> TV[SEM=[BO=?b1, CORE=?v]] * NP[SEM=[BO=?b2, CORE=?obj]] {?b1: {/}, ?v: <LambdaExpression \x y.feed(y,x)>}
|
| 568 |
+
|. . [---] .| [2:3] Det[SEM=[BO={/}, CORE=<\Q P.exists x.(Q(x) & P(x))>]] -> 'a' *
|
| 569 |
+
|. . [---> .| [2:3] NP[SEM=[BO={?b1+?b2+{bo(?det(?n),@x)}}, CORE=<@x>]] -> Det[SEM=[BO=?b1, CORE=?det]] * N[SEM=[BO=?b2, CORE=?n]] {?b1: {/}, ?det: <LambdaExpression \Q P.exists x.(Q(x) & P(x))>}
|
| 570 |
+
|. . . [---]| [3:4] N[SEM=[BO={/}, CORE=<dog>]] -> 'dog' *
|
| 571 |
+
|. . [-------]| [2:4] NP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z103)}, CORE=<z103>]] -> Det[SEM=[BO={/}, CORE=<\Q P.exists x.(Q(x) & P(x))>]] N[SEM=[BO={/}, CORE=<dog>]] *
|
| 572 |
+
|. . [------->| [2:4] S[SEM=[BO={?b1+?b2}, CORE=<?vp(?subj)>]] -> NP[SEM=[BO=?b1, CORE=?subj]] * VP[SEM=[BO=?b2, CORE=?vp]] {?b1: {bo(\P.exists x.(dog(x) & P(x)),z2)}, ?subj: <IndividualVariableExpression z2>}
|
| 573 |
+
|. [-----------]| [1:4] VP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z2)}, CORE=<\y.feed(y,z2)>]] -> TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] NP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z2)}, CORE=<z2>]] *
|
| 574 |
+
|[===============]| [0:4] S[SEM=[BO={bo(\P.P(John),z2), bo(\P.exists x.(dog(x) & P(x)),z3)}, CORE=<feed(z2,z3)>]] -> NP[SEM=[BO={bo(\P.P(John),z2)}, CORE=<z2>]] VP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z3)}, CORE=<\y.feed(y,z3)>]] *
|
| 575 |
+
|
| 576 |
+
>>> for tree in trees: print(tree)
|
| 577 |
+
(S[SEM=[BO={bo(\P.P(John),z2), bo(\P.exists x.(dog(x) & P(x)),z3)}, CORE=<feed(z2,z3)>]]
|
| 578 |
+
(NP[SEM=[BO={bo(\P.P(John),z102)}, CORE=<z102>]] john)
|
| 579 |
+
(VP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z2)}, CORE=<\y.feed(y,z2)>]]
|
| 580 |
+
(TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] feeds)
|
| 581 |
+
(NP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z103)}, CORE=<z103>]]
|
| 582 |
+
(Det[SEM=[BO={/}, CORE=<\Q P.exists x.(Q(x) & P(x))>]] a)
|
| 583 |
+
(N[SEM=[BO={/}, CORE=<dog>]] dog))))
|
| 584 |
+
|
| 585 |
+
Turn down the verbosity:
|
| 586 |
+
|
| 587 |
+
>>> cp = parse.load_parser('grammars/sample_grammars/bindop.fcfg', trace=0,
|
| 588 |
+
... chart_class=InstantiateVarsChart)
|
| 589 |
+
|
| 590 |
+
Reuse the same lexical item twice:
|
| 591 |
+
|
| 592 |
+
>>> trees = cp.parse('john feeds john'.split())
|
| 593 |
+
>>> for tree in trees: print(tree)
|
| 594 |
+
(S[SEM=[BO={bo(\P.P(John),z2), bo(\P.P(John),z3)}, CORE=<feed(z2,z3)>]]
|
| 595 |
+
(NP[SEM=[BO={bo(\P.P(John),z104)}, CORE=<z104>]] john)
|
| 596 |
+
(VP[SEM=[BO={bo(\P.P(John),z2)}, CORE=<\y.feed(y,z2)>]]
|
| 597 |
+
(TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] feeds)
|
| 598 |
+
(NP[SEM=[BO={bo(\P.P(John),z105)}, CORE=<z105>]] john)))
|
| 599 |
+
|
| 600 |
+
>>> trees = cp.parse('a dog feeds a dog'.split())
|
| 601 |
+
>>> for tree in trees: print(tree)
|
| 602 |
+
(S[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z2), bo(\P.exists x.(dog(x) & P(x)),z3)}, CORE=<feed(z2,z3)>]]
|
| 603 |
+
(NP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z106)}, CORE=<z106>]]
|
| 604 |
+
(Det[SEM=[BO={/}, CORE=<\Q P.exists x.(Q(x) & P(x))>]] a)
|
| 605 |
+
(N[SEM=[BO={/}, CORE=<dog>]] dog))
|
| 606 |
+
(VP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z2)}, CORE=<\y.feed(y,z2)>]]
|
| 607 |
+
(TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] feeds)
|
| 608 |
+
(NP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z107)}, CORE=<z107>]]
|
| 609 |
+
(Det[SEM=[BO={/}, CORE=<\Q P.exists x.(Q(x) & P(x))>]] a)
|
| 610 |
+
(N[SEM=[BO={/}, CORE=<dog>]] dog))))
|
lib/python3.10/site-packages/nltk/test/featstruct.doctest
ADDED
|
@@ -0,0 +1,1229 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
| 2 |
+
.. For license information, see LICENSE.TXT
|
| 3 |
+
|
| 4 |
+
==================================
|
| 5 |
+
Feature Structures & Unification
|
| 6 |
+
==================================
|
| 7 |
+
>>> from nltk.featstruct import FeatStruct
|
| 8 |
+
>>> from nltk.sem.logic import Variable, VariableExpression, Expression
|
| 9 |
+
|
| 10 |
+
.. note:: For now, featstruct uses the older lambdalogic semantics
|
| 11 |
+
module. Eventually, it should be updated to use the new first
|
| 12 |
+
order predicate logic module.
|
| 13 |
+
|
| 14 |
+
Overview
|
| 15 |
+
~~~~~~~~
|
| 16 |
+
A feature structure is a mapping from feature identifiers to feature
|
| 17 |
+
values, where feature values can be simple values (like strings or
|
| 18 |
+
ints), nested feature structures, or variables:
|
| 19 |
+
|
| 20 |
+
>>> fs1 = FeatStruct(number='singular', person=3)
|
| 21 |
+
>>> print(fs1)
|
| 22 |
+
[ number = 'singular' ]
|
| 23 |
+
[ person = 3 ]
|
| 24 |
+
|
| 25 |
+
Feature structure may be nested:
|
| 26 |
+
|
| 27 |
+
>>> fs2 = FeatStruct(type='NP', agr=fs1)
|
| 28 |
+
>>> print(fs2)
|
| 29 |
+
[ agr = [ number = 'singular' ] ]
|
| 30 |
+
[ [ person = 3 ] ]
|
| 31 |
+
[ ]
|
| 32 |
+
[ type = 'NP' ]
|
| 33 |
+
|
| 34 |
+
Variables are used to indicate that two features should be assigned
|
| 35 |
+
the same value. For example, the following feature structure requires
|
| 36 |
+
that the feature fs3['agr']['number'] be bound to the same value as the
|
| 37 |
+
feature fs3['subj']['number'].
|
| 38 |
+
|
| 39 |
+
>>> fs3 = FeatStruct(agr=FeatStruct(number=Variable('?n')),
|
| 40 |
+
... subj=FeatStruct(number=Variable('?n')))
|
| 41 |
+
>>> print(fs3)
|
| 42 |
+
[ agr = [ number = ?n ] ]
|
| 43 |
+
[ ]
|
| 44 |
+
[ subj = [ number = ?n ] ]
|
| 45 |
+
|
| 46 |
+
Feature structures are typically used to represent partial information
|
| 47 |
+
about objects. A feature name that is not mapped to a value stands
|
| 48 |
+
for a feature whose value is unknown (*not* a feature without a
|
| 49 |
+
value). Two feature structures that represent (potentially
|
| 50 |
+
overlapping) information about the same object can be combined by
|
| 51 |
+
*unification*.
|
| 52 |
+
|
| 53 |
+
>>> print(fs2.unify(fs3))
|
| 54 |
+
[ agr = [ number = 'singular' ] ]
|
| 55 |
+
[ [ person = 3 ] ]
|
| 56 |
+
[ ]
|
| 57 |
+
[ subj = [ number = 'singular' ] ]
|
| 58 |
+
[ ]
|
| 59 |
+
[ type = 'NP' ]
|
| 60 |
+
|
| 61 |
+
When two inconsistent feature structures are unified, the unification
|
| 62 |
+
fails and returns ``None``.
|
| 63 |
+
|
| 64 |
+
>>> fs4 = FeatStruct(agr=FeatStruct(person=1))
|
| 65 |
+
>>> print(fs4.unify(fs2))
|
| 66 |
+
None
|
| 67 |
+
>>> print(fs2.unify(fs4))
|
| 68 |
+
None
|
| 69 |
+
|
| 70 |
+
..
|
| 71 |
+
>>> del fs1, fs2, fs3, fs4 # clean-up
|
| 72 |
+
|
| 73 |
+
Feature Structure Types
|
| 74 |
+
-----------------------
|
| 75 |
+
There are actually two types of feature structure:
|
| 76 |
+
|
| 77 |
+
- *feature dictionaries*, implemented by `FeatDict`, act like
|
| 78 |
+
Python dictionaries. Feature identifiers may be strings or
|
| 79 |
+
instances of the `Feature` class.
|
| 80 |
+
- *feature lists*, implemented by `FeatList`, act like Python
|
| 81 |
+
lists. Feature identifiers are integers.
|
| 82 |
+
|
| 83 |
+
When you construct a feature structure using the `FeatStruct`
|
| 84 |
+
constructor, it will automatically decide which type is appropriate:
|
| 85 |
+
|
| 86 |
+
>>> type(FeatStruct(number='singular'))
|
| 87 |
+
<class 'nltk.featstruct.FeatDict'>
|
| 88 |
+
>>> type(FeatStruct([1,2,3]))
|
| 89 |
+
<class 'nltk.featstruct.FeatList'>
|
| 90 |
+
|
| 91 |
+
Usually, we will just use feature dictionaries; but sometimes feature
|
| 92 |
+
lists can be useful too. Two feature lists will unify with each other
|
| 93 |
+
only if they have equal lengths, and all of their feature values
|
| 94 |
+
match. If you wish to write a feature list that contains 'unknown'
|
| 95 |
+
values, you must use variables:
|
| 96 |
+
|
| 97 |
+
>>> fs1 = FeatStruct([1,2,Variable('?y')])
|
| 98 |
+
>>> fs2 = FeatStruct([1,Variable('?x'),3])
|
| 99 |
+
>>> fs1.unify(fs2)
|
| 100 |
+
[1, 2, 3]
|
| 101 |
+
|
| 102 |
+
..
|
| 103 |
+
>>> del fs1, fs2 # clean-up
|
| 104 |
+
|
| 105 |
+
Parsing Feature Structure Strings
|
| 106 |
+
---------------------------------
|
| 107 |
+
Feature structures can be constructed directly from strings. Often,
|
| 108 |
+
this is more convenient than constructing them directly. NLTK can
|
| 109 |
+
parse most feature strings to produce the corresponding feature
|
| 110 |
+
structures. (But you must restrict your base feature values to
|
| 111 |
+
strings, ints, logic expressions (`nltk.sem.logic.Expression`), and a
|
| 112 |
+
few other types discussed below).
|
| 113 |
+
|
| 114 |
+
Feature dictionaries are written like Python dictionaries, except that
|
| 115 |
+
keys are not put in quotes; and square brackets (``[]``) are used
|
| 116 |
+
instead of braces (``{}``):
|
| 117 |
+
|
| 118 |
+
>>> FeatStruct('[tense="past", agr=[number="sing", person=3]]')
|
| 119 |
+
[agr=[number='sing', person=3], tense='past']
|
| 120 |
+
|
| 121 |
+
If a feature value is a single alphanumeric word, then it does not
|
| 122 |
+
need to be quoted -- it will be automatically treated as a string:
|
| 123 |
+
|
| 124 |
+
>>> FeatStruct('[tense=past, agr=[number=sing, person=3]]')
|
| 125 |
+
[agr=[number='sing', person=3], tense='past']
|
| 126 |
+
|
| 127 |
+
Feature lists are written like python lists:
|
| 128 |
+
|
| 129 |
+
>>> FeatStruct('[1, 2, 3]')
|
| 130 |
+
[1, 2, 3]
|
| 131 |
+
|
| 132 |
+
The expression ``[]`` is treated as an empty feature dictionary, not
|
| 133 |
+
an empty feature list:
|
| 134 |
+
|
| 135 |
+
>>> type(FeatStruct('[]'))
|
| 136 |
+
<class 'nltk.featstruct.FeatDict'>
|
| 137 |
+
|
| 138 |
+
Feature Paths
|
| 139 |
+
-------------
|
| 140 |
+
Features can be specified using *feature paths*, or tuples of feature
|
| 141 |
+
identifiers that specify a path through the nested feature structures to
|
| 142 |
+
a value.
|
| 143 |
+
|
| 144 |
+
>>> fs1 = FeatStruct('[x=1, y=[1,2,[z=3]]]')
|
| 145 |
+
>>> fs1['y']
|
| 146 |
+
[1, 2, [z=3]]
|
| 147 |
+
>>> fs1['y', 2]
|
| 148 |
+
[z=3]
|
| 149 |
+
>>> fs1['y', 2, 'z']
|
| 150 |
+
3
|
| 151 |
+
|
| 152 |
+
..
|
| 153 |
+
>>> del fs1 # clean-up
|
| 154 |
+
|
| 155 |
+
Reentrance
|
| 156 |
+
----------
|
| 157 |
+
Feature structures may contain reentrant feature values. A *reentrant
|
| 158 |
+
feature value* is a single feature structure that can be accessed via
|
| 159 |
+
multiple feature paths.
|
| 160 |
+
|
| 161 |
+
>>> fs1 = FeatStruct(x='val')
|
| 162 |
+
>>> fs2 = FeatStruct(a=fs1, b=fs1)
|
| 163 |
+
>>> print(fs2)
|
| 164 |
+
[ a = (1) [ x = 'val' ] ]
|
| 165 |
+
[ ]
|
| 166 |
+
[ b -> (1) ]
|
| 167 |
+
>>> fs2
|
| 168 |
+
[a=(1)[x='val'], b->(1)]
|
| 169 |
+
|
| 170 |
+
As you can see, reentrance is displayed by marking a feature structure
|
| 171 |
+
with a unique identifier, in this case ``(1)``, the first time it is
|
| 172 |
+
encountered; and then using the special form ``var -> id`` whenever it
|
| 173 |
+
is encountered again. You can use the same notation to directly
|
| 174 |
+
create reentrant feature structures from strings.
|
| 175 |
+
|
| 176 |
+
>>> FeatStruct('[a=(1)[], b->(1), c=[d->(1)]]')
|
| 177 |
+
[a=(1)[], b->(1), c=[d->(1)]]
|
| 178 |
+
|
| 179 |
+
Reentrant feature structures may contain cycles:
|
| 180 |
+
|
| 181 |
+
>>> fs3 = FeatStruct('(1)[a->(1)]')
|
| 182 |
+
>>> fs3['a', 'a', 'a', 'a']
|
| 183 |
+
(1)[a->(1)]
|
| 184 |
+
>>> fs3['a', 'a', 'a', 'a'] is fs3
|
| 185 |
+
True
|
| 186 |
+
|
| 187 |
+
Unification preserves the reentrance relations imposed by both of the
|
| 188 |
+
unified feature structures. In the feature structure resulting from
|
| 189 |
+
unification, any modifications to a reentrant feature value will be
|
| 190 |
+
visible using any of its feature paths.
|
| 191 |
+
|
| 192 |
+
>>> fs3.unify(FeatStruct('[a=[b=12], c=33]'))
|
| 193 |
+
(1)[a->(1), b=12, c=33]
|
| 194 |
+
|
| 195 |
+
..
|
| 196 |
+
>>> del fs1, fs2, fs3 # clean-up
|
| 197 |
+
|
| 198 |
+
Feature Structure Equality
|
| 199 |
+
--------------------------
|
| 200 |
+
Two feature structures are considered equal if they assign the same
|
| 201 |
+
values to all features, *and* they contain the same reentrances.
|
| 202 |
+
|
| 203 |
+
>>> fs1 = FeatStruct('[a=(1)[x=1], b->(1)]')
|
| 204 |
+
>>> fs2 = FeatStruct('[a=(1)[x=1], b->(1)]')
|
| 205 |
+
>>> fs3 = FeatStruct('[a=[x=1], b=[x=1]]')
|
| 206 |
+
>>> fs1 == fs1, fs1 is fs1
|
| 207 |
+
(True, True)
|
| 208 |
+
>>> fs1 == fs2, fs1 is fs2
|
| 209 |
+
(True, False)
|
| 210 |
+
>>> fs1 == fs3, fs1 is fs3
|
| 211 |
+
(False, False)
|
| 212 |
+
|
| 213 |
+
Note that this differs from how Python dictionaries and lists define
|
| 214 |
+
equality -- in particular, Python dictionaries and lists ignore
|
| 215 |
+
reentrance relations. To test two feature structures for equality
|
| 216 |
+
while ignoring reentrance relations, use the `equal_values()` method:
|
| 217 |
+
|
| 218 |
+
>>> fs1.equal_values(fs1)
|
| 219 |
+
True
|
| 220 |
+
>>> fs1.equal_values(fs2)
|
| 221 |
+
True
|
| 222 |
+
>>> fs1.equal_values(fs3)
|
| 223 |
+
True
|
| 224 |
+
|
| 225 |
+
..
|
| 226 |
+
>>> del fs1, fs2, fs3 # clean-up
|
| 227 |
+
|
| 228 |
+
Feature Value Sets & Feature Value Tuples
|
| 229 |
+
-----------------------------------------
|
| 230 |
+
`nltk.featstruct` defines two new data types that are intended to be
|
| 231 |
+
used as feature values: `FeatureValueTuple` and `FeatureValueSet`.
|
| 232 |
+
Both of these types are considered base values -- i.e., unification
|
| 233 |
+
does *not* apply to them. However, variable binding *does* apply to
|
| 234 |
+
any values that they contain.
|
| 235 |
+
|
| 236 |
+
Feature value tuples are written with parentheses:
|
| 237 |
+
|
| 238 |
+
>>> fs1 = FeatStruct('[x=(?x, ?y)]')
|
| 239 |
+
>>> fs1
|
| 240 |
+
[x=(?x, ?y)]
|
| 241 |
+
>>> fs1.substitute_bindings({Variable('?x'): 1, Variable('?y'): 2})
|
| 242 |
+
[x=(1, 2)]
|
| 243 |
+
|
| 244 |
+
Feature sets are written with braces:
|
| 245 |
+
|
| 246 |
+
>>> fs1 = FeatStruct('[x={?x, ?y}]')
|
| 247 |
+
>>> fs1
|
| 248 |
+
[x={?x, ?y}]
|
| 249 |
+
>>> fs1.substitute_bindings({Variable('?x'): 1, Variable('?y'): 2})
|
| 250 |
+
[x={1, 2}]
|
| 251 |
+
|
| 252 |
+
In addition to the basic feature value tuple & set classes, nltk
|
| 253 |
+
defines feature value unions (for sets) and feature value
|
| 254 |
+
concatenations (for tuples). These are written using '+', and can be
|
| 255 |
+
used to combine sets & tuples:
|
| 256 |
+
|
| 257 |
+
>>> fs1 = FeatStruct('[x=((1, 2)+?z), z=?z]')
|
| 258 |
+
>>> fs1
|
| 259 |
+
[x=((1, 2)+?z), z=?z]
|
| 260 |
+
>>> fs1.unify(FeatStruct('[z=(3, 4, 5)]'))
|
| 261 |
+
[x=(1, 2, 3, 4, 5), z=(3, 4, 5)]
|
| 262 |
+
|
| 263 |
+
Thus, feature value tuples and sets can be used to build up tuples
|
| 264 |
+
and sets of values over the course of unification. For example, when
|
| 265 |
+
parsing sentences using a semantic feature grammar, feature sets or
|
| 266 |
+
feature tuples can be used to build a list of semantic predicates as
|
| 267 |
+
the sentence is parsed.
|
| 268 |
+
|
| 269 |
+
As was mentioned above, unification does not apply to feature value
|
| 270 |
+
tuples and sets. One reason for this is that it's impossible to define a
|
| 271 |
+
single correct answer for unification when concatenation is used.
|
| 272 |
+
Consider the following example:
|
| 273 |
+
|
| 274 |
+
>>> fs1 = FeatStruct('[x=(1, 2, 3, 4)]')
|
| 275 |
+
>>> fs2 = FeatStruct('[x=(?a+?b), a=?a, b=?b]')
|
| 276 |
+
|
| 277 |
+
If unification applied to feature tuples, then the unification
|
| 278 |
+
algorithm would have to arbitrarily choose how to divide the tuple
|
| 279 |
+
(1,2,3,4) into two parts. Instead, the unification algorithm refuses
|
| 280 |
+
to make this decision, and simply unifies based on value. Because
|
| 281 |
+
(1,2,3,4) is not equal to (?a+?b), fs1 and fs2 will not unify:
|
| 282 |
+
|
| 283 |
+
>>> print(fs1.unify(fs2))
|
| 284 |
+
None
|
| 285 |
+
|
| 286 |
+
If you need a list-like structure that unification does apply to, use
|
| 287 |
+
`FeatList`.
|
| 288 |
+
|
| 289 |
+
..
|
| 290 |
+
>>> del fs1, fs2 # clean-up
|
| 291 |
+
|
| 292 |
+
Light-weight Feature Structures
|
| 293 |
+
-------------------------------
|
| 294 |
+
Many of the functions defined by `nltk.featstruct` can be applied
|
| 295 |
+
directly to simple Python dictionaries and lists, rather than to
|
| 296 |
+
full-fledged `FeatDict` and `FeatList` objects. In other words,
|
| 297 |
+
Python ``dicts`` and ``lists`` can be used as "light-weight" feature
|
| 298 |
+
structures.
|
| 299 |
+
|
| 300 |
+
>>> # Note: pprint prints dicts sorted
|
| 301 |
+
>>> from pprint import pprint
|
| 302 |
+
>>> from nltk.featstruct import unify
|
| 303 |
+
>>> pprint(unify(dict(x=1, y=dict()), dict(a='a', y=dict(b='b'))))
|
| 304 |
+
{'a': 'a', 'x': 1, 'y': {'b': 'b'}}
|
| 305 |
+
|
| 306 |
+
However, you should keep in mind the following caveats:
|
| 307 |
+
|
| 308 |
+
- Python dictionaries & lists ignore reentrance when checking for
|
| 309 |
+
equality between values. But two FeatStructs with different
|
| 310 |
+
reentrances are considered nonequal, even if all their base
|
| 311 |
+
values are equal.
|
| 312 |
+
|
| 313 |
+
- FeatStructs can be easily frozen, allowing them to be used as
|
| 314 |
+
keys in hash tables. Python dictionaries and lists can not.
|
| 315 |
+
|
| 316 |
+
- FeatStructs display reentrance in their string representations;
|
| 317 |
+
Python dictionaries and lists do not.
|
| 318 |
+
|
| 319 |
+
- FeatStructs may *not* be mixed with Python dictionaries and lists
|
| 320 |
+
(e.g., when performing unification).
|
| 321 |
+
|
| 322 |
+
- FeatStructs provide a number of useful methods, such as `walk()`
|
| 323 |
+
and `cyclic()`, which are not available for Python dicts & lists.
|
| 324 |
+
|
| 325 |
+
In general, if your feature structures will contain any reentrances,
|
| 326 |
+
or if you plan to use them as dictionary keys, it is strongly
|
| 327 |
+
recommended that you use full-fledged `FeatStruct` objects.
|
| 328 |
+
|
| 329 |
+
Custom Feature Values
|
| 330 |
+
---------------------
|
| 331 |
+
The abstract base class `CustomFeatureValue` can be used to define new
|
| 332 |
+
base value types that have custom unification methods. For example,
|
| 333 |
+
the following feature value type encodes a range, and defines
|
| 334 |
+
unification as taking the intersection on the ranges:
|
| 335 |
+
|
| 336 |
+
>>> from functools import total_ordering
|
| 337 |
+
>>> from nltk.featstruct import CustomFeatureValue, UnificationFailure
|
| 338 |
+
>>> @total_ordering
|
| 339 |
+
... class Range(CustomFeatureValue):
|
| 340 |
+
... def __init__(self, low, high):
|
| 341 |
+
... assert low <= high
|
| 342 |
+
... self.low = low
|
| 343 |
+
... self.high = high
|
| 344 |
+
... def unify(self, other):
|
| 345 |
+
... if not isinstance(other, Range):
|
| 346 |
+
... return UnificationFailure
|
| 347 |
+
... low = max(self.low, other.low)
|
| 348 |
+
... high = min(self.high, other.high)
|
| 349 |
+
... if low <= high: return Range(low, high)
|
| 350 |
+
... else: return UnificationFailure
|
| 351 |
+
... def __repr__(self):
|
| 352 |
+
... return '(%s<x<%s)' % (self.low, self.high)
|
| 353 |
+
... def __eq__(self, other):
|
| 354 |
+
... if not isinstance(other, Range):
|
| 355 |
+
... return False
|
| 356 |
+
... return (self.low == other.low) and (self.high == other.high)
|
| 357 |
+
... def __lt__(self, other):
|
| 358 |
+
... if not isinstance(other, Range):
|
| 359 |
+
... return True
|
| 360 |
+
... return (self.low, self.high) < (other.low, other.high)
|
| 361 |
+
|
| 362 |
+
>>> fs1 = FeatStruct(x=Range(5,8), y=FeatStruct(z=Range(7,22)))
|
| 363 |
+
>>> print(fs1.unify(FeatStruct(x=Range(6, 22))))
|
| 364 |
+
[ x = (6<x<8) ]
|
| 365 |
+
[ ]
|
| 366 |
+
[ y = [ z = (7<x<22) ] ]
|
| 367 |
+
>>> print(fs1.unify(FeatStruct(x=Range(9, 12))))
|
| 368 |
+
None
|
| 369 |
+
>>> print(fs1.unify(FeatStruct(x=12)))
|
| 370 |
+
None
|
| 371 |
+
>>> print(fs1.unify(FeatStruct('[x=?x, y=[z=?x]]')))
|
| 372 |
+
[ x = (7<x<8) ]
|
| 373 |
+
[ ]
|
| 374 |
+
[ y = [ z = (7<x<8) ] ]
|
| 375 |
+
|
| 376 |
+
Regression Tests
|
| 377 |
+
~~~~~~~~~~~~~~~~
|
| 378 |
+
|
| 379 |
+
Dictionary access methods (non-mutating)
|
| 380 |
+
----------------------------------------
|
| 381 |
+
|
| 382 |
+
>>> fs1 = FeatStruct(a=1, b=2, c=3)
|
| 383 |
+
>>> fs2 = FeatStruct(x=fs1, y='x')
|
| 384 |
+
|
| 385 |
+
Feature structures support all dictionary methods (excluding the class
|
| 386 |
+
method `dict.fromkeys()`). Non-mutating methods:
|
| 387 |
+
|
| 388 |
+
>>> sorted(fs2.keys()) # keys()
|
| 389 |
+
['x', 'y']
|
| 390 |
+
>>> sorted(fs2.values()) # values()
|
| 391 |
+
[[a=1, b=2, c=3], 'x']
|
| 392 |
+
>>> sorted(fs2.items()) # items()
|
| 393 |
+
[('x', [a=1, b=2, c=3]), ('y', 'x')]
|
| 394 |
+
>>> sorted(fs2) # __iter__()
|
| 395 |
+
['x', 'y']
|
| 396 |
+
>>> 'a' in fs2, 'x' in fs2 # __contains__()
|
| 397 |
+
(False, True)
|
| 398 |
+
>>> fs2.has_key('a'), fs2.has_key('x') # has_key()
|
| 399 |
+
(False, True)
|
| 400 |
+
>>> fs2['x'], fs2['y'] # __getitem__()
|
| 401 |
+
([a=1, b=2, c=3], 'x')
|
| 402 |
+
>>> fs2['a'] # __getitem__()
|
| 403 |
+
Traceback (most recent call last):
|
| 404 |
+
. . .
|
| 405 |
+
KeyError: 'a'
|
| 406 |
+
>>> fs2.get('x'), fs2.get('y'), fs2.get('a') # get()
|
| 407 |
+
([a=1, b=2, c=3], 'x', None)
|
| 408 |
+
>>> fs2.get('x', 'hello'), fs2.get('a', 'hello') # get()
|
| 409 |
+
([a=1, b=2, c=3], 'hello')
|
| 410 |
+
>>> len(fs1), len(fs2) # __len__
|
| 411 |
+
(3, 2)
|
| 412 |
+
>>> fs2.copy() # copy()
|
| 413 |
+
[x=[a=1, b=2, c=3], y='x']
|
| 414 |
+
>>> fs2.copy() is fs2 # copy()
|
| 415 |
+
False
|
| 416 |
+
|
| 417 |
+
Note: by default, `FeatStruct.copy()` does a deep copy. Use
|
| 418 |
+
`FeatStruct.copy(deep=False)` for a shallow copy.
|
| 419 |
+
|
| 420 |
+
..
|
| 421 |
+
>>> del fs1, fs2 # clean-up.
|
| 422 |
+
|
| 423 |
+
Dictionary access methods (mutating)
|
| 424 |
+
------------------------------------
|
| 425 |
+
>>> fs1 = FeatStruct(a=1, b=2, c=3)
|
| 426 |
+
>>> fs2 = FeatStruct(x=fs1, y='x')
|
| 427 |
+
|
| 428 |
+
Setting features (`__setitem__()`)
|
| 429 |
+
|
| 430 |
+
>>> fs1['c'] = 5
|
| 431 |
+
>>> fs1
|
| 432 |
+
[a=1, b=2, c=5]
|
| 433 |
+
>>> fs1['x'] = 12
|
| 434 |
+
>>> fs1
|
| 435 |
+
[a=1, b=2, c=5, x=12]
|
| 436 |
+
>>> fs2['x', 'a'] = 2
|
| 437 |
+
>>> fs2
|
| 438 |
+
[x=[a=2, b=2, c=5, x=12], y='x']
|
| 439 |
+
>>> fs1
|
| 440 |
+
[a=2, b=2, c=5, x=12]
|
| 441 |
+
|
| 442 |
+
Deleting features (`__delitem__()`)
|
| 443 |
+
|
| 444 |
+
>>> del fs1['x']
|
| 445 |
+
>>> fs1
|
| 446 |
+
[a=2, b=2, c=5]
|
| 447 |
+
>>> del fs2['x', 'a']
|
| 448 |
+
>>> fs1
|
| 449 |
+
[b=2, c=5]
|
| 450 |
+
|
| 451 |
+
`setdefault()`:
|
| 452 |
+
|
| 453 |
+
>>> fs1.setdefault('b', 99)
|
| 454 |
+
2
|
| 455 |
+
>>> fs1
|
| 456 |
+
[b=2, c=5]
|
| 457 |
+
>>> fs1.setdefault('x', 99)
|
| 458 |
+
99
|
| 459 |
+
>>> fs1
|
| 460 |
+
[b=2, c=5, x=99]
|
| 461 |
+
|
| 462 |
+
`update()`:
|
| 463 |
+
|
| 464 |
+
>>> fs2.update({'a':'A', 'b':'B'}, c='C')
|
| 465 |
+
>>> fs2
|
| 466 |
+
[a='A', b='B', c='C', x=[b=2, c=5, x=99], y='x']
|
| 467 |
+
|
| 468 |
+
`pop()`:
|
| 469 |
+
|
| 470 |
+
>>> fs2.pop('a')
|
| 471 |
+
'A'
|
| 472 |
+
>>> fs2
|
| 473 |
+
[b='B', c='C', x=[b=2, c=5, x=99], y='x']
|
| 474 |
+
>>> fs2.pop('a')
|
| 475 |
+
Traceback (most recent call last):
|
| 476 |
+
. . .
|
| 477 |
+
KeyError: 'a'
|
| 478 |
+
>>> fs2.pop('a', 'foo')
|
| 479 |
+
'foo'
|
| 480 |
+
>>> fs2
|
| 481 |
+
[b='B', c='C', x=[b=2, c=5, x=99], y='x']
|
| 482 |
+
|
| 483 |
+
`clear()`:
|
| 484 |
+
|
| 485 |
+
>>> fs1.clear()
|
| 486 |
+
>>> fs1
|
| 487 |
+
[]
|
| 488 |
+
>>> fs2
|
| 489 |
+
[b='B', c='C', x=[], y='x']
|
| 490 |
+
|
| 491 |
+
`popitem()`:
|
| 492 |
+
|
| 493 |
+
>>> sorted([fs2.popitem() for i in range(len(fs2))])
|
| 494 |
+
[('b', 'B'), ('c', 'C'), ('x', []), ('y', 'x')]
|
| 495 |
+
>>> fs2
|
| 496 |
+
[]
|
| 497 |
+
|
| 498 |
+
Once a feature structure has been frozen, it may not be mutated.
|
| 499 |
+
|
| 500 |
+
>>> fs1 = FeatStruct('[x=1, y=2, z=[a=3]]')
|
| 501 |
+
>>> fs1.freeze()
|
| 502 |
+
>>> fs1.frozen()
|
| 503 |
+
True
|
| 504 |
+
>>> fs1['z'].frozen()
|
| 505 |
+
True
|
| 506 |
+
|
| 507 |
+
>>> fs1['x'] = 5
|
| 508 |
+
Traceback (most recent call last):
|
| 509 |
+
. . .
|
| 510 |
+
ValueError: Frozen FeatStructs may not be modified.
|
| 511 |
+
>>> del fs1['x']
|
| 512 |
+
Traceback (most recent call last):
|
| 513 |
+
. . .
|
| 514 |
+
ValueError: Frozen FeatStructs may not be modified.
|
| 515 |
+
>>> fs1.clear()
|
| 516 |
+
Traceback (most recent call last):
|
| 517 |
+
. . .
|
| 518 |
+
ValueError: Frozen FeatStructs may not be modified.
|
| 519 |
+
>>> fs1.pop('x')
|
| 520 |
+
Traceback (most recent call last):
|
| 521 |
+
. . .
|
| 522 |
+
ValueError: Frozen FeatStructs may not be modified.
|
| 523 |
+
>>> fs1.popitem()
|
| 524 |
+
Traceback (most recent call last):
|
| 525 |
+
. . .
|
| 526 |
+
ValueError: Frozen FeatStructs may not be modified.
|
| 527 |
+
>>> fs1.setdefault('x')
|
| 528 |
+
Traceback (most recent call last):
|
| 529 |
+
. . .
|
| 530 |
+
ValueError: Frozen FeatStructs may not be modified.
|
| 531 |
+
>>> fs1.update(z=22)
|
| 532 |
+
Traceback (most recent call last):
|
| 533 |
+
. . .
|
| 534 |
+
ValueError: Frozen FeatStructs may not be modified.
|
| 535 |
+
|
| 536 |
+
..
|
| 537 |
+
>>> del fs1, fs2 # clean-up.
|
| 538 |
+
|
| 539 |
+
Feature Paths
|
| 540 |
+
-------------
|
| 541 |
+
Make sure that __getitem__ with feature paths works as intended:
|
| 542 |
+
|
| 543 |
+
>>> fs1 = FeatStruct(a=1, b=2,
|
| 544 |
+
... c=FeatStruct(
|
| 545 |
+
... d=FeatStruct(e=12),
|
| 546 |
+
... f=FeatStruct(g=55, h='hello')))
|
| 547 |
+
>>> fs1[()]
|
| 548 |
+
[a=1, b=2, c=[d=[e=12], f=[g=55, h='hello']]]
|
| 549 |
+
>>> fs1['a'], fs1[('a',)]
|
| 550 |
+
(1, 1)
|
| 551 |
+
>>> fs1['c','d','e']
|
| 552 |
+
12
|
| 553 |
+
>>> fs1['c','f','g']
|
| 554 |
+
55
|
| 555 |
+
|
| 556 |
+
Feature paths that select unknown features raise KeyError:
|
| 557 |
+
|
| 558 |
+
>>> fs1['c', 'f', 'e']
|
| 559 |
+
Traceback (most recent call last):
|
| 560 |
+
. . .
|
| 561 |
+
KeyError: ('c', 'f', 'e')
|
| 562 |
+
>>> fs1['q', 'p']
|
| 563 |
+
Traceback (most recent call last):
|
| 564 |
+
. . .
|
| 565 |
+
KeyError: ('q', 'p')
|
| 566 |
+
|
| 567 |
+
Feature paths that try to go 'through' a feature that's not a feature
|
| 568 |
+
structure raise KeyError:
|
| 569 |
+
|
| 570 |
+
>>> fs1['a', 'b']
|
| 571 |
+
Traceback (most recent call last):
|
| 572 |
+
. . .
|
| 573 |
+
KeyError: ('a', 'b')
|
| 574 |
+
|
| 575 |
+
Feature paths can go through reentrant structures:
|
| 576 |
+
|
| 577 |
+
>>> fs2 = FeatStruct('(1)[a=[b=[c->(1), d=5], e=11]]')
|
| 578 |
+
>>> fs2['a', 'b', 'c', 'a', 'e']
|
| 579 |
+
11
|
| 580 |
+
>>> fs2['a', 'b', 'c', 'a', 'b', 'd']
|
| 581 |
+
5
|
| 582 |
+
>>> fs2[tuple('abcabcabcabcabcabcabcabcabcabca')]
|
| 583 |
+
(1)[b=[c=[a->(1)], d=5], e=11]
|
| 584 |
+
|
| 585 |
+
Indexing requires strings, `Feature`\s, or tuples; other types raise a
|
| 586 |
+
TypeError:
|
| 587 |
+
|
| 588 |
+
>>> fs2[12]
|
| 589 |
+
Traceback (most recent call last):
|
| 590 |
+
. . .
|
| 591 |
+
TypeError: Expected feature name or path. Got 12.
|
| 592 |
+
>>> fs2[list('abc')]
|
| 593 |
+
Traceback (most recent call last):
|
| 594 |
+
. . .
|
| 595 |
+
TypeError: Expected feature name or path. Got ['a', 'b', 'c'].
|
| 596 |
+
|
| 597 |
+
Feature paths can also be used with `get()`, `has_key()`, and
|
| 598 |
+
`__contains__()`.
|
| 599 |
+
|
| 600 |
+
>>> fpath1 = tuple('abcabc')
|
| 601 |
+
>>> fpath2 = tuple('abcabz')
|
| 602 |
+
>>> fs2.get(fpath1), fs2.get(fpath2)
|
| 603 |
+
((1)[a=[b=[c->(1), d=5], e=11]], None)
|
| 604 |
+
>>> fpath1 in fs2, fpath2 in fs2
|
| 605 |
+
(True, False)
|
| 606 |
+
>>> fs2.has_key(fpath1), fs2.has_key(fpath2)
|
| 607 |
+
(True, False)
|
| 608 |
+
|
| 609 |
+
..
|
| 610 |
+
>>> del fs1, fs2 # clean-up
|
| 611 |
+
|
| 612 |
+
Reading Feature Structures
|
| 613 |
+
--------------------------
|
| 614 |
+
|
| 615 |
+
Empty feature struct:
|
| 616 |
+
|
| 617 |
+
>>> FeatStruct('[]')
|
| 618 |
+
[]
|
| 619 |
+
|
| 620 |
+
Test features with integer values:
|
| 621 |
+
|
| 622 |
+
>>> FeatStruct('[a=12, b=-33, c=0]')
|
| 623 |
+
[a=12, b=-33, c=0]
|
| 624 |
+
|
| 625 |
+
Test features with string values. Either single or double quotes may
|
| 626 |
+
be used. Strings are evaluated just like python strings -- in
|
| 627 |
+
particular, you can use escape sequences and 'u' and 'r' prefixes, and
|
| 628 |
+
triple-quoted strings.
|
| 629 |
+
|
| 630 |
+
>>> FeatStruct('[a="", b="hello", c="\'", d=\'\', e=\'"\']')
|
| 631 |
+
[a='', b='hello', c="'", d='', e='"']
|
| 632 |
+
>>> FeatStruct(r'[a="\\", b="\"", c="\x6f\\y", d="12"]')
|
| 633 |
+
[a='\\', b='"', c='o\\y', d='12']
|
| 634 |
+
>>> FeatStruct(r'[b=r"a\b\c"]')
|
| 635 |
+
[b='a\\b\\c']
|
| 636 |
+
>>> FeatStruct('[x="""a"""]')
|
| 637 |
+
[x='a']
|
| 638 |
+
|
| 639 |
+
Test parsing of reentrant feature structures.
|
| 640 |
+
|
| 641 |
+
>>> FeatStruct('[a=(1)[], b->(1)]')
|
| 642 |
+
[a=(1)[], b->(1)]
|
| 643 |
+
>>> FeatStruct('[a=(1)[x=1, y=2], b->(1)]')
|
| 644 |
+
[a=(1)[x=1, y=2], b->(1)]
|
| 645 |
+
|
| 646 |
+
Test parsing of cyclic feature structures.
|
| 647 |
+
|
| 648 |
+
>>> FeatStruct('[a=(1)[b->(1)]]')
|
| 649 |
+
[a=(1)[b->(1)]]
|
| 650 |
+
>>> FeatStruct('(1)[a=[b=[c->(1)]]]')
|
| 651 |
+
(1)[a=[b=[c->(1)]]]
|
| 652 |
+
|
| 653 |
+
Strings of the form "+name" and "-name" may be used to specify boolean
|
| 654 |
+
values.
|
| 655 |
+
|
| 656 |
+
>>> FeatStruct('[-bar, +baz, +foo]')
|
| 657 |
+
[-bar, +baz, +foo]
|
| 658 |
+
|
| 659 |
+
None, True, and False are recognized as values:
|
| 660 |
+
|
| 661 |
+
>>> FeatStruct('[bar=True, baz=False, foo=None]')
|
| 662 |
+
[+bar, -baz, foo=None]
|
| 663 |
+
|
| 664 |
+
Special features:
|
| 665 |
+
|
| 666 |
+
>>> FeatStruct('NP/VP')
|
| 667 |
+
NP[]/VP[]
|
| 668 |
+
>>> FeatStruct('?x/?x')
|
| 669 |
+
?x[]/?x[]
|
| 670 |
+
>>> print(FeatStruct('VP[+fin, agr=?x, tense=past]/NP[+pl, agr=?x]'))
|
| 671 |
+
[ *type* = 'VP' ]
|
| 672 |
+
[ ]
|
| 673 |
+
[ [ *type* = 'NP' ] ]
|
| 674 |
+
[ *slash* = [ agr = ?x ] ]
|
| 675 |
+
[ [ pl = True ] ]
|
| 676 |
+
[ ]
|
| 677 |
+
[ agr = ?x ]
|
| 678 |
+
[ fin = True ]
|
| 679 |
+
[ tense = 'past' ]
|
| 680 |
+
|
| 681 |
+
Here the slash feature gets coerced:
|
| 682 |
+
|
| 683 |
+
>>> FeatStruct('[*slash*=a, x=b, *type*="NP"]')
|
| 684 |
+
NP[x='b']/a[]
|
| 685 |
+
|
| 686 |
+
>>> FeatStruct('NP[sem=<bob>]/NP')
|
| 687 |
+
NP[sem=<bob>]/NP[]
|
| 688 |
+
>>> FeatStruct('S[sem=<walk(bob)>]')
|
| 689 |
+
S[sem=<walk(bob)>]
|
| 690 |
+
>>> print(FeatStruct('NP[sem=<bob>]/NP'))
|
| 691 |
+
[ *type* = 'NP' ]
|
| 692 |
+
[ ]
|
| 693 |
+
[ *slash* = [ *type* = 'NP' ] ]
|
| 694 |
+
[ ]
|
| 695 |
+
[ sem = <bob> ]
|
| 696 |
+
|
| 697 |
+
Playing with ranges:
|
| 698 |
+
|
| 699 |
+
>>> from nltk.featstruct import RangeFeature, FeatStructReader
|
| 700 |
+
>>> width = RangeFeature('width')
|
| 701 |
+
>>> reader = FeatStructReader([width])
|
| 702 |
+
>>> fs1 = reader.fromstring('[*width*=-5:12]')
|
| 703 |
+
>>> fs2 = reader.fromstring('[*width*=2:123]')
|
| 704 |
+
>>> fs3 = reader.fromstring('[*width*=-7:-2]')
|
| 705 |
+
>>> fs1.unify(fs2)
|
| 706 |
+
[*width*=(2, 12)]
|
| 707 |
+
>>> fs1.unify(fs3)
|
| 708 |
+
[*width*=(-5, -2)]
|
| 709 |
+
>>> print(fs2.unify(fs3)) # no overlap in width.
|
| 710 |
+
None
|
| 711 |
+
|
| 712 |
+
The slash feature has a default value of 'False':
|
| 713 |
+
|
| 714 |
+
>>> print(FeatStruct('NP[]/VP').unify(FeatStruct('NP[]'), trace=1))
|
| 715 |
+
<BLANKLINE>
|
| 716 |
+
Unification trace:
|
| 717 |
+
/ NP[]/VP[]
|
| 718 |
+
|\ NP[]
|
| 719 |
+
|
|
| 720 |
+
| Unify feature: *type*
|
| 721 |
+
| / 'NP'
|
| 722 |
+
| |\ 'NP'
|
| 723 |
+
| |
|
| 724 |
+
| +-->'NP'
|
| 725 |
+
|
|
| 726 |
+
| Unify feature: *slash*
|
| 727 |
+
| / VP[]
|
| 728 |
+
| |\ False
|
| 729 |
+
| |
|
| 730 |
+
X X <-- FAIL
|
| 731 |
+
None
|
| 732 |
+
|
| 733 |
+
The demo structures from category.py. They all parse, but they don't
|
| 734 |
+
do quite the right thing, -- ?x vs x.
|
| 735 |
+
|
| 736 |
+
>>> FeatStruct(pos='n', agr=FeatStruct(number='pl', gender='f'))
|
| 737 |
+
[agr=[gender='f', number='pl'], pos='n']
|
| 738 |
+
>>> FeatStruct(r'NP[sem=<bob>]/NP')
|
| 739 |
+
NP[sem=<bob>]/NP[]
|
| 740 |
+
>>> FeatStruct(r'S[sem=<app(?x, ?y)>]')
|
| 741 |
+
S[sem=<?x(?y)>]
|
| 742 |
+
>>> FeatStruct('?x/?x')
|
| 743 |
+
?x[]/?x[]
|
| 744 |
+
>>> FeatStruct('VP[+fin, agr=?x, tense=past]/NP[+pl, agr=?x]')
|
| 745 |
+
VP[agr=?x, +fin, tense='past']/NP[agr=?x, +pl]
|
| 746 |
+
>>> FeatStruct('S[sem = <app(?subj, ?vp)>]')
|
| 747 |
+
S[sem=<?subj(?vp)>]
|
| 748 |
+
|
| 749 |
+
>>> FeatStruct('S')
|
| 750 |
+
S[]
|
| 751 |
+
|
| 752 |
+
The parser also includes support for reading sets and tuples.
|
| 753 |
+
|
| 754 |
+
>>> FeatStruct('[x={1,2,2,2}, y={/}]')
|
| 755 |
+
[x={1, 2}, y={/}]
|
| 756 |
+
>>> FeatStruct('[x=(1,2,2,2), y=()]')
|
| 757 |
+
[x=(1, 2, 2, 2), y=()]
|
| 758 |
+
>>> print(FeatStruct('[x=(1,[z=(1,2,?x)],?z,{/})]'))
|
| 759 |
+
[ x = (1, [ z = (1, 2, ?x) ], ?z, {/}) ]
|
| 760 |
+
|
| 761 |
+
Note that we can't put a featstruct inside a tuple, because doing so
|
| 762 |
+
would hash it, and it's not frozen yet:
|
| 763 |
+
|
| 764 |
+
>>> print(FeatStruct('[x={[]}]'))
|
| 765 |
+
Traceback (most recent call last):
|
| 766 |
+
. . .
|
| 767 |
+
TypeError: FeatStructs must be frozen before they can be hashed.
|
| 768 |
+
|
| 769 |
+
There's a special syntax for taking the union of sets: "{...+...}".
|
| 770 |
+
The elements should only be variables or sets.
|
| 771 |
+
|
| 772 |
+
>>> FeatStruct('[x={?a+?b+{1,2,3}}]')
|
| 773 |
+
[x={?a+?b+{1, 2, 3}}]
|
| 774 |
+
|
| 775 |
+
There's a special syntax for taking the concatenation of tuples:
|
| 776 |
+
"(...+...)". The elements should only be variables or tuples.
|
| 777 |
+
|
| 778 |
+
>>> FeatStruct('[x=(?a+?b+(1,2,3))]')
|
| 779 |
+
[x=(?a+?b+(1, 2, 3))]
|
| 780 |
+
|
| 781 |
+
Parsing gives helpful messages if your string contains an error.
|
| 782 |
+
|
| 783 |
+
>>> FeatStruct('[a=, b=5]]')
|
| 784 |
+
Traceback (most recent call last):
|
| 785 |
+
. . .
|
| 786 |
+
ValueError: Error parsing feature structure
|
| 787 |
+
[a=, b=5]]
|
| 788 |
+
^ Expected value
|
| 789 |
+
>>> FeatStruct('[a=12 22, b=33]')
|
| 790 |
+
Traceback (most recent call last):
|
| 791 |
+
. . .
|
| 792 |
+
ValueError: Error parsing feature structure
|
| 793 |
+
[a=12 22, b=33]
|
| 794 |
+
^ Expected comma
|
| 795 |
+
>>> FeatStruct('[a=5] [b=6]')
|
| 796 |
+
Traceback (most recent call last):
|
| 797 |
+
. . .
|
| 798 |
+
ValueError: Error parsing feature structure
|
| 799 |
+
[a=5] [b=6]
|
| 800 |
+
^ Expected end of string
|
| 801 |
+
>>> FeatStruct(' *++*')
|
| 802 |
+
Traceback (most recent call last):
|
| 803 |
+
. . .
|
| 804 |
+
ValueError: Error parsing feature structure
|
| 805 |
+
*++*
|
| 806 |
+
^ Expected open bracket or identifier
|
| 807 |
+
>>> FeatStruct('[x->(1)]')
|
| 808 |
+
Traceback (most recent call last):
|
| 809 |
+
. . .
|
| 810 |
+
ValueError: Error parsing feature structure
|
| 811 |
+
[x->(1)]
|
| 812 |
+
^ Expected bound identifier
|
| 813 |
+
>>> FeatStruct('[x->y]')
|
| 814 |
+
Traceback (most recent call last):
|
| 815 |
+
. . .
|
| 816 |
+
ValueError: Error parsing feature structure
|
| 817 |
+
[x->y]
|
| 818 |
+
^ Expected identifier
|
| 819 |
+
>>> FeatStruct('')
|
| 820 |
+
Traceback (most recent call last):
|
| 821 |
+
. . .
|
| 822 |
+
ValueError: Error parsing feature structure
|
| 823 |
+
<BLANKLINE>
|
| 824 |
+
^ Expected open bracket or identifier
|
| 825 |
+
|
| 826 |
+
|
| 827 |
+
Unification
|
| 828 |
+
-----------
|
| 829 |
+
Very simple unifications give the expected results:
|
| 830 |
+
|
| 831 |
+
>>> FeatStruct().unify(FeatStruct())
|
| 832 |
+
[]
|
| 833 |
+
>>> FeatStruct(number='singular').unify(FeatStruct())
|
| 834 |
+
[number='singular']
|
| 835 |
+
>>> FeatStruct().unify(FeatStruct(number='singular'))
|
| 836 |
+
[number='singular']
|
| 837 |
+
>>> FeatStruct(number='singular').unify(FeatStruct(person=3))
|
| 838 |
+
[number='singular', person=3]
|
| 839 |
+
|
| 840 |
+
Merging nested structures:
|
| 841 |
+
|
| 842 |
+
>>> fs1 = FeatStruct('[A=[B=b]]')
|
| 843 |
+
>>> fs2 = FeatStruct('[A=[C=c]]')
|
| 844 |
+
>>> fs1.unify(fs2)
|
| 845 |
+
[A=[B='b', C='c']]
|
| 846 |
+
>>> fs2.unify(fs1)
|
| 847 |
+
[A=[B='b', C='c']]
|
| 848 |
+
|
| 849 |
+
A basic case of reentrant unification
|
| 850 |
+
|
| 851 |
+
>>> fs4 = FeatStruct('[A=(1)[B=b], E=[F->(1)]]')
|
| 852 |
+
>>> fs5 = FeatStruct("[A=[C='c'], E=[F=[D='d']]]")
|
| 853 |
+
>>> fs4.unify(fs5)
|
| 854 |
+
[A=(1)[B='b', C='c', D='d'], E=[F->(1)]]
|
| 855 |
+
>>> fs5.unify(fs4)
|
| 856 |
+
[A=(1)[B='b', C='c', D='d'], E=[F->(1)]]
|
| 857 |
+
|
| 858 |
+
More than 2 paths to a value
|
| 859 |
+
|
| 860 |
+
>>> fs1 = FeatStruct("[a=[],b=[],c=[],d=[]]")
|
| 861 |
+
>>> fs2 = FeatStruct('[a=(1)[], b->(1), c->(1), d->(1)]')
|
| 862 |
+
>>> fs1.unify(fs2)
|
| 863 |
+
[a=(1)[], b->(1), c->(1), d->(1)]
|
| 864 |
+
|
| 865 |
+
fs1[a] gets unified with itself
|
| 866 |
+
|
| 867 |
+
>>> fs1 = FeatStruct('[x=(1)[], y->(1)]')
|
| 868 |
+
>>> fs2 = FeatStruct('[x=(1)[], y->(1)]')
|
| 869 |
+
>>> fs1.unify(fs2)
|
| 870 |
+
[x=(1)[], y->(1)]
|
| 871 |
+
|
| 872 |
+
Bound variables should get forwarded appropriately
|
| 873 |
+
|
| 874 |
+
>>> fs1 = FeatStruct('[A=(1)[X=x], B->(1), C=?cvar, D=?dvar]')
|
| 875 |
+
>>> fs2 = FeatStruct('[A=(1)[Y=y], B=(2)[Z=z], C->(1), D->(2)]')
|
| 876 |
+
>>> fs1.unify(fs2)
|
| 877 |
+
[A=(1)[X='x', Y='y', Z='z'], B->(1), C->(1), D->(1)]
|
| 878 |
+
>>> fs2.unify(fs1)
|
| 879 |
+
[A=(1)[X='x', Y='y', Z='z'], B->(1), C->(1), D->(1)]
|
| 880 |
+
|
| 881 |
+
Cyclic structure created by unification.
|
| 882 |
+
|
| 883 |
+
>>> fs1 = FeatStruct('[F=(1)[], G->(1)]')
|
| 884 |
+
>>> fs2 = FeatStruct('[F=[H=(2)[]], G->(2)]')
|
| 885 |
+
>>> fs3 = fs1.unify(fs2)
|
| 886 |
+
>>> fs3
|
| 887 |
+
[F=(1)[H->(1)], G->(1)]
|
| 888 |
+
>>> fs3['F'] is fs3['G']
|
| 889 |
+
True
|
| 890 |
+
>>> fs3['F'] is fs3['G']['H']
|
| 891 |
+
True
|
| 892 |
+
>>> fs3['F'] is fs3['G']['H']['H']
|
| 893 |
+
True
|
| 894 |
+
>>> fs3['F'] is fs3['F']['H']['H']['H']['H']['H']['H']['H']['H']
|
| 895 |
+
True
|
| 896 |
+
|
| 897 |
+
Cyclic structure created w/ variables.
|
| 898 |
+
|
| 899 |
+
>>> fs1 = FeatStruct('[F=[H=?x]]')
|
| 900 |
+
>>> fs2 = FeatStruct('[F=?x]')
|
| 901 |
+
>>> fs3 = fs1.unify(fs2, rename_vars=False)
|
| 902 |
+
>>> fs3
|
| 903 |
+
[F=(1)[H->(1)]]
|
| 904 |
+
>>> fs3['F'] is fs3['F']['H']
|
| 905 |
+
True
|
| 906 |
+
>>> fs3['F'] is fs3['F']['H']['H']
|
| 907 |
+
True
|
| 908 |
+
>>> fs3['F'] is fs3['F']['H']['H']['H']['H']['H']['H']['H']['H']
|
| 909 |
+
True
|
| 910 |
+
|
| 911 |
+
Unifying w/ a cyclic feature structure.
|
| 912 |
+
|
| 913 |
+
>>> fs4 = FeatStruct('[F=[H=[H=[H=(1)[]]]], K->(1)]')
|
| 914 |
+
>>> fs3.unify(fs4)
|
| 915 |
+
[F=(1)[H->(1)], K->(1)]
|
| 916 |
+
>>> fs4.unify(fs3)
|
| 917 |
+
[F=(1)[H->(1)], K->(1)]
|
| 918 |
+
|
| 919 |
+
Variable bindings should preserve reentrance.
|
| 920 |
+
|
| 921 |
+
>>> bindings = {}
|
| 922 |
+
>>> fs1 = FeatStruct("[a=?x]")
|
| 923 |
+
>>> fs2 = fs1.unify(FeatStruct("[a=[]]"), bindings)
|
| 924 |
+
>>> fs2['a'] is bindings[Variable('?x')]
|
| 925 |
+
True
|
| 926 |
+
>>> fs2.unify(FeatStruct("[b=?x]"), bindings)
|
| 927 |
+
[a=(1)[], b->(1)]
|
| 928 |
+
|
| 929 |
+
Aliased variable tests
|
| 930 |
+
|
| 931 |
+
>>> fs1 = FeatStruct("[a=?x, b=?x]")
|
| 932 |
+
>>> fs2 = FeatStruct("[b=?y, c=?y]")
|
| 933 |
+
>>> bindings = {}
|
| 934 |
+
>>> fs3 = fs1.unify(fs2, bindings)
|
| 935 |
+
>>> fs3
|
| 936 |
+
[a=?x, b=?x, c=?x]
|
| 937 |
+
>>> bindings
|
| 938 |
+
{Variable('?y'): Variable('?x')}
|
| 939 |
+
>>> fs3.unify(FeatStruct("[a=1]"))
|
| 940 |
+
[a=1, b=1, c=1]
|
| 941 |
+
|
| 942 |
+
If we keep track of the bindings, then we can use the same variable
|
| 943 |
+
over multiple calls to unify.
|
| 944 |
+
|
| 945 |
+
>>> bindings = {}
|
| 946 |
+
>>> fs1 = FeatStruct('[a=?x]')
|
| 947 |
+
>>> fs2 = fs1.unify(FeatStruct('[a=[]]'), bindings)
|
| 948 |
+
>>> fs2.unify(FeatStruct('[b=?x]'), bindings)
|
| 949 |
+
[a=(1)[], b->(1)]
|
| 950 |
+
>>> bindings
|
| 951 |
+
{Variable('?x'): []}
|
| 952 |
+
|
| 953 |
+
..
|
| 954 |
+
>>> del fs1, fs2, fs3, fs4, fs5 # clean-up
|
| 955 |
+
|
| 956 |
+
Unification Bindings
|
| 957 |
+
--------------------
|
| 958 |
+
|
| 959 |
+
>>> bindings = {}
|
| 960 |
+
>>> fs1 = FeatStruct('[a=?x]')
|
| 961 |
+
>>> fs2 = FeatStruct('[a=12]')
|
| 962 |
+
>>> fs3 = FeatStruct('[b=?x]')
|
| 963 |
+
>>> fs1.unify(fs2, bindings)
|
| 964 |
+
[a=12]
|
| 965 |
+
>>> bindings
|
| 966 |
+
{Variable('?x'): 12}
|
| 967 |
+
>>> fs3.substitute_bindings(bindings)
|
| 968 |
+
[b=12]
|
| 969 |
+
>>> fs3 # substitute_bindings didn't mutate fs3.
|
| 970 |
+
[b=?x]
|
| 971 |
+
>>> fs2.unify(fs3, bindings)
|
| 972 |
+
[a=12, b=12]
|
| 973 |
+
|
| 974 |
+
>>> bindings = {}
|
| 975 |
+
>>> fs1 = FeatStruct('[a=?x, b=1]')
|
| 976 |
+
>>> fs2 = FeatStruct('[a=5, b=?x]')
|
| 977 |
+
>>> fs1.unify(fs2, bindings)
|
| 978 |
+
[a=5, b=1]
|
| 979 |
+
>>> sorted(bindings.items())
|
| 980 |
+
[(Variable('?x'), 5), (Variable('?x2'), 1)]
|
| 981 |
+
|
| 982 |
+
..
|
| 983 |
+
>>> del fs1, fs2, fs3 # clean-up
|
| 984 |
+
|
| 985 |
+
Expressions
|
| 986 |
+
-----------
|
| 987 |
+
|
| 988 |
+
>>> e = Expression.fromstring('\\P y.P(z,y)')
|
| 989 |
+
>>> fs1 = FeatStruct(x=e, y=Variable('z'))
|
| 990 |
+
>>> fs2 = FeatStruct(y=VariableExpression(Variable('John')))
|
| 991 |
+
>>> fs1.unify(fs2)
|
| 992 |
+
[x=<\P y.P(John,y)>, y=<John>]
|
| 993 |
+
|
| 994 |
+
Remove Variables
|
| 995 |
+
----------------
|
| 996 |
+
|
| 997 |
+
>>> FeatStruct('[a=?x, b=12, c=[d=?y]]').remove_variables()
|
| 998 |
+
[b=12, c=[]]
|
| 999 |
+
>>> FeatStruct('(1)[a=[b=?x,c->(1)]]').remove_variables()
|
| 1000 |
+
(1)[a=[c->(1)]]
|
| 1001 |
+
|
| 1002 |
+
Equality & Hashing
|
| 1003 |
+
------------------
|
| 1004 |
+
The `equal_values` method checks whether two feature structures assign
|
| 1005 |
+
the same value to every feature. If the optional argument
|
| 1006 |
+
``check_reentrances`` is supplied, then it also returns false if there
|
| 1007 |
+
is any difference in the reentrances.
|
| 1008 |
+
|
| 1009 |
+
>>> a = FeatStruct('(1)[x->(1)]')
|
| 1010 |
+
>>> b = FeatStruct('(1)[x->(1)]')
|
| 1011 |
+
>>> c = FeatStruct('(1)[x=[x->(1)]]')
|
| 1012 |
+
>>> d = FeatStruct('[x=(1)[x->(1)]]')
|
| 1013 |
+
>>> e = FeatStruct('(1)[x=[x->(1), y=1], y=1]')
|
| 1014 |
+
>>> def compare(x,y):
|
| 1015 |
+
... assert x.equal_values(y, True) == y.equal_values(x, True)
|
| 1016 |
+
... assert x.equal_values(y, False) == y.equal_values(x, False)
|
| 1017 |
+
... if x.equal_values(y, True):
|
| 1018 |
+
... assert x.equal_values(y, False)
|
| 1019 |
+
... print('equal values, same reentrance')
|
| 1020 |
+
... elif x.equal_values(y, False):
|
| 1021 |
+
... print('equal values, different reentrance')
|
| 1022 |
+
... else:
|
| 1023 |
+
... print('different values')
|
| 1024 |
+
|
| 1025 |
+
>>> compare(a, a)
|
| 1026 |
+
equal values, same reentrance
|
| 1027 |
+
>>> compare(a, b)
|
| 1028 |
+
equal values, same reentrance
|
| 1029 |
+
>>> compare(a, c)
|
| 1030 |
+
equal values, different reentrance
|
| 1031 |
+
>>> compare(a, d)
|
| 1032 |
+
equal values, different reentrance
|
| 1033 |
+
>>> compare(c, d)
|
| 1034 |
+
equal values, different reentrance
|
| 1035 |
+
>>> compare(a, e)
|
| 1036 |
+
different values
|
| 1037 |
+
>>> compare(c, e)
|
| 1038 |
+
different values
|
| 1039 |
+
>>> compare(d, e)
|
| 1040 |
+
different values
|
| 1041 |
+
>>> compare(e, e)
|
| 1042 |
+
equal values, same reentrance
|
| 1043 |
+
|
| 1044 |
+
Feature structures may not be hashed until they are frozen:
|
| 1045 |
+
|
| 1046 |
+
>>> hash(a)
|
| 1047 |
+
Traceback (most recent call last):
|
| 1048 |
+
. . .
|
| 1049 |
+
TypeError: FeatStructs must be frozen before they can be hashed.
|
| 1050 |
+
>>> a.freeze()
|
| 1051 |
+
>>> v = hash(a)
|
| 1052 |
+
|
| 1053 |
+
Feature structures define hash consistently. The following example
|
| 1054 |
+
looks at the hash value for each (fs1,fs2) pair; if their hash values
|
| 1055 |
+
are not equal, then they must not be equal. If their hash values are
|
| 1056 |
+
equal, then display a message, and indicate whether their values are
|
| 1057 |
+
indeed equal. Note that c and d currently have the same hash value,
|
| 1058 |
+
even though they are not equal. That is not a bug, strictly speaking,
|
| 1059 |
+
but it wouldn't be a bad thing if it changed.
|
| 1060 |
+
|
| 1061 |
+
>>> for fstruct in (a, b, c, d, e):
|
| 1062 |
+
... fstruct.freeze()
|
| 1063 |
+
>>> for fs1_name in 'abcde':
|
| 1064 |
+
... for fs2_name in 'abcde':
|
| 1065 |
+
... fs1 = locals()[fs1_name]
|
| 1066 |
+
... fs2 = locals()[fs2_name]
|
| 1067 |
+
... if hash(fs1) != hash(fs2):
|
| 1068 |
+
... assert fs1 != fs2
|
| 1069 |
+
... else:
|
| 1070 |
+
... print('%s and %s have the same hash value,' %
|
| 1071 |
+
... (fs1_name, fs2_name))
|
| 1072 |
+
... if fs1 == fs2: print('and are equal')
|
| 1073 |
+
... else: print('and are not equal')
|
| 1074 |
+
a and a have the same hash value, and are equal
|
| 1075 |
+
a and b have the same hash value, and are equal
|
| 1076 |
+
b and a have the same hash value, and are equal
|
| 1077 |
+
b and b have the same hash value, and are equal
|
| 1078 |
+
c and c have the same hash value, and are equal
|
| 1079 |
+
c and d have the same hash value, and are not equal
|
| 1080 |
+
d and c have the same hash value, and are not equal
|
| 1081 |
+
d and d have the same hash value, and are equal
|
| 1082 |
+
e and e have the same hash value, and are equal
|
| 1083 |
+
|
| 1084 |
+
..
|
| 1085 |
+
>>> del a, b, c, d, e, v # clean-up
|
| 1086 |
+
|
| 1087 |
+
Tracing
|
| 1088 |
+
-------
|
| 1089 |
+
|
| 1090 |
+
>>> fs1 = FeatStruct('[a=[b=(1)[], c=?x], d->(1), e=[f=?x]]')
|
| 1091 |
+
>>> fs2 = FeatStruct('[a=(1)[c="C"], e=[g->(1)]]')
|
| 1092 |
+
>>> fs1.unify(fs2, trace=True)
|
| 1093 |
+
<BLANKLINE>
|
| 1094 |
+
Unification trace:
|
| 1095 |
+
/ [a=[b=(1)[], c=?x], d->(1), e=[f=?x]]
|
| 1096 |
+
|\ [a=(1)[c='C'], e=[g->(1)]]
|
| 1097 |
+
|
|
| 1098 |
+
| Unify feature: a
|
| 1099 |
+
| / [b=[], c=?x]
|
| 1100 |
+
| |\ [c='C']
|
| 1101 |
+
| |
|
| 1102 |
+
| | Unify feature: a.c
|
| 1103 |
+
| | / ?x
|
| 1104 |
+
| | |\ 'C'
|
| 1105 |
+
| | |
|
| 1106 |
+
| | +-->Variable('?x')
|
| 1107 |
+
| |
|
| 1108 |
+
| +-->[b=[], c=?x]
|
| 1109 |
+
| Bindings: {?x: 'C'}
|
| 1110 |
+
|
|
| 1111 |
+
| Unify feature: e
|
| 1112 |
+
| / [f=?x]
|
| 1113 |
+
| |\ [g=[c='C']]
|
| 1114 |
+
| |
|
| 1115 |
+
| +-->[f=?x, g=[b=[], c=?x]]
|
| 1116 |
+
| Bindings: {?x: 'C'}
|
| 1117 |
+
|
|
| 1118 |
+
+-->[a=(1)[b=(2)[], c='C'], d->(2), e=[f='C', g->(1)]]
|
| 1119 |
+
Bindings: {?x: 'C'}
|
| 1120 |
+
[a=(1)[b=(2)[], c='C'], d->(2), e=[f='C', g->(1)]]
|
| 1121 |
+
>>>
|
| 1122 |
+
>>> fs1 = FeatStruct('[a=?x, b=?z, c=?z]')
|
| 1123 |
+
>>> fs2 = FeatStruct('[a=?y, b=?y, c=?q]')
|
| 1124 |
+
>>> #fs1.unify(fs2, trace=True)
|
| 1125 |
+
>>>
|
| 1126 |
+
|
| 1127 |
+
..
|
| 1128 |
+
>>> del fs1, fs2 # clean-up
|
| 1129 |
+
|
| 1130 |
+
Unification on Dicts & Lists
|
| 1131 |
+
----------------------------
|
| 1132 |
+
It's possible to do unification on dictionaries:
|
| 1133 |
+
|
| 1134 |
+
>>> from nltk.featstruct import unify
|
| 1135 |
+
>>> pprint(unify(dict(x=1, y=dict(z=2)), dict(x=1, q=5)), width=1)
|
| 1136 |
+
{'q': 5, 'x': 1, 'y': {'z': 2}}
|
| 1137 |
+
|
| 1138 |
+
It's possible to do unification on lists as well:
|
| 1139 |
+
|
| 1140 |
+
>>> unify([1, 2, 3], [1, Variable('x'), 3])
|
| 1141 |
+
[1, 2, 3]
|
| 1142 |
+
|
| 1143 |
+
Mixing dicts and lists is fine:
|
| 1144 |
+
|
| 1145 |
+
>>> pprint(unify([dict(x=1, y=dict(z=2)),3], [dict(x=1, q=5),3]),
|
| 1146 |
+
... width=1)
|
| 1147 |
+
[{'q': 5, 'x': 1, 'y': {'z': 2}}, 3]
|
| 1148 |
+
|
| 1149 |
+
Mixing dicts and FeatStructs is discouraged:
|
| 1150 |
+
|
| 1151 |
+
>>> unify(dict(x=1), FeatStruct(x=1))
|
| 1152 |
+
Traceback (most recent call last):
|
| 1153 |
+
. . .
|
| 1154 |
+
ValueError: Mixing FeatStruct objects with Python dicts and lists is not supported.
|
| 1155 |
+
|
| 1156 |
+
But you can do it if you really want, by explicitly stating that both
|
| 1157 |
+
dictionaries and FeatStructs should be treated as feature structures:
|
| 1158 |
+
|
| 1159 |
+
>>> unify(dict(x=1), FeatStruct(x=1), fs_class=(dict, FeatStruct))
|
| 1160 |
+
{'x': 1}
|
| 1161 |
+
|
| 1162 |
+
Finding Conflicts
|
| 1163 |
+
-----------------
|
| 1164 |
+
|
| 1165 |
+
>>> from nltk.featstruct import conflicts
|
| 1166 |
+
>>> fs1 = FeatStruct('[a=[b=(1)[c=2], d->(1), e=[f->(1)]]]')
|
| 1167 |
+
>>> fs2 = FeatStruct('[a=[b=[c=[x=5]], d=[c=2], e=[f=[c=3]]]]')
|
| 1168 |
+
>>> for path in conflicts(fs1, fs2):
|
| 1169 |
+
... print('%-8s: %r vs %r' % ('.'.join(path), fs1[path], fs2[path]))
|
| 1170 |
+
a.b.c : 2 vs [x=5]
|
| 1171 |
+
a.e.f.c : 2 vs 3
|
| 1172 |
+
|
| 1173 |
+
..
|
| 1174 |
+
>>> del fs1, fs2 # clean-up
|
| 1175 |
+
|
| 1176 |
+
Retracting Bindings
|
| 1177 |
+
-------------------
|
| 1178 |
+
|
| 1179 |
+
>>> from nltk.featstruct import retract_bindings
|
| 1180 |
+
>>> bindings = {}
|
| 1181 |
+
>>> fs1 = FeatStruct('[a=?x, b=[c=?y]]')
|
| 1182 |
+
>>> fs2 = FeatStruct('[a=(1)[c=[d=1]], b->(1)]')
|
| 1183 |
+
>>> fs3 = fs1.unify(fs2, bindings)
|
| 1184 |
+
>>> print(fs3)
|
| 1185 |
+
[ a = (1) [ c = [ d = 1 ] ] ]
|
| 1186 |
+
[ ]
|
| 1187 |
+
[ b -> (1) ]
|
| 1188 |
+
>>> pprint(bindings)
|
| 1189 |
+
{Variable('?x'): [c=[d=1]], Variable('?y'): [d=1]}
|
| 1190 |
+
>>> retract_bindings(fs3, bindings)
|
| 1191 |
+
[a=?x, b=?x]
|
| 1192 |
+
>>> pprint(bindings)
|
| 1193 |
+
{Variable('?x'): [c=?y], Variable('?y'): [d=1]}
|
| 1194 |
+
|
| 1195 |
+
Squashed Bugs
|
| 1196 |
+
~~~~~~~~~~~~~
|
| 1197 |
+
In svn rev 5167, unifying two feature structures that used the same
|
| 1198 |
+
variable would cause those variables to become aliased in the output.
|
| 1199 |
+
|
| 1200 |
+
>>> fs1 = FeatStruct('[a=?x]')
|
| 1201 |
+
>>> fs2 = FeatStruct('[b=?x]')
|
| 1202 |
+
>>> fs1.unify(fs2)
|
| 1203 |
+
[a=?x, b=?x2]
|
| 1204 |
+
|
| 1205 |
+
There was a bug in svn revision 5172 that caused `rename_variables` to
|
| 1206 |
+
rename variables to names that are already used.
|
| 1207 |
+
|
| 1208 |
+
>>> FeatStruct('[a=?x, b=?x2]').rename_variables(
|
| 1209 |
+
... vars=[Variable('?x')])
|
| 1210 |
+
[a=?x3, b=?x2]
|
| 1211 |
+
>>> fs1 = FeatStruct('[a=?x]')
|
| 1212 |
+
>>> fs2 = FeatStruct('[a=?x, b=?x2]')
|
| 1213 |
+
>>> fs1.unify(fs2)
|
| 1214 |
+
[a=?x, b=?x2]
|
| 1215 |
+
|
| 1216 |
+
There was a bug in svn rev 5167 that caused us to get the following
|
| 1217 |
+
example wrong. Basically the problem was that we only followed
|
| 1218 |
+
'forward' pointers for other, not self, when unifying two feature
|
| 1219 |
+
structures. (nb: this test assumes that features are unified in
|
| 1220 |
+
alphabetical order -- if they are not, it might pass even if the bug
|
| 1221 |
+
is present.)
|
| 1222 |
+
|
| 1223 |
+
>>> fs1 = FeatStruct('[a=[x=1], b=?x, c=?x]')
|
| 1224 |
+
>>> fs2 = FeatStruct('[a=(1)[], b->(1), c=[x=2]]')
|
| 1225 |
+
>>> print(fs1.unify(fs2))
|
| 1226 |
+
None
|
| 1227 |
+
|
| 1228 |
+
..
|
| 1229 |
+
>>> del fs1, fs2 # clean-up
|
lib/python3.10/site-packages/nltk/test/framenet.doctest
ADDED
|
@@ -0,0 +1,288 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
| 2 |
+
.. For license information, see LICENSE.TXT
|
| 3 |
+
|
| 4 |
+
========
|
| 5 |
+
FrameNet
|
| 6 |
+
========
|
| 7 |
+
|
| 8 |
+
The FrameNet corpus is a lexical database of English that is both human-
|
| 9 |
+
and machine-readable, based on annotating examples of how words are used
|
| 10 |
+
in actual texts. FrameNet is based on a theory of meaning called Frame
|
| 11 |
+
Semantics, deriving from the work of Charles J. Fillmore and colleagues.
|
| 12 |
+
The basic idea is straightforward: that the meanings of most words can
|
| 13 |
+
best be understood on the basis of a semantic frame: a description of a
|
| 14 |
+
type of event, relation, or entity and the participants in it. For
|
| 15 |
+
example, the concept of cooking typically involves a person doing the
|
| 16 |
+
cooking (Cook), the food that is to be cooked (Food), something to hold
|
| 17 |
+
the food while cooking (Container) and a source of heat
|
| 18 |
+
(Heating_instrument). In the FrameNet project, this is represented as a
|
| 19 |
+
frame called Apply_heat, and the Cook, Food, Heating_instrument and
|
| 20 |
+
Container are called frame elements (FEs). Words that evoke this frame,
|
| 21 |
+
such as fry, bake, boil, and broil, are called lexical units (LUs) of
|
| 22 |
+
the Apply_heat frame. The job of FrameNet is to define the frames
|
| 23 |
+
and to annotate sentences to show how the FEs fit syntactically around
|
| 24 |
+
the word that evokes the frame.
|
| 25 |
+
|
| 26 |
+
------
|
| 27 |
+
Frames
|
| 28 |
+
------
|
| 29 |
+
|
| 30 |
+
A Frame is a script-like conceptual structure that describes a
|
| 31 |
+
particular type of situation, object, or event along with the
|
| 32 |
+
participants and props that are needed for that Frame. For
|
| 33 |
+
example, the "Apply_heat" frame describes a common situation
|
| 34 |
+
involving a Cook, some Food, and a Heating_Instrument, and is
|
| 35 |
+
evoked by words such as bake, blanch, boil, broil, brown,
|
| 36 |
+
simmer, steam, etc.
|
| 37 |
+
|
| 38 |
+
We call the roles of a Frame "frame elements" (FEs) and the
|
| 39 |
+
frame-evoking words are called "lexical units" (LUs).
|
| 40 |
+
|
| 41 |
+
FrameNet includes relations between Frames. Several types of
|
| 42 |
+
relations are defined, of which the most important are:
|
| 43 |
+
|
| 44 |
+
- Inheritance: An IS-A relation. The child frame is a subtype
|
| 45 |
+
of the parent frame, and each FE in the parent is bound to
|
| 46 |
+
a corresponding FE in the child. An example is the
|
| 47 |
+
"Revenge" frame which inherits from the
|
| 48 |
+
"Rewards_and_punishments" frame.
|
| 49 |
+
|
| 50 |
+
- Using: The child frame presupposes the parent frame as
|
| 51 |
+
background, e.g. the "Speed" frame "uses" (or presupposes)
|
| 52 |
+
the "Motion" frame; however, not all parent FEs need to be
|
| 53 |
+
bound to child FEs.
|
| 54 |
+
|
| 55 |
+
- Subframe: The child frame is a subevent of a complex event
|
| 56 |
+
represented by the parent, e.g. the "Criminal_process" frame
|
| 57 |
+
has subframes of "Arrest", "Arraignment", "Trial", and
|
| 58 |
+
"Sentencing".
|
| 59 |
+
|
| 60 |
+
- Perspective_on: The child frame provides a particular
|
| 61 |
+
perspective on an un-perspectivized parent frame. A pair of
|
| 62 |
+
examples consists of the "Hiring" and "Get_a_job" frames,
|
| 63 |
+
which perspectivize the "Employment_start" frame from the
|
| 64 |
+
Employer's and the Employee's point of view, respectively.
|
| 65 |
+
|
| 66 |
+
To get a list of all of the Frames in FrameNet, you can use the
|
| 67 |
+
`frames()` function. If you supply a regular expression pattern to the
|
| 68 |
+
`frames()` function, you will get a list of all Frames whose names match
|
| 69 |
+
that pattern:
|
| 70 |
+
|
| 71 |
+
>>> from pprint import pprint
|
| 72 |
+
>>> from operator import itemgetter
|
| 73 |
+
>>> from nltk.corpus import framenet as fn
|
| 74 |
+
>>> from nltk.corpus.reader.framenet import PrettyList
|
| 75 |
+
>>> x = fn.frames(r'(?i)crim')
|
| 76 |
+
>>> x.sort(key=itemgetter('ID'))
|
| 77 |
+
>>> x
|
| 78 |
+
[<frame ID=200 name=Criminal_process>, <frame ID=500 name=Criminal_investigation>, ...]
|
| 79 |
+
>>> PrettyList(sorted(x, key=itemgetter('ID')))
|
| 80 |
+
[<frame ID=200 name=Criminal_process>, <frame ID=500 name=Criminal_investigation>, ...]
|
| 81 |
+
|
| 82 |
+
To get the details of a particular Frame, you can use the `frame()`
|
| 83 |
+
function passing in the frame number:
|
| 84 |
+
|
| 85 |
+
>>> from pprint import pprint
|
| 86 |
+
>>> from nltk.corpus import framenet as fn
|
| 87 |
+
>>> f = fn.frame(202)
|
| 88 |
+
>>> f.ID
|
| 89 |
+
202
|
| 90 |
+
>>> f.name
|
| 91 |
+
'Arrest'
|
| 92 |
+
>>> f.definition
|
| 93 |
+
"Authorities charge a Suspect, who is under suspicion of having committed a crime..."
|
| 94 |
+
>>> len(f.lexUnit)
|
| 95 |
+
11
|
| 96 |
+
>>> pprint(sorted([x for x in f.FE]))
|
| 97 |
+
['Authorities',
|
| 98 |
+
'Charges',
|
| 99 |
+
'Co-participant',
|
| 100 |
+
'Manner',
|
| 101 |
+
'Means',
|
| 102 |
+
'Offense',
|
| 103 |
+
'Place',
|
| 104 |
+
'Purpose',
|
| 105 |
+
'Source_of_legal_authority',
|
| 106 |
+
'Suspect',
|
| 107 |
+
'Time',
|
| 108 |
+
'Type']
|
| 109 |
+
>>> pprint(f.frameRelations)
|
| 110 |
+
[<Parent=Intentionally_affect -- Inheritance -> Child=Arrest>, <Complex=Criminal_process -- Subframe -> Component=Arrest>, ...]
|
| 111 |
+
|
| 112 |
+
The `frame()` function shown above returns a dict object containing
|
| 113 |
+
detailed information about the Frame. See the documentation on the
|
| 114 |
+
`frame()` function for the specifics.
|
| 115 |
+
|
| 116 |
+
You can also search for Frames by their Lexical Units (LUs). The
|
| 117 |
+
`frames_by_lemma()` function returns a list of all frames that contain
|
| 118 |
+
LUs in which the 'name' attribute of the LU matches the given regular
|
| 119 |
+
expression. Note that LU names are composed of "lemma.POS", where the
|
| 120 |
+
"lemma" part can be made up of either a single lexeme (e.g. 'run') or
|
| 121 |
+
multiple lexemes (e.g. 'a little') (see below).
|
| 122 |
+
|
| 123 |
+
>>> PrettyList(sorted(fn.frames_by_lemma(r'(?i)a little'), key=itemgetter('ID')))
|
| 124 |
+
[<frame ID=189 name=Quanti...>, <frame ID=2001 name=Degree>]
|
| 125 |
+
|
| 126 |
+
-------------
|
| 127 |
+
Lexical Units
|
| 128 |
+
-------------
|
| 129 |
+
|
| 130 |
+
A lexical unit (LU) is a pairing of a word with a meaning. For
|
| 131 |
+
example, the "Apply_heat" Frame describes a common situation
|
| 132 |
+
involving a Cook, some Food, and a Heating Instrument, and is
|
| 133 |
+
_evoked_ by words such as bake, blanch, boil, broil, brown,
|
| 134 |
+
simmer, steam, etc. These frame-evoking words are the LUs in the
|
| 135 |
+
Apply_heat frame. Each sense of a polysemous word is a different
|
| 136 |
+
LU.
|
| 137 |
+
|
| 138 |
+
We have used the word "word" in talking about LUs. The reality
|
| 139 |
+
is actually rather complex. When we say that the word "bake" is
|
| 140 |
+
polysemous, we mean that the lemma "bake.v" (which has the
|
| 141 |
+
word-forms "bake", "bakes", "baked", and "baking") is linked to
|
| 142 |
+
three different frames:
|
| 143 |
+
|
| 144 |
+
- Apply_heat: "Michelle baked the potatoes for 45 minutes."
|
| 145 |
+
|
| 146 |
+
- Cooking_creation: "Michelle baked her mother a cake for her birthday."
|
| 147 |
+
|
| 148 |
+
- Absorb_heat: "The potatoes have to bake for more than 30 minutes."
|
| 149 |
+
|
| 150 |
+
These constitute three different LUs, with different
|
| 151 |
+
definitions.
|
| 152 |
+
|
| 153 |
+
Multiword expressions such as "given name" and hyphenated words
|
| 154 |
+
like "shut-eye" can also be LUs. Idiomatic phrases such as
|
| 155 |
+
"middle of nowhere" and "give the slip (to)" are also defined as
|
| 156 |
+
LUs in the appropriate frames ("Isolated_places" and "Evading",
|
| 157 |
+
respectively), and their internal structure is not analyzed.
|
| 158 |
+
|
| 159 |
+
Framenet provides multiple annotated examples of each sense of a
|
| 160 |
+
word (i.e. each LU). Moreover, the set of examples
|
| 161 |
+
(approximately 20 per LU) illustrates all of the combinatorial
|
| 162 |
+
possibilities of the lexical unit.
|
| 163 |
+
|
| 164 |
+
Each LU is linked to a Frame, and hence to the other words which
|
| 165 |
+
evoke that Frame. This makes the FrameNet database similar to a
|
| 166 |
+
thesaurus, grouping together semantically similar words.
|
| 167 |
+
|
| 168 |
+
In the simplest case, frame-evoking words are verbs such as
|
| 169 |
+
"fried" in:
|
| 170 |
+
|
| 171 |
+
"Matilde fried the catfish in a heavy iron skillet."
|
| 172 |
+
|
| 173 |
+
Sometimes event nouns may evoke a Frame. For example,
|
| 174 |
+
"reduction" evokes "Cause_change_of_scalar_position" in:
|
| 175 |
+
|
| 176 |
+
"...the reduction of debt levels to $665 million from $2.6 billion."
|
| 177 |
+
|
| 178 |
+
Adjectives may also evoke a Frame. For example, "asleep" may
|
| 179 |
+
evoke the "Sleep" frame as in:
|
| 180 |
+
|
| 181 |
+
"They were asleep for hours."
|
| 182 |
+
|
| 183 |
+
Many common nouns, such as artifacts like "hat" or "tower",
|
| 184 |
+
typically serve as dependents rather than clearly evoking their
|
| 185 |
+
own frames.
|
| 186 |
+
|
| 187 |
+
Details for a specific lexical unit can be obtained using this class's
|
| 188 |
+
`lus()` function, which takes an optional regular expression
|
| 189 |
+
pattern that will be matched against the name of the lexical unit:
|
| 190 |
+
|
| 191 |
+
>>> from pprint import pprint
|
| 192 |
+
>>> PrettyList(sorted(fn.lus(r'(?i)a little'), key=itemgetter('ID')))
|
| 193 |
+
[<lu ID=14733 name=a little.n>, <lu ID=14743 name=a little.adv>, ...]
|
| 194 |
+
|
| 195 |
+
You can obtain detailed information on a particular LU by calling the
|
| 196 |
+
`lu()` function and passing in an LU's 'ID' number:
|
| 197 |
+
|
| 198 |
+
>>> from pprint import pprint
|
| 199 |
+
>>> from nltk.corpus import framenet as fn
|
| 200 |
+
>>> fn.lu(256).name
|
| 201 |
+
'foresee.v'
|
| 202 |
+
>>> fn.lu(256).definition
|
| 203 |
+
'COD: be aware of beforehand; predict.'
|
| 204 |
+
>>> fn.lu(256).frame.name
|
| 205 |
+
'Expectation'
|
| 206 |
+
>>> fn.lu(256).lexemes[0].name
|
| 207 |
+
'foresee'
|
| 208 |
+
|
| 209 |
+
Note that LU names take the form of a dotted string (e.g. "run.v" or "a
|
| 210 |
+
little.adv") in which a lemma precedes the "." and a part of speech
|
| 211 |
+
(POS) follows the dot. The lemma may be composed of a single lexeme
|
| 212 |
+
(e.g. "run") or of multiple lexemes (e.g. "a little"). The list of
|
| 213 |
+
POSs used in the LUs is:
|
| 214 |
+
|
| 215 |
+
v - verb
|
| 216 |
+
n - noun
|
| 217 |
+
a - adjective
|
| 218 |
+
adv - adverb
|
| 219 |
+
prep - preposition
|
| 220 |
+
num - numbers
|
| 221 |
+
intj - interjection
|
| 222 |
+
art - article
|
| 223 |
+
c - conjunction
|
| 224 |
+
scon - subordinating conjunction
|
| 225 |
+
|
| 226 |
+
For more detailed information about the info that is contained in the
|
| 227 |
+
dict that is returned by the `lu()` function, see the documentation on
|
| 228 |
+
the `lu()` function.
|
| 229 |
+
|
| 230 |
+
-------------------
|
| 231 |
+
Annotated Documents
|
| 232 |
+
-------------------
|
| 233 |
+
|
| 234 |
+
The FrameNet corpus contains a small set of annotated documents. A list
|
| 235 |
+
of these documents can be obtained by calling the `docs()` function:
|
| 236 |
+
|
| 237 |
+
>>> from pprint import pprint
|
| 238 |
+
>>> from nltk.corpus import framenet as fn
|
| 239 |
+
>>> d = fn.docs('BellRinging')[0]
|
| 240 |
+
>>> d.corpname
|
| 241 |
+
'PropBank'
|
| 242 |
+
>>> d.sentence[49]
|
| 243 |
+
full-text sentence (...) in BellRinging:
|
| 244 |
+
<BLANKLINE>
|
| 245 |
+
<BLANKLINE>
|
| 246 |
+
[POS] 17 tags
|
| 247 |
+
<BLANKLINE>
|
| 248 |
+
[POS_tagset] PENN
|
| 249 |
+
<BLANKLINE>
|
| 250 |
+
[text] + [annotationSet]
|
| 251 |
+
<BLANKLINE>
|
| 252 |
+
`` I live in hopes that the ringers themselves will be drawn into
|
| 253 |
+
***** ******* *****
|
| 254 |
+
Desir Cause_t Cause
|
| 255 |
+
[1] [3] [2]
|
| 256 |
+
<BLANKLINE>
|
| 257 |
+
that fuller life .
|
| 258 |
+
******
|
| 259 |
+
Comple
|
| 260 |
+
[4]
|
| 261 |
+
(Desir=Desiring, Cause_t=Cause_to_make_noise, Cause=Cause_motion, Comple=Completeness)
|
| 262 |
+
<BLANKLINE>
|
| 263 |
+
|
| 264 |
+
>>> d.sentence[49].annotationSet[1]
|
| 265 |
+
annotation set (...):
|
| 266 |
+
<BLANKLINE>
|
| 267 |
+
[status] MANUAL
|
| 268 |
+
<BLANKLINE>
|
| 269 |
+
[LU] (6605) hope.n in Desiring
|
| 270 |
+
<BLANKLINE>
|
| 271 |
+
[frame] (366) Desiring
|
| 272 |
+
<BLANKLINE>
|
| 273 |
+
[GF] 2 relations
|
| 274 |
+
<BLANKLINE>
|
| 275 |
+
[PT] 2 phrases
|
| 276 |
+
<BLANKLINE>
|
| 277 |
+
[text] + [Target] + [FE] + [Noun]
|
| 278 |
+
<BLANKLINE>
|
| 279 |
+
`` I live in hopes that the ringers themselves will be drawn into
|
| 280 |
+
- ^^^^ ^^ ***** ----------------------------------------------
|
| 281 |
+
E supp su Event
|
| 282 |
+
<BLANKLINE>
|
| 283 |
+
that fuller life .
|
| 284 |
+
-----------------
|
| 285 |
+
<BLANKLINE>
|
| 286 |
+
(E=Experiencer, su=supp)
|
| 287 |
+
<BLANKLINE>
|
| 288 |
+
<BLANKLINE>
|
lib/python3.10/site-packages/nltk/test/generate.doctest
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
| 2 |
+
.. For license information, see LICENSE.TXT
|
| 3 |
+
|
| 4 |
+
===============================================
|
| 5 |
+
Generating sentences from context-free grammars
|
| 6 |
+
===============================================
|
| 7 |
+
|
| 8 |
+
An example grammar:
|
| 9 |
+
|
| 10 |
+
>>> from nltk.parse.generate import generate, demo_grammar
|
| 11 |
+
>>> from nltk import CFG
|
| 12 |
+
>>> grammar = CFG.fromstring(demo_grammar)
|
| 13 |
+
>>> print(grammar)
|
| 14 |
+
Grammar with 13 productions (start state = S)
|
| 15 |
+
S -> NP VP
|
| 16 |
+
NP -> Det N
|
| 17 |
+
PP -> P NP
|
| 18 |
+
VP -> 'slept'
|
| 19 |
+
VP -> 'saw' NP
|
| 20 |
+
VP -> 'walked' PP
|
| 21 |
+
Det -> 'the'
|
| 22 |
+
Det -> 'a'
|
| 23 |
+
N -> 'man'
|
| 24 |
+
N -> 'park'
|
| 25 |
+
N -> 'dog'
|
| 26 |
+
P -> 'in'
|
| 27 |
+
P -> 'with'
|
| 28 |
+
|
| 29 |
+
The first 10 generated sentences:
|
| 30 |
+
|
| 31 |
+
>>> for sentence in generate(grammar, n=10):
|
| 32 |
+
... print(' '.join(sentence))
|
| 33 |
+
the man slept
|
| 34 |
+
the man saw the man
|
| 35 |
+
the man saw the park
|
| 36 |
+
the man saw the dog
|
| 37 |
+
the man saw a man
|
| 38 |
+
the man saw a park
|
| 39 |
+
the man saw a dog
|
| 40 |
+
the man walked in the man
|
| 41 |
+
the man walked in the park
|
| 42 |
+
the man walked in the dog
|
| 43 |
+
|
| 44 |
+
All sentences of max depth 4:
|
| 45 |
+
|
| 46 |
+
>>> for sentence in generate(grammar, depth=4):
|
| 47 |
+
... print(' '.join(sentence))
|
| 48 |
+
the man slept
|
| 49 |
+
the park slept
|
| 50 |
+
the dog slept
|
| 51 |
+
a man slept
|
| 52 |
+
a park slept
|
| 53 |
+
a dog slept
|
| 54 |
+
|
| 55 |
+
The number of sentences of different max depths:
|
| 56 |
+
|
| 57 |
+
>>> len(list(generate(grammar, depth=3)))
|
| 58 |
+
0
|
| 59 |
+
>>> len(list(generate(grammar, depth=4)))
|
| 60 |
+
6
|
| 61 |
+
>>> len(list(generate(grammar, depth=5)))
|
| 62 |
+
42
|
| 63 |
+
>>> len(list(generate(grammar, depth=6)))
|
| 64 |
+
114
|
| 65 |
+
>>> len(list(generate(grammar)))
|
| 66 |
+
114
|
| 67 |
+
|
| 68 |
+
Infinite grammars will throw a RecursionError when not bounded by some ``depth``:
|
| 69 |
+
|
| 70 |
+
>>> grammar = CFG.fromstring("""
|
| 71 |
+
... S -> A B
|
| 72 |
+
... A -> B
|
| 73 |
+
... B -> "b" | A
|
| 74 |
+
... """)
|
| 75 |
+
>>> list(generate(grammar))
|
| 76 |
+
Traceback (most recent call last):
|
| 77 |
+
...
|
| 78 |
+
RuntimeError: The grammar has rule(s) that yield infinite recursion!
|
lib/python3.10/site-packages/nltk/test/gensim.doctest
ADDED
|
@@ -0,0 +1,141 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
| 2 |
+
.. For license information, see LICENSE.TXT
|
| 3 |
+
|
| 4 |
+
=======================================
|
| 5 |
+
Demonstrate word embedding using Gensim
|
| 6 |
+
=======================================
|
| 7 |
+
|
| 8 |
+
>>> from nltk.test.gensim_fixt import setup_module
|
| 9 |
+
>>> setup_module()
|
| 10 |
+
|
| 11 |
+
We demonstrate three functions:
|
| 12 |
+
- Train the word embeddings using brown corpus;
|
| 13 |
+
- Load the pre-trained model and perform simple tasks; and
|
| 14 |
+
- Pruning the pre-trained binary model.
|
| 15 |
+
|
| 16 |
+
>>> import gensim
|
| 17 |
+
|
| 18 |
+
---------------
|
| 19 |
+
Train the model
|
| 20 |
+
---------------
|
| 21 |
+
|
| 22 |
+
Here we train a word embedding using the Brown Corpus:
|
| 23 |
+
|
| 24 |
+
>>> from nltk.corpus import brown
|
| 25 |
+
>>> train_set = brown.sents()[:10000]
|
| 26 |
+
>>> model = gensim.models.Word2Vec(train_set)
|
| 27 |
+
|
| 28 |
+
It might take some time to train the model. So, after it is trained, it can be saved as follows:
|
| 29 |
+
|
| 30 |
+
>>> model.save('brown.embedding')
|
| 31 |
+
>>> new_model = gensim.models.Word2Vec.load('brown.embedding')
|
| 32 |
+
|
| 33 |
+
The model will be the list of words with their embedding. We can easily get the vector representation of a word.
|
| 34 |
+
|
| 35 |
+
>>> len(new_model.wv['university'])
|
| 36 |
+
100
|
| 37 |
+
|
| 38 |
+
There are some supporting functions already implemented in Gensim to manipulate word embeddings.
|
| 39 |
+
For example, to compute the cosine similarity between 2 words:
|
| 40 |
+
|
| 41 |
+
>>> new_model.wv.similarity('university','school') > 0.3
|
| 42 |
+
True
|
| 43 |
+
|
| 44 |
+
---------------------------
|
| 45 |
+
Using the pre-trained model
|
| 46 |
+
---------------------------
|
| 47 |
+
|
| 48 |
+
NLTK includes a pre-trained model which is part of a model that is trained on 100 billion words from the Google News Dataset.
|
| 49 |
+
The full model is from https://code.google.com/p/word2vec/ (about 3 GB).
|
| 50 |
+
|
| 51 |
+
>>> from nltk.data import find
|
| 52 |
+
>>> word2vec_sample = str(find('models/word2vec_sample/pruned.word2vec.txt'))
|
| 53 |
+
>>> model = gensim.models.KeyedVectors.load_word2vec_format(word2vec_sample, binary=False)
|
| 54 |
+
|
| 55 |
+
We pruned the model to only include the most common words (~44k words).
|
| 56 |
+
|
| 57 |
+
>>> len(model)
|
| 58 |
+
43981
|
| 59 |
+
|
| 60 |
+
Each word is represented in the space of 300 dimensions:
|
| 61 |
+
|
| 62 |
+
>>> len(model['university'])
|
| 63 |
+
300
|
| 64 |
+
|
| 65 |
+
Finding the top n words that are similar to a target word is simple. The result is the list of n words with the score.
|
| 66 |
+
|
| 67 |
+
>>> model.most_similar(positive=['university'], topn = 3)
|
| 68 |
+
[('universities', 0.70039...), ('faculty', 0.67809...), ('undergraduate', 0.65870...)]
|
| 69 |
+
|
| 70 |
+
Finding a word that is not in a list is also supported, although implementing this by yourself is simple.
|
| 71 |
+
|
| 72 |
+
>>> model.doesnt_match('breakfast cereal dinner lunch'.split())
|
| 73 |
+
'cereal'
|
| 74 |
+
|
| 75 |
+
Mikolov et al. (2013) figured out that word embedding captures much of syntactic and semantic regularities. For example,
|
| 76 |
+
the vector 'King - Man + Woman' is close to 'Queen' and 'Germany - Berlin + Paris' is close to 'France'.
|
| 77 |
+
|
| 78 |
+
>>> model.most_similar(positive=['woman','king'], negative=['man'], topn = 1)
|
| 79 |
+
[('queen', 0.71181...)]
|
| 80 |
+
|
| 81 |
+
>>> model.most_similar(positive=['Paris','Germany'], negative=['Berlin'], topn = 1)
|
| 82 |
+
[('France', 0.78840...)]
|
| 83 |
+
|
| 84 |
+
We can visualize the word embeddings using t-SNE (https://lvdmaaten.github.io/tsne/). For this demonstration, we visualize the first 1000 words.
|
| 85 |
+
|
| 86 |
+
| import numpy as np
|
| 87 |
+
| labels = []
|
| 88 |
+
| count = 0
|
| 89 |
+
| max_count = 1000
|
| 90 |
+
| X = np.zeros(shape=(max_count,len(model['university'])))
|
| 91 |
+
|
|
| 92 |
+
| for term in model.index_to_key:
|
| 93 |
+
| X[count] = model[term]
|
| 94 |
+
| labels.append(term)
|
| 95 |
+
| count+= 1
|
| 96 |
+
| if count >= max_count: break
|
| 97 |
+
|
|
| 98 |
+
| # It is recommended to use PCA first to reduce to ~50 dimensions
|
| 99 |
+
| from sklearn.decomposition import PCA
|
| 100 |
+
| pca = PCA(n_components=50)
|
| 101 |
+
| X_50 = pca.fit_transform(X)
|
| 102 |
+
|
|
| 103 |
+
| # Using TSNE to further reduce to 2 dimensions
|
| 104 |
+
| from sklearn.manifold import TSNE
|
| 105 |
+
| model_tsne = TSNE(n_components=2, random_state=0)
|
| 106 |
+
| Y = model_tsne.fit_transform(X_50)
|
| 107 |
+
|
|
| 108 |
+
| # Show the scatter plot
|
| 109 |
+
| import matplotlib.pyplot as plt
|
| 110 |
+
| plt.scatter(Y[:,0], Y[:,1], 20)
|
| 111 |
+
|
|
| 112 |
+
| # Add labels
|
| 113 |
+
| for label, x, y in zip(labels, Y[:, 0], Y[:, 1]):
|
| 114 |
+
| plt.annotate(label, xy = (x,y), xytext = (0, 0), textcoords = 'offset points', size = 10)
|
| 115 |
+
|
|
| 116 |
+
| plt.show()
|
| 117 |
+
|
| 118 |
+
------------------------------
|
| 119 |
+
Prune the trained binary model
|
| 120 |
+
------------------------------
|
| 121 |
+
|
| 122 |
+
Here is the supporting code to extract part of the binary model (GoogleNews-vectors-negative300.bin.gz) from https://code.google.com/p/word2vec/
|
| 123 |
+
We use this code to get the `word2vec_sample` model.
|
| 124 |
+
|
| 125 |
+
| import gensim
|
| 126 |
+
| # Load the binary model
|
| 127 |
+
| model = gensim.models.KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin.gz', binary = True)
|
| 128 |
+
|
|
| 129 |
+
| # Only output word that appear in the Brown corpus
|
| 130 |
+
| from nltk.corpus import brown
|
| 131 |
+
| words = set(brown.words())
|
| 132 |
+
| print(len(words))
|
| 133 |
+
|
|
| 134 |
+
| # Output presented word to a temporary file
|
| 135 |
+
| out_file = 'pruned.word2vec.txt'
|
| 136 |
+
| with open(out_file,'w') as f:
|
| 137 |
+
| word_presented = words.intersection(model.index_to_key)
|
| 138 |
+
| f.write('{} {}\n'.format(len(word_presented),len(model['word'])))
|
| 139 |
+
|
|
| 140 |
+
| for word in word_presented:
|
| 141 |
+
| f.write('{} {}\n'.format(word, ' '.join(str(value) for value in model[word])))
|
lib/python3.10/site-packages/nltk/test/gluesemantics_malt_fixt.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
def setup_module():
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
from nltk.parse.malt import MaltParser
|
| 5 |
+
|
| 6 |
+
try:
|
| 7 |
+
depparser = MaltParser()
|
| 8 |
+
except (AssertionError, LookupError) as e:
|
| 9 |
+
pytest.skip("MaltParser is not available")
|
lib/python3.10/site-packages/nltk/test/grammar.doctest
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
| 2 |
+
.. For license information, see LICENSE.TXT
|
| 3 |
+
|
| 4 |
+
===============
|
| 5 |
+
Grammar Parsing
|
| 6 |
+
===============
|
| 7 |
+
|
| 8 |
+
Grammars can be parsed from strings:
|
| 9 |
+
|
| 10 |
+
>>> from nltk import CFG
|
| 11 |
+
>>> grammar = CFG.fromstring("""
|
| 12 |
+
... S -> NP VP
|
| 13 |
+
... PP -> P NP
|
| 14 |
+
... NP -> Det N | NP PP
|
| 15 |
+
... VP -> V NP | VP PP
|
| 16 |
+
... Det -> 'a' | 'the'
|
| 17 |
+
... N -> 'dog' | 'cat'
|
| 18 |
+
... V -> 'chased' | 'sat'
|
| 19 |
+
... P -> 'on' | 'in'
|
| 20 |
+
... """)
|
| 21 |
+
>>> grammar
|
| 22 |
+
<Grammar with 14 productions>
|
| 23 |
+
>>> grammar.start()
|
| 24 |
+
S
|
| 25 |
+
>>> grammar.productions()
|
| 26 |
+
[S -> NP VP, PP -> P NP, NP -> Det N, NP -> NP PP, VP -> V NP, VP -> VP PP,
|
| 27 |
+
Det -> 'a', Det -> 'the', N -> 'dog', N -> 'cat', V -> 'chased', V -> 'sat',
|
| 28 |
+
P -> 'on', P -> 'in']
|
| 29 |
+
|
| 30 |
+
Probabilistic CFGs:
|
| 31 |
+
|
| 32 |
+
>>> from nltk import PCFG
|
| 33 |
+
>>> toy_pcfg1 = PCFG.fromstring("""
|
| 34 |
+
... S -> NP VP [1.0]
|
| 35 |
+
... NP -> Det N [0.5] | NP PP [0.25] | 'John' [0.1] | 'I' [0.15]
|
| 36 |
+
... Det -> 'the' [0.8] | 'my' [0.2]
|
| 37 |
+
... N -> 'man' [0.5] | 'telescope' [0.5]
|
| 38 |
+
... VP -> VP PP [0.1] | V NP [0.7] | V [0.2]
|
| 39 |
+
... V -> 'ate' [0.35] | 'saw' [0.65]
|
| 40 |
+
... PP -> P NP [1.0]
|
| 41 |
+
... P -> 'with' [0.61] | 'under' [0.39]
|
| 42 |
+
... """)
|
| 43 |
+
|
| 44 |
+
Chomsky Normal Form grammar (Test for bug 474)
|
| 45 |
+
|
| 46 |
+
>>> g = CFG.fromstring("VP^<TOP> -> VBP NP^<VP-TOP>")
|
| 47 |
+
>>> g.productions()[0].lhs()
|
| 48 |
+
VP^<TOP>
|
| 49 |
+
|
| 50 |
+
Grammars can contain both empty strings and empty productions:
|
| 51 |
+
|
| 52 |
+
>>> from nltk.grammar import CFG
|
| 53 |
+
>>> from nltk.parse.generate import generate
|
| 54 |
+
>>> grammar = CFG.fromstring("""
|
| 55 |
+
... S -> A B
|
| 56 |
+
... A -> 'a'
|
| 57 |
+
... # An empty string:
|
| 58 |
+
... B -> 'b' | ''
|
| 59 |
+
... """)
|
| 60 |
+
>>> list(generate(grammar))
|
| 61 |
+
[['a', 'b'], ['a', '']]
|
| 62 |
+
>>> grammar = CFG.fromstring("""
|
| 63 |
+
... S -> A B
|
| 64 |
+
... A -> 'a'
|
| 65 |
+
... # An empty production:
|
| 66 |
+
... B -> 'b' |
|
| 67 |
+
... """)
|
| 68 |
+
>>> list(generate(grammar))
|
| 69 |
+
[['a', 'b'], ['a']]
|
lib/python3.10/site-packages/nltk/test/grammartestsuites.doctest
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
| 2 |
+
.. For license information, see LICENSE.TXT
|
| 3 |
+
|
| 4 |
+
==========================
|
| 5 |
+
Test Suites for Grammars
|
| 6 |
+
==========================
|
| 7 |
+
|
| 8 |
+
Sentences in the test suite are divided into two classes:
|
| 9 |
+
|
| 10 |
+
- grammatical (*accept*) and
|
| 11 |
+
- ungrammatical (*reject*).
|
| 12 |
+
|
| 13 |
+
If a sentence should parse according to the grammar, the value of
|
| 14 |
+
``trees`` will be a non-empty list. If a sentence should be rejected
|
| 15 |
+
according to the grammar, then the value of ``trees`` will be ``None``.
|
| 16 |
+
|
| 17 |
+
>>> from nltk.parse import TestGrammar
|
| 18 |
+
>>> germantest1 = {}
|
| 19 |
+
>>> germantest1['doc'] = "Tests for person agreement"
|
| 20 |
+
>>> germantest1['accept'] = [
|
| 21 |
+
... 'ich komme',
|
| 22 |
+
... 'ich sehe mich',
|
| 23 |
+
... 'du kommst',
|
| 24 |
+
... 'du siehst mich',
|
| 25 |
+
... 'sie kommt',
|
| 26 |
+
... 'sie sieht mich',
|
| 27 |
+
... 'ihr kommt',
|
| 28 |
+
... 'wir kommen',
|
| 29 |
+
... 'sie kommen',
|
| 30 |
+
... 'du magst mich',
|
| 31 |
+
... 'er mag mich',
|
| 32 |
+
... 'du folgst mir',
|
| 33 |
+
... 'sie hilft mir',
|
| 34 |
+
... ]
|
| 35 |
+
>>> germantest1['reject'] = [
|
| 36 |
+
... 'ich kommt',
|
| 37 |
+
... 'ich kommst',
|
| 38 |
+
... 'ich siehst mich',
|
| 39 |
+
... 'du komme',
|
| 40 |
+
... 'du sehe mich',
|
| 41 |
+
... 'du kommt',
|
| 42 |
+
... 'er komme',
|
| 43 |
+
... 'er siehst mich',
|
| 44 |
+
... 'wir komme',
|
| 45 |
+
... 'wir kommst',
|
| 46 |
+
... 'die Katzen kommst',
|
| 47 |
+
... 'sie komme',
|
| 48 |
+
... 'sie kommst',
|
| 49 |
+
... 'du mag mich',
|
| 50 |
+
... 'er magst mich',
|
| 51 |
+
... 'du folgt mir',
|
| 52 |
+
... 'sie hilfst mir',
|
| 53 |
+
... ]
|
| 54 |
+
>>> germantest2 = {}
|
| 55 |
+
>>> germantest2['doc'] = "Tests for number agreement"
|
| 56 |
+
>>> germantest2['accept'] = [
|
| 57 |
+
... 'der Hund kommt',
|
| 58 |
+
... 'die Hunde kommen',
|
| 59 |
+
... 'ich komme',
|
| 60 |
+
... 'wir kommen',
|
| 61 |
+
... 'ich sehe die Katzen',
|
| 62 |
+
... 'ich folge den Katzen',
|
| 63 |
+
... 'ich sehe die Katzen',
|
| 64 |
+
... 'ich folge den Katzen',
|
| 65 |
+
... 'wir sehen die Katzen',
|
| 66 |
+
... 'wir folgen den Katzen'
|
| 67 |
+
... ]
|
| 68 |
+
>>> germantest2['reject'] = [
|
| 69 |
+
... 'ich kommen',
|
| 70 |
+
... 'wir komme',
|
| 71 |
+
... 'der Hunde kommt',
|
| 72 |
+
... 'der Hunde kommen',
|
| 73 |
+
... 'die Katzen kommt',
|
| 74 |
+
... 'ich sehe der Hunde',
|
| 75 |
+
... 'ich folge den Hund',
|
| 76 |
+
... 'ich sehen der Hunde',
|
| 77 |
+
... 'ich folgen den Hund',
|
| 78 |
+
... 'wir sehe die Katzen',
|
| 79 |
+
... 'wir folge den Katzen'
|
| 80 |
+
... ]
|
| 81 |
+
>>> germantest3 = {}
|
| 82 |
+
>>> germantest3['doc'] = "Tests for case government and subcategorization"
|
| 83 |
+
>>> germantest3['accept'] = [
|
| 84 |
+
... 'der Hund sieht mich',
|
| 85 |
+
... 'der Hund kommt',
|
| 86 |
+
... 'ich sehe den Hund',
|
| 87 |
+
... 'ich helfe dem Hund',
|
| 88 |
+
... ]
|
| 89 |
+
>>> germantest3['reject'] = [
|
| 90 |
+
... 'ich sehe',
|
| 91 |
+
... 'ich helfe',
|
| 92 |
+
... 'ich komme den Hund',
|
| 93 |
+
... 'ich sehe den Hund die Katzen',
|
| 94 |
+
... 'du hilfst mich',
|
| 95 |
+
... 'du siehst mir',
|
| 96 |
+
... 'du siehst ich',
|
| 97 |
+
... 'der Hunde kommt mich',
|
| 98 |
+
... 'die Hunde sehe die Hunde',
|
| 99 |
+
... 'der Hund sehe die Hunde',
|
| 100 |
+
... 'ich hilft den Hund',
|
| 101 |
+
... 'ich hilft der Hund',
|
| 102 |
+
... 'ich sehe dem Hund',
|
| 103 |
+
... ]
|
| 104 |
+
>>> germantestsuites = [germantest1, germantest2, germantest3]
|
| 105 |
+
>>> tester = TestGrammar('grammars/book_grammars/german.fcfg', germantestsuites)
|
| 106 |
+
>>> tester.run()
|
| 107 |
+
Tests for person agreement: All tests passed!
|
| 108 |
+
Tests for number agreement: All tests passed!
|
| 109 |
+
Tests for case government and subcategorization: All tests passed!
|
lib/python3.10/site-packages/nltk/test/japanese.doctest
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
| 2 |
+
.. For license information, see LICENSE.TXT
|
| 3 |
+
|
| 4 |
+
============================
|
| 5 |
+
Japanese Language Processing
|
| 6 |
+
============================
|
| 7 |
+
|
| 8 |
+
>>> from nltk import *
|
| 9 |
+
|
| 10 |
+
-------------
|
| 11 |
+
Corpus Access
|
| 12 |
+
-------------
|
| 13 |
+
|
| 14 |
+
KNB Corpus
|
| 15 |
+
----------
|
| 16 |
+
|
| 17 |
+
>>> from nltk.corpus import knbc
|
| 18 |
+
|
| 19 |
+
Access the words: this should produce a list of strings:
|
| 20 |
+
|
| 21 |
+
>>> type(knbc.words()[0]) is not bytes
|
| 22 |
+
True
|
| 23 |
+
|
| 24 |
+
Access the sentences: this should produce a list of lists of strings:
|
| 25 |
+
|
| 26 |
+
>>> type(knbc.sents()[0][0]) is not bytes
|
| 27 |
+
True
|
| 28 |
+
|
| 29 |
+
Access the tagged words: this should produce a list of word, tag pairs:
|
| 30 |
+
|
| 31 |
+
>>> type(knbc.tagged_words()[0])
|
| 32 |
+
<... 'tuple'>
|
| 33 |
+
|
| 34 |
+
Access the tagged sentences: this should produce a list of lists of word, tag pairs:
|
| 35 |
+
|
| 36 |
+
>>> type(knbc.tagged_sents()[0][0])
|
| 37 |
+
<... 'tuple'>
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
JEITA Corpus
|
| 41 |
+
------------
|
| 42 |
+
|
| 43 |
+
>>> from nltk.corpus import jeita
|
| 44 |
+
|
| 45 |
+
Access the tagged words: this should produce a list of word, tag pairs, where a tag is a string:
|
| 46 |
+
|
| 47 |
+
>>> type(jeita.tagged_words()[0][1]) is not bytes
|
| 48 |
+
True
|
lib/python3.10/site-packages/nltk/test/lm.doctest
ADDED
|
@@ -0,0 +1,135 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
| 2 |
+
.. For license information, see LICENSE.TXT
|
| 3 |
+
|
| 4 |
+
.. -*- coding: utf-8 -*-
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
Regression Tests
|
| 8 |
+
================
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
Issue 167
|
| 12 |
+
---------
|
| 13 |
+
https://github.com/nltk/nltk/issues/167
|
| 14 |
+
|
| 15 |
+
>>> from nltk.corpus import brown
|
| 16 |
+
>>> from nltk.lm.preprocessing import padded_everygram_pipeline
|
| 17 |
+
>>> ngram_order = 3
|
| 18 |
+
>>> train_data, vocab_data = padded_everygram_pipeline(
|
| 19 |
+
... ngram_order,
|
| 20 |
+
... brown.sents(categories="news")
|
| 21 |
+
... )
|
| 22 |
+
|
| 23 |
+
>>> from nltk.lm import WittenBellInterpolated
|
| 24 |
+
>>> lm = WittenBellInterpolated(ngram_order)
|
| 25 |
+
>>> lm.fit(train_data, vocab_data)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
Sentence containing an unseen word should result in infinite entropy because
|
| 31 |
+
Witten-Bell is based ultimately on MLE, which cannot handle unseen ngrams.
|
| 32 |
+
Crucially, it shouldn't raise any exceptions for unseen words.
|
| 33 |
+
|
| 34 |
+
>>> from nltk.util import ngrams
|
| 35 |
+
>>> sent = ngrams("This is a sentence with the word aaddvark".split(), 3)
|
| 36 |
+
>>> lm.entropy(sent)
|
| 37 |
+
inf
|
| 38 |
+
|
| 39 |
+
If we remove all unseen ngrams from the sentence, we'll get a non-infinite value
|
| 40 |
+
for the entropy.
|
| 41 |
+
|
| 42 |
+
>>> sent = ngrams("This is a sentence".split(), 3)
|
| 43 |
+
>>> round(lm.entropy(sent), 14)
|
| 44 |
+
10.23701322869105
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
Issue 367
|
| 48 |
+
---------
|
| 49 |
+
https://github.com/nltk/nltk/issues/367
|
| 50 |
+
|
| 51 |
+
Reproducing Dan Blanchard's example:
|
| 52 |
+
https://github.com/nltk/nltk/issues/367#issuecomment-14646110
|
| 53 |
+
|
| 54 |
+
>>> from nltk.lm import Lidstone, Vocabulary
|
| 55 |
+
>>> word_seq = list('aaaababaaccbacb')
|
| 56 |
+
>>> ngram_order = 2
|
| 57 |
+
>>> from nltk.util import everygrams
|
| 58 |
+
>>> train_data = [everygrams(word_seq, max_len=ngram_order)]
|
| 59 |
+
>>> V = Vocabulary(['a', 'b', 'c', ''])
|
| 60 |
+
>>> lm = Lidstone(0.2, ngram_order, vocabulary=V)
|
| 61 |
+
>>> lm.fit(train_data)
|
| 62 |
+
|
| 63 |
+
For doctest to work we have to sort the vocabulary keys.
|
| 64 |
+
|
| 65 |
+
>>> V_keys = sorted(V)
|
| 66 |
+
>>> round(sum(lm.score(w, ("b",)) for w in V_keys), 6)
|
| 67 |
+
1.0
|
| 68 |
+
>>> round(sum(lm.score(w, ("a",)) for w in V_keys), 6)
|
| 69 |
+
1.0
|
| 70 |
+
|
| 71 |
+
>>> [lm.score(w, ("b",)) for w in V_keys]
|
| 72 |
+
[0.05, 0.05, 0.8, 0.05, 0.05]
|
| 73 |
+
>>> [round(lm.score(w, ("a",)), 4) for w in V_keys]
|
| 74 |
+
[0.0222, 0.0222, 0.4667, 0.2444, 0.2444]
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
Here's reproducing @afourney's comment:
|
| 78 |
+
https://github.com/nltk/nltk/issues/367#issuecomment-15686289
|
| 79 |
+
|
| 80 |
+
>>> sent = ['foo', 'foo', 'foo', 'foo', 'bar', 'baz']
|
| 81 |
+
>>> ngram_order = 3
|
| 82 |
+
>>> from nltk.lm.preprocessing import padded_everygram_pipeline
|
| 83 |
+
>>> train_data, vocab_data = padded_everygram_pipeline(ngram_order, [sent])
|
| 84 |
+
>>> from nltk.lm import Lidstone
|
| 85 |
+
>>> lm = Lidstone(0.2, ngram_order)
|
| 86 |
+
>>> lm.fit(train_data, vocab_data)
|
| 87 |
+
|
| 88 |
+
The vocabulary includes the "UNK" symbol as well as two padding symbols.
|
| 89 |
+
|
| 90 |
+
>>> len(lm.vocab)
|
| 91 |
+
6
|
| 92 |
+
>>> word = "foo"
|
| 93 |
+
>>> context = ("bar", "baz")
|
| 94 |
+
|
| 95 |
+
The raw counts.
|
| 96 |
+
|
| 97 |
+
>>> lm.context_counts(context)[word]
|
| 98 |
+
0
|
| 99 |
+
>>> lm.context_counts(context).N()
|
| 100 |
+
1
|
| 101 |
+
|
| 102 |
+
Counts with Lidstone smoothing.
|
| 103 |
+
|
| 104 |
+
>>> lm.context_counts(context)[word] + lm.gamma
|
| 105 |
+
0.2
|
| 106 |
+
>>> lm.context_counts(context).N() + len(lm.vocab) * lm.gamma
|
| 107 |
+
2.2
|
| 108 |
+
|
| 109 |
+
Without any backoff, just using Lidstone smoothing, P("foo" | "bar", "baz") should be:
|
| 110 |
+
0.2 / 2.2 ~= 0.090909
|
| 111 |
+
|
| 112 |
+
>>> round(lm.score(word, context), 6)
|
| 113 |
+
0.090909
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
Issue 380
|
| 117 |
+
---------
|
| 118 |
+
https://github.com/nltk/nltk/issues/380
|
| 119 |
+
|
| 120 |
+
Reproducing setup akin to this comment:
|
| 121 |
+
https://github.com/nltk/nltk/issues/380#issue-12879030
|
| 122 |
+
|
| 123 |
+
For speed take only the first 100 sentences of reuters. Shouldn't affect the test.
|
| 124 |
+
|
| 125 |
+
>>> from nltk.corpus import reuters
|
| 126 |
+
>>> sents = reuters.sents()[:100]
|
| 127 |
+
>>> ngram_order = 3
|
| 128 |
+
>>> from nltk.lm.preprocessing import padded_everygram_pipeline
|
| 129 |
+
>>> train_data, vocab_data = padded_everygram_pipeline(ngram_order, sents)
|
| 130 |
+
|
| 131 |
+
>>> from nltk.lm import Lidstone
|
| 132 |
+
>>> lm = Lidstone(0.2, ngram_order)
|
| 133 |
+
>>> lm.fit(train_data, vocab_data)
|
| 134 |
+
>>> lm.score("said", ("",)) < 1
|
| 135 |
+
True
|
lib/python3.10/site-packages/nltk/test/meteor.doctest
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
| 2 |
+
.. For license information, see LICENSE.TXT
|
| 3 |
+
|
| 4 |
+
.. -*- coding: utf-8 -*-
|
| 5 |
+
|
| 6 |
+
=============
|
| 7 |
+
METEOR tests
|
| 8 |
+
=============
|
| 9 |
+
|
| 10 |
+
No Alignment test
|
| 11 |
+
------------------
|
| 12 |
+
|
| 13 |
+
>>> from nltk.translate import meteor
|
| 14 |
+
>>> from nltk import word_tokenize
|
| 15 |
+
|
| 16 |
+
If the candidate has no alignment to any of the references, the METEOR score is 0.
|
| 17 |
+
|
| 18 |
+
>>> round(meteor(
|
| 19 |
+
... [word_tokenize('The candidate has no alignment to any of the references')],
|
| 20 |
+
... word_tokenize('John loves Mary')
|
| 21 |
+
... ), 4)
|
| 22 |
+
0.0
|
| 23 |
+
|
| 24 |
+
Tests based on wikipedia examples
|
| 25 |
+
---------------------------------
|
| 26 |
+
|
| 27 |
+
Testing on `wikipedia examples <https://en.wikipedia.org/wiki/METEOR#Examples>`_
|
| 28 |
+
|
| 29 |
+
>>> same_res = round(meteor(
|
| 30 |
+
... [word_tokenize('The cat sat on the mat')],
|
| 31 |
+
... word_tokenize('The cat sat on the mat')
|
| 32 |
+
... ), 4)
|
| 33 |
+
>>> abs(same_res - 0.9977) < 1e-2
|
| 34 |
+
True
|
| 35 |
+
|
| 36 |
+
>>> meteor(
|
| 37 |
+
... [word_tokenize('The cat sat on the mat')],
|
| 38 |
+
... word_tokenize('on the mat sat the cat')
|
| 39 |
+
... )
|
| 40 |
+
0.5
|
| 41 |
+
|
| 42 |
+
>>> round(meteor(
|
| 43 |
+
... [word_tokenize('The cat sat on the mat')],
|
| 44 |
+
... word_tokenize('The cat was sat on the mat')
|
| 45 |
+
... ), 4)
|
| 46 |
+
0.9654
|
| 47 |
+
|
| 48 |
+
Test corresponding to issue #2751, where METEOR score > 1
|
| 49 |
+
|
| 50 |
+
>>> round(meteor(
|
| 51 |
+
... [word_tokenize('create or update a vm set')],
|
| 52 |
+
... word_tokenize('creates or updates a virtual machine scale set')
|
| 53 |
+
... ), 4)
|
| 54 |
+
0.7806
|
lib/python3.10/site-packages/nltk/test/nonmonotonic.doctest
ADDED
|
@@ -0,0 +1,293 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. Copyright (C) 2001-2023 NLTK Project
|
| 2 |
+
.. For license information, see LICENSE.TXT
|
| 3 |
+
|
| 4 |
+
======================
|
| 5 |
+
Nonmonotonic Reasoning
|
| 6 |
+
======================
|
| 7 |
+
|
| 8 |
+
>>> from nltk.test.setup_fixt import check_binary
|
| 9 |
+
>>> check_binary('mace4')
|
| 10 |
+
|
| 11 |
+
>>> from nltk import *
|
| 12 |
+
>>> from nltk.inference.nonmonotonic import *
|
| 13 |
+
>>> from nltk.sem import logic
|
| 14 |
+
>>> logic._counter._value = 0
|
| 15 |
+
>>> read_expr = logic.Expression.fromstring
|
| 16 |
+
|
| 17 |
+
------------------------
|
| 18 |
+
Closed Domain Assumption
|
| 19 |
+
------------------------
|
| 20 |
+
|
| 21 |
+
The only entities in the domain are those found in the assumptions or goal.
|
| 22 |
+
If the domain only contains "A" and "B", then the expression "exists x.P(x)" can
|
| 23 |
+
be replaced with "P(A) | P(B)" and an expression "all x.P(x)" can be replaced
|
| 24 |
+
with "P(A) & P(B)".
|
| 25 |
+
|
| 26 |
+
>>> p1 = read_expr(r'all x.(man(x) -> mortal(x))')
|
| 27 |
+
>>> p2 = read_expr(r'man(Socrates)')
|
| 28 |
+
>>> c = read_expr(r'mortal(Socrates)')
|
| 29 |
+
>>> prover = Prover9Command(c, [p1,p2])
|
| 30 |
+
>>> prover.prove()
|
| 31 |
+
True
|
| 32 |
+
>>> cdp = ClosedDomainProver(prover)
|
| 33 |
+
>>> for a in cdp.assumptions(): print(a) # doctest: +SKIP
|
| 34 |
+
(man(Socrates) -> mortal(Socrates))
|
| 35 |
+
man(Socrates)
|
| 36 |
+
>>> cdp.prove()
|
| 37 |
+
True
|
| 38 |
+
|
| 39 |
+
>>> p1 = read_expr(r'exists x.walk(x)')
|
| 40 |
+
>>> p2 = read_expr(r'man(Socrates)')
|
| 41 |
+
>>> c = read_expr(r'walk(Socrates)')
|
| 42 |
+
>>> prover = Prover9Command(c, [p1,p2])
|
| 43 |
+
>>> prover.prove()
|
| 44 |
+
False
|
| 45 |
+
>>> cdp = ClosedDomainProver(prover)
|
| 46 |
+
>>> for a in cdp.assumptions(): print(a) # doctest: +SKIP
|
| 47 |
+
walk(Socrates)
|
| 48 |
+
man(Socrates)
|
| 49 |
+
>>> cdp.prove()
|
| 50 |
+
True
|
| 51 |
+
|
| 52 |
+
>>> p1 = read_expr(r'exists x.walk(x)')
|
| 53 |
+
>>> p2 = read_expr(r'man(Socrates)')
|
| 54 |
+
>>> p3 = read_expr(r'-walk(Bill)')
|
| 55 |
+
>>> c = read_expr(r'walk(Socrates)')
|
| 56 |
+
>>> prover = Prover9Command(c, [p1,p2,p3])
|
| 57 |
+
>>> prover.prove()
|
| 58 |
+
False
|
| 59 |
+
>>> cdp = ClosedDomainProver(prover)
|
| 60 |
+
>>> for a in cdp.assumptions(): print(a) # doctest: +SKIP
|
| 61 |
+
(walk(Socrates) | walk(Bill))
|
| 62 |
+
man(Socrates)
|
| 63 |
+
-walk(Bill)
|
| 64 |
+
>>> cdp.prove()
|
| 65 |
+
True
|
| 66 |
+
|
| 67 |
+
>>> p1 = read_expr(r'walk(Socrates)')
|
| 68 |
+
>>> p2 = read_expr(r'walk(Bill)')
|
| 69 |
+
>>> c = read_expr(r'all x.walk(x)')
|
| 70 |
+
>>> prover = Prover9Command(c, [p1,p2])
|
| 71 |
+
>>> prover.prove()
|
| 72 |
+
False
|
| 73 |
+
>>> cdp = ClosedDomainProver(prover)
|
| 74 |
+
>>> for a in cdp.assumptions(): print(a) # doctest: +SKIP
|
| 75 |
+
walk(Socrates)
|
| 76 |
+
walk(Bill)
|
| 77 |
+
>>> print(cdp.goal()) # doctest: +SKIP
|
| 78 |
+
(walk(Socrates) & walk(Bill))
|
| 79 |
+
>>> cdp.prove()
|
| 80 |
+
True
|
| 81 |
+
|
| 82 |
+
>>> p1 = read_expr(r'girl(mary)')
|
| 83 |
+
>>> p2 = read_expr(r'dog(rover)')
|
| 84 |
+
>>> p3 = read_expr(r'all x.(girl(x) -> -dog(x))')
|
| 85 |
+
>>> p4 = read_expr(r'all x.(dog(x) -> -girl(x))')
|
| 86 |
+
>>> p5 = read_expr(r'chase(mary, rover)')
|
| 87 |
+
>>> c = read_expr(r'exists y.(dog(y) & all x.(girl(x) -> chase(x,y)))')
|
| 88 |
+
>>> prover = Prover9Command(c, [p1,p2,p3,p4,p5])
|
| 89 |
+
>>> print(prover.prove())
|
| 90 |
+
False
|
| 91 |
+
>>> cdp = ClosedDomainProver(prover)
|
| 92 |
+
>>> for a in cdp.assumptions(): print(a) # doctest: +SKIP
|
| 93 |
+
girl(mary)
|
| 94 |
+
dog(rover)
|
| 95 |
+
((girl(rover) -> -dog(rover)) & (girl(mary) -> -dog(mary)))
|
| 96 |
+
((dog(rover) -> -girl(rover)) & (dog(mary) -> -girl(mary)))
|
| 97 |
+
chase(mary,rover)
|
| 98 |
+
>>> print(cdp.goal()) # doctest: +SKIP
|
| 99 |
+
((dog(rover) & (girl(rover) -> chase(rover,rover)) & (girl(mary) -> chase(mary,rover))) | (dog(mary) & (girl(rover) -> chase(rover,mary)) & (girl(mary) -> chase(mary,mary))))
|
| 100 |
+
>>> print(cdp.prove())
|
| 101 |
+
True
|
| 102 |
+
|
| 103 |
+
-----------------------
|
| 104 |
+
Unique Names Assumption
|
| 105 |
+
-----------------------
|
| 106 |
+
|
| 107 |
+
No two entities in the domain represent the same entity unless it can be
|
| 108 |
+
explicitly proven that they do. Therefore, if the domain contains "A" and "B",
|
| 109 |
+
then add the assumption "-(A = B)" if it is not the case that
|
| 110 |
+
"<assumptions> \|- (A = B)".
|
| 111 |
+
|
| 112 |
+
>>> p1 = read_expr(r'man(Socrates)')
|
| 113 |
+
>>> p2 = read_expr(r'man(Bill)')
|
| 114 |
+
>>> c = read_expr(r'exists x.exists y.-(x = y)')
|
| 115 |
+
>>> prover = Prover9Command(c, [p1,p2])
|
| 116 |
+
>>> prover.prove()
|
| 117 |
+
False
|
| 118 |
+
>>> unp = UniqueNamesProver(prover)
|
| 119 |
+
>>> for a in unp.assumptions(): print(a) # doctest: +SKIP
|
| 120 |
+
man(Socrates)
|
| 121 |
+
man(Bill)
|
| 122 |
+
-(Socrates = Bill)
|
| 123 |
+
>>> unp.prove()
|
| 124 |
+
True
|
| 125 |
+
|
| 126 |
+
>>> p1 = read_expr(r'all x.(walk(x) -> (x = Socrates))')
|
| 127 |
+
>>> p2 = read_expr(r'Bill = William')
|
| 128 |
+
>>> p3 = read_expr(r'Bill = Billy')
|
| 129 |
+
>>> c = read_expr(r'-walk(William)')
|
| 130 |
+
>>> prover = Prover9Command(c, [p1,p2,p3])
|
| 131 |
+
>>> prover.prove()
|
| 132 |
+
False
|
| 133 |
+
>>> unp = UniqueNamesProver(prover)
|
| 134 |
+
>>> for a in unp.assumptions(): print(a) # doctest: +SKIP
|
| 135 |
+
all x.(walk(x) -> (x = Socrates))
|
| 136 |
+
(Bill = William)
|
| 137 |
+
(Bill = Billy)
|
| 138 |
+
-(William = Socrates)
|
| 139 |
+
-(Billy = Socrates)
|
| 140 |
+
-(Socrates = Bill)
|
| 141 |
+
>>> unp.prove()
|
| 142 |
+
True
|
| 143 |
+
|
| 144 |
+
-----------------------
|
| 145 |
+
Closed World Assumption
|
| 146 |
+
-----------------------
|
| 147 |
+
|
| 148 |
+
The only entities that have certain properties are those that is it stated
|
| 149 |
+
have the properties. We accomplish this assumption by "completing" predicates.
|
| 150 |
+
|
| 151 |
+
If the assumptions contain "P(A)", then "all x.(P(x) -> (x=A))" is the completion
|
| 152 |
+
of "P". If the assumptions contain "all x.(ostrich(x) -> bird(x))", then
|
| 153 |
+
"all x.(bird(x) -> ostrich(x))" is the completion of "bird". If the
|
| 154 |
+
assumptions don't contain anything that are "P", then "all x.-P(x)" is the
|
| 155 |
+
completion of "P".
|
| 156 |
+
|
| 157 |
+
>>> p1 = read_expr(r'walk(Socrates)')
|
| 158 |
+
>>> p2 = read_expr(r'-(Socrates = Bill)')
|
| 159 |
+
>>> c = read_expr(r'-walk(Bill)')
|
| 160 |
+
>>> prover = Prover9Command(c, [p1,p2])
|
| 161 |
+
>>> prover.prove()
|
| 162 |
+
False
|
| 163 |
+
>>> cwp = ClosedWorldProver(prover)
|
| 164 |
+
>>> for a in cwp.assumptions(): print(a) # doctest: +SKIP
|
| 165 |
+
walk(Socrates)
|
| 166 |
+
-(Socrates = Bill)
|
| 167 |
+
all z1.(walk(z1) -> (z1 = Socrates))
|
| 168 |
+
>>> cwp.prove()
|
| 169 |
+
True
|
| 170 |
+
|
| 171 |
+
>>> p1 = read_expr(r'see(Socrates, John)')
|
| 172 |
+
>>> p2 = read_expr(r'see(John, Mary)')
|
| 173 |
+
>>> p3 = read_expr(r'-(Socrates = John)')
|
| 174 |
+
>>> p4 = read_expr(r'-(John = Mary)')
|
| 175 |
+
>>> c = read_expr(r'-see(Socrates, Mary)')
|
| 176 |
+
>>> prover = Prover9Command(c, [p1,p2,p3,p4])
|
| 177 |
+
>>> prover.prove()
|
| 178 |
+
False
|
| 179 |
+
>>> cwp = ClosedWorldProver(prover)
|
| 180 |
+
>>> for a in cwp.assumptions(): print(a) # doctest: +SKIP
|
| 181 |
+
see(Socrates,John)
|
| 182 |
+
see(John,Mary)
|
| 183 |
+
-(Socrates = John)
|
| 184 |
+
-(John = Mary)
|
| 185 |
+
all z3 z4.(see(z3,z4) -> (((z3 = Socrates) & (z4 = John)) | ((z3 = John) & (z4 = Mary))))
|
| 186 |
+
>>> cwp.prove()
|
| 187 |
+
True
|
| 188 |
+
|
| 189 |
+
>>> p1 = read_expr(r'all x.(ostrich(x) -> bird(x))')
|
| 190 |
+
>>> p2 = read_expr(r'bird(Tweety)')
|
| 191 |
+
>>> p3 = read_expr(r'-ostrich(Sam)')
|
| 192 |
+
>>> p4 = read_expr(r'Sam != Tweety')
|
| 193 |
+
>>> c = read_expr(r'-bird(Sam)')
|
| 194 |
+
>>> prover = Prover9Command(c, [p1,p2,p3,p4])
|
| 195 |
+
>>> prover.prove()
|
| 196 |
+
False
|
| 197 |
+
>>> cwp = ClosedWorldProver(prover)
|
| 198 |
+
>>> for a in cwp.assumptions(): print(a) # doctest: +SKIP
|
| 199 |
+
all x.(ostrich(x) -> bird(x))
|
| 200 |
+
bird(Tweety)
|
| 201 |
+
-ostrich(Sam)
|
| 202 |
+
-(Sam = Tweety)
|
| 203 |
+
all z7.-ostrich(z7)
|
| 204 |
+
all z8.(bird(z8) -> ((z8 = Tweety) | ostrich(z8)))
|
| 205 |
+
>>> print(cwp.prove())
|
| 206 |
+
True
|
| 207 |
+
|
| 208 |
+
-----------------------
|
| 209 |
+
Multi-Decorator Example
|
| 210 |
+
-----------------------
|
| 211 |
+
|
| 212 |
+
Decorators can be nested to utilize multiple assumptions.
|
| 213 |
+
|
| 214 |
+
>>> p1 = read_expr(r'see(Socrates, John)')
|
| 215 |
+
>>> p2 = read_expr(r'see(John, Mary)')
|
| 216 |
+
>>> c = read_expr(r'-see(Socrates, Mary)')
|
| 217 |
+
>>> prover = Prover9Command(c, [p1,p2])
|
| 218 |
+
>>> print(prover.prove())
|
| 219 |
+
False
|
| 220 |
+
>>> cmd = ClosedDomainProver(UniqueNamesProver(ClosedWorldProver(prover)))
|
| 221 |
+
>>> print(cmd.prove())
|
| 222 |
+
True
|
| 223 |
+
|
| 224 |
+
-----------------
|
| 225 |
+
Default Reasoning
|
| 226 |
+
-----------------
|
| 227 |
+
>>> logic._counter._value = 0
|
| 228 |
+
>>> premises = []
|
| 229 |
+
|
| 230 |
+
define the taxonomy
|
| 231 |
+
|
| 232 |
+
>>> premises.append(read_expr(r'all x.(elephant(x) -> animal(x))'))
|
| 233 |
+
>>> premises.append(read_expr(r'all x.(bird(x) -> animal(x))'))
|
| 234 |
+
>>> premises.append(read_expr(r'all x.(dove(x) -> bird(x))'))
|
| 235 |
+
>>> premises.append(read_expr(r'all x.(ostrich(x) -> bird(x))'))
|
| 236 |
+
>>> premises.append(read_expr(r'all x.(flying_ostrich(x) -> ostrich(x))'))
|
| 237 |
+
|
| 238 |
+
default the properties using abnormalities
|
| 239 |
+
|
| 240 |
+
>>> premises.append(read_expr(r'all x.((animal(x) & -Ab1(x)) -> -fly(x))')) #normal animals don't fly
|
| 241 |
+
>>> premises.append(read_expr(r'all x.((bird(x) & -Ab2(x)) -> fly(x))')) #normal birds fly
|
| 242 |
+
>>> premises.append(read_expr(r'all x.((ostrich(x) & -Ab3(x)) -> -fly(x))')) #normal ostriches don't fly
|
| 243 |
+
|
| 244 |
+
specify abnormal entities
|
| 245 |
+
|
| 246 |
+
>>> premises.append(read_expr(r'all x.(bird(x) -> Ab1(x))')) #flight
|
| 247 |
+
>>> premises.append(read_expr(r'all x.(ostrich(x) -> Ab2(x))')) #non-flying bird
|
| 248 |
+
>>> premises.append(read_expr(r'all x.(flying_ostrich(x) -> Ab3(x))')) #flying ostrich
|
| 249 |
+
|
| 250 |
+
define entities
|
| 251 |
+
|
| 252 |
+
>>> premises.append(read_expr(r'elephant(el)'))
|
| 253 |
+
>>> premises.append(read_expr(r'dove(do)'))
|
| 254 |
+
>>> premises.append(read_expr(r'ostrich(os)'))
|
| 255 |
+
|
| 256 |
+
print the augmented assumptions list
|
| 257 |
+
|
| 258 |
+
>>> prover = Prover9Command(None, premises)
|
| 259 |
+
>>> command = UniqueNamesProver(ClosedWorldProver(prover))
|
| 260 |
+
>>> for a in command.assumptions(): print(a) # doctest: +SKIP
|
| 261 |
+
all x.(elephant(x) -> animal(x))
|
| 262 |
+
all x.(bird(x) -> animal(x))
|
| 263 |
+
all x.(dove(x) -> bird(x))
|
| 264 |
+
all x.(ostrich(x) -> bird(x))
|
| 265 |
+
all x.(flying_ostrich(x) -> ostrich(x))
|
| 266 |
+
all x.((animal(x) & -Ab1(x)) -> -fly(x))
|
| 267 |
+
all x.((bird(x) & -Ab2(x)) -> fly(x))
|
| 268 |
+
all x.((ostrich(x) & -Ab3(x)) -> -fly(x))
|
| 269 |
+
all x.(bird(x) -> Ab1(x))
|
| 270 |
+
all x.(ostrich(x) -> Ab2(x))
|
| 271 |
+
all x.(flying_ostrich(x) -> Ab3(x))
|
| 272 |
+
elephant(el)
|
| 273 |
+
dove(do)
|
| 274 |
+
ostrich(os)
|
| 275 |
+
all z1.(animal(z1) -> (elephant(z1) | bird(z1)))
|
| 276 |
+
all z2.(Ab1(z2) -> bird(z2))
|
| 277 |
+
all z3.(bird(z3) -> (dove(z3) | ostrich(z3)))
|
| 278 |
+
all z4.(dove(z4) -> (z4 = do))
|
| 279 |
+
all z5.(Ab2(z5) -> ostrich(z5))
|
| 280 |
+
all z6.(Ab3(z6) -> flying_ostrich(z6))
|
| 281 |
+
all z7.(ostrich(z7) -> ((z7 = os) | flying_ostrich(z7)))
|
| 282 |
+
all z8.-flying_ostrich(z8)
|
| 283 |
+
all z9.(elephant(z9) -> (z9 = el))
|
| 284 |
+
-(el = os)
|
| 285 |
+
-(el = do)
|
| 286 |
+
-(os = do)
|
| 287 |
+
|
| 288 |
+
>>> UniqueNamesProver(ClosedWorldProver(Prover9Command(read_expr('-fly(el)'), premises))).prove()
|
| 289 |
+
True
|
| 290 |
+
>>> UniqueNamesProver(ClosedWorldProver(Prover9Command(read_expr('fly(do)'), premises))).prove()
|
| 291 |
+
True
|
| 292 |
+
>>> UniqueNamesProver(ClosedWorldProver(Prover9Command(read_expr('-fly(os)'), premises))).prove()
|
| 293 |
+
True
|