code stringlengths 1 1.72M | language stringclasses 1
value |
|---|---|
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to manipulate target element information."""
__author__ = 'alexto@google.com (Alexis O. Torres)'
import logging
# Markup tokens that bracket a target element description in bug text.
TARGET_ELEMENT_START = 'target_element:'
TARGET_ELEMENT_END = ':target_element'
# Sentinel that discards any target element bindings recorded before it.
TARGET_ELEMENT_CLEAR = '[[BITE_CLEAR_BUG_BINDINGS]]'


def GetTargetElementStr(descr):
    """Wrap a target element description in start/end markers.

    Args:
        descr: Description of an element on the page, e.g. 'id=logo'.

    Returns:
        The description string surrounded by the start/end markers.
    """
    parts = (TARGET_ELEMENT_START, descr, TARGET_ELEMENT_END)
    return '%s %s %s' % parts


def ExtractTargetElement(text):
    """Extract the most recent target element description from bug text.

    Args:
        text: Full text of a bug, possibly containing target element markup.

    Returns:
        The last target element description found after the most recent
        clear marker, or the empty string when none is present.
    """
    if not text:
        logging.warning('ExtractTargetElement: No text specified.')
        return ''
    # Only consider markup that appears after the user last cleared the
    # previous target element bindings.
    relevant = text.split(TARGET_ELEMENT_CLEAR)[-1]
    begin = relevant.rfind(TARGET_ELEMENT_START)
    if begin < 0:
        logging.debug(
            'ExtractTargetElement: Did not find target element information.')
        return ''
    logging.debug('ExtractTargetElement: Found TARGET_ELEMENT_START.')
    begin += len(TARGET_ELEMENT_START)
    finish = relevant.find(TARGET_ELEMENT_END, begin)
    if finish < 0:
        logging.warning('ExtractTargetElement: Did not find TARGET_ELEMENT_END.')
        return ''
    logging.debug('ExtractTargetElement: Found TARGET_ELEMENT_END.')
    description = relevant[begin:finish].strip()
    logging.debug(
        'ExtractTargetElement: target_element information: %s', description)
    return description
| Python |
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for encoding and decoding strings."""
__author__ = 'alexis.torrs@gmail.com (Alexis O. Torres)'
def EncodeToAscii(value):
    """Encode a string into ASCII for a datastore supported encoding.

    Despite the historical docstring claim of utf-8, the result is
    ASCII-encoded; characters outside ASCII are silently dropped ('ignore').

    Args:
        value: String to encode. Falsy values (None, '') are returned as-is.

    Returns:
        An ASCII-encoded version of value, or value itself when falsy.
    """
    if not value:
        # Nothing to encode; preserve None/'' exactly.
        return value
    try:
        result = value.encode('ascii', 'ignore')
    except UnicodeDecodeError:
        # Python 2 byte strings containing non-ASCII bytes land here:
        # decode as utf-8 first, then strip anything outside ASCII.
        # NOTE: `unicode` exists only on Python 2, matching this module's era.
        # Bug fix: this module never imported logging, so the original code
        # raised NameError whenever this branch executed.
        import logging
        logging.debug('String contains unicode characters, normalizing')
        new_str = unicode(value, encoding='utf-8', errors='ignore')
        result = new_str.encode('ascii', 'ignore')
    return result
| Python |
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bite constants."""
__author__ = 'phu@google.com (Po Hu)'
#Use implict string concat
#pylint: disable-msg=C6406
# Default UI payload for the "Everything" activity stream view.
EVERYTHING_DEFAULT_UI_DATA = {
    'name': 'all',
    'href': '/event/show_all',
    'title': 'Everything',
    'artifacttitle': 'Recent Activities',
    'icon': '/images/artifacts/testing.png',
    'description': ('This page shows you the recent activities that are '
                    'performed'),
    'data': [],
    'filters': [{
        'name': 'all',
        'title': 'All items',
        'href': '/event/show_all',
        'selected': True,
    }],
}

# Default UI payload for the test suite ("Sets") view.
SUITES_DEFAULT_UI_DATA = {
    'name': 'suites',
    'href': '/suite/show_all',
    'title': 'Sets',
    'icon': '/images/artifacts/testsuite.png',
    'description': ('Sets are a collection of tests intended to '
                    'be run under a specific set of configuration(s).'),
    'data': [],
    'filters': [{
        'name': 'all',
        'title': 'All items',
        'href': '/suite/show_all',
        'selected': True,
    }],
}

# Default UI payload for the test run view, including run-status filters.
RUNS_DEFAULT_UI_DATA = {
    'name': 'runs',
    'title': 'Runs',
    'href': '/run/show_all',
    'icon': '/images/artifacts/testrun.png',
    'description': ('Runs are an execution of a set of tests. '
                    'Results are stored in the test case manager.'),
    'filters': [
        {'name': 'all', 'title': 'All items',
         'href': '/run/show_all?filter=all'},
        {'name': 'completed', 'title': 'Completed',
         'href': '/run/show_all?filter=completed'},
        {'name': 'running', 'title': 'Running',
         'href': '/run/show_all?filter=running'},
        {'name': 'scheduled', 'title': 'Scheduled',
         'href': '/run/show_all?filter=scheduled'},
    ],
}

# Top-level navigation entries; "Tests" is the selected tab by default.
NAV_DEFAULT_DATA = [
    {'title': 'Specs'},
    {'title': 'Code'},
    {'title': 'Tests', 'href': '/suite/show_all', 'selected': True},
    {'title': 'Admin'},
]

# Scope selector entries shown alongside the default views.
SCOPE_DEFAULT_DATA = {
    'name': 'runs',
    'scopes': [
        {'name': 'all', 'title': 'Everything', 'href': '/event/show_all'},
        {'name': 'suites', 'title': 'Sets', 'href': '/suite/show_all'},
        {'name': 'runs', 'title': 'Runs', 'href': '/run/show_all'},
    ],
}
# Maps an event action keyword to its human-readable past-tense phrase.
EVENT_ACTION_TO_READABLE = {
    'create': 'was created',
    'modify': 'was modified',
    'pass': 'was passed',
    # Bug fix: 'fail' previously mapped to 'was passed' (copy/paste error).
    'fail': 'was failed',
    'start': 'was started',
    'schedule': 'was scheduled',
    'delete': 'was deleted',
    'complete': 'was completed'
}
| Python |
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common settings for the AppEngine front-end."""
__author__ = 'michaelwill@google.com (Michael Williamson)'
import os
try:
    PORT = os.environ['SERVER_PORT']
    SERVER_NAME = os.environ['SERVER_NAME']
except KeyError:
    # Unit tests run without these environment variables, so fall back to
    # reasonable local defaults for both values.
    PORT = '8080'
    SERVER_NAME = 'localhost'

# Externally visible host string; an empty port is omitted entirely.
HOST_NAME_AND_PORT = '%s:%s' % (SERVER_NAME, PORT) if PORT else SERVER_NAME

STORAGE_GMAIL_ACCOUNT = 'bite.storage@gmail.com'
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
"""Server root."""
__author__ = 'jason.stredwick@gmail.com (Jason Stredwick)'
import os
ROOT = os.path.dirname(__file__)
| Python |
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: jasonstredwick@google.com (Jason Stredwick)
#
# Common build definitions for packages within BITE's server code base.
# Commonly used unit testing support. To use, depend on this library.
# Build targets every BITE server unit test needs on its dependency list:
# the webapp framework, Google Python base libs, the Python test base, and
# the mox mocking library.
TESTING_DEPS = [
    '//apphosting/ext/webapp',
    '//pyglib',
    '//testing/pybase',
    '//third_party/py/mox',
]
| Python |
# -*- mode: python; -*-
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: jasonstredwick@google.com (Jason Stredwick)
"""Utility rules for soy templates.
Before you use any functions from this file, include the following
line in your BUILD file.
subinclude('//testing/chronos/bite/builddefs:BUILD.soy_rule')
"""
def GenSoyJs(rule_name, files, flags=None, locales=None, compiler=None):
    """Generate the javascript files from the soy template files.

    Emits one genrule producing a .js output per (soy file, locale) pair.

    Args:
        rule_name - The rule name.
        files - The list of soy template files.
        flags - A list of compiler flags; None selects the defaults below.
        locales - A list of locales to generate templates for; None selects
            the default locale list.
        compiler - The build target that compiles soy into javascript; None
            selects the default compiler target.
    """
    if not locales:
        locales = ['en']
    if not compiler:
        compiler = '//java/com/google/template/soy:SoyToJsSrcCompiler'
    if not flags:
        flags = [
            '--should_provide_require_soy_namespaces',
            '--shouldGenerateJsdoc',
            '--locales=' + ','.join(locales),
            '--message_file_path_format="/home/build//' +
            'googledata/transconsole/xtb/SoyExamples/{LOCALE}.xtb"',
            '--output_path_format="$(@D)/{INPUT_FILE_NAME}__{LOCALE_LOWER_CASE}.js"'
        ]
    flag_str = ' '.join(flags)
    # One output file per (source, locale) pair, locale lowercased with
    # dashes folded to underscores.
    outputs = []
    for src in files:
        for locale in locales:
            outputs.append(src + '__' + locale.lower().replace('-', '_') + '.js')
    genrule(name=rule_name,
            srcs=files,
            tools=[compiler],
            outs=outputs,
            cmd='$(location ' + compiler + ') ' + flag_str + ' $(SRCS)')
| Python |
# -*- mode: python; -*-
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: jasonstredwick@google.com (Jason Stredwick)
"""Utility rules for building extension bundles.
Before you use any functions from this file, include the following
line in your BUILD file.
subinclude('//testing/chronos/bite/builddefs:BUILD.bundle')
Usage example:
MIMIC_SRC = '//experimental/users/jasonstredwick/mimic/mimic'
BUNDLE_ENTRIES = [
# Mimic JavaScript entries.
FilesetEntry(srcdir = MIMIC_SRC + ':BUILD',
destdir = 'chrome/content',
files = [ ':javascript_files' ],
),
# Extension resources.
FilesetEntry(files =
glob([ 'chrome/**' ]) +
[ 'chrome.manifest', 'install.rdf' ]
),
]
# Generated rules: mimic_soft_rule and mimic_soft
# Generated output: mimic_soft and mimic (both directories)
GenBundle('mimic', BUNDLE_ENTRIES)
# Generated rule: mimic_xpi_rule
# Generated output: mimic.xpi
GenXPI('mimic', [ ':mimic_soft' ])
Dependency example:
BOTS_PKG = '//testing/chronos/appcompat/extension'
EXTENSION_ENTRIES = [
# Grab the extension bundle for Bots AppCompat.
FilesetEntry(srcdir = BOTS_PKG' + :bundle_soft',
destdir = 'extension',
),
]
MIMIC_PKG = '//experimental/users/jasonstredwick/mimic'
FF35_ENTRIES = EXTENSION_ENTRIES + [
# Grab the extension bundle for firefox3_5 mimic package.
FilesetEntry(srcdir = MIMIC_PKG + '/firefox3_5:mimic_soft'),
]
# Generated rules: mimic_ff35_soft_rule and mimic_ff35_soft
# Generated output: mimic_ff35_soft and mimic_ff35 (both directories)
GenBundle('mimic_ff35', FF35_ENTRIES)
"""
def GenBundle(base_name, fileset_entries):
    """Generate directories containing unpacked extensions.

    This function will generate two rules:
        [base_name]_soft_rule and [base_name]_rule.

    The rules created by this function generate folders using the given list
    of FilesetEntries. The soft rule generates a folder containing only
    symbolic links to its files, while the hard rule generates a folder
    containing the actual files.

    The hard version is necessary because Fileset only outputs symlinks,
    and Chrome can't load unpacked extensions that contain symbolically
    linked files. Also note that the hard rule can not run on forge because
    forge can not create and return entire folders to the client.

    Args:
        base_name - The name used to create the directories.
        fileset_entries - A list of FilesetEntry.
    """
    # Bug fix: was base_name + 'soft_rule', which contradicts the documented
    # [base_name]_soft_rule naming and the [base_name]_soft output below.
    soft_rule_name = base_name + '_soft_rule'
    hard_rule_name = base_name + '_rule'
    soft_output = base_name + '_soft'
    hard_output = base_name
    Fileset(name=soft_rule_name,
            out=soft_output,
            entries=fileset_entries,
            )
    genrule(name=hard_rule_name,
            srcs=[':' + soft_output],
            outs=[hard_output],
            output_to_bindir=1,
            # -L dereferences symlinks so the copy holds real files.
            cmd='cp -rfL $(SRCS) $(OUTS)',
            local=1,
            )
def GenXPI(base_name, target_srcs):
    """Generate an xpi file for the specified extension.

    Creates the Firefox extension bundle (.xpi); drag and drop the file
    onto the browser to install it. The generated filename is
    [base_name].xpi and the generated rule is [base_name]_xpi_rule.

    Args:
        base_name - The base name for the xpi file.
        target_srcs - A list containing the package:rules to be compressed.
    """
    genrule(name=base_name + '_xpi_rule',
            srcs=target_srcs,
            outs=[base_name + '.xpi'],
            output_to_bindir=1,
            # Stage a dereferenced copy, then zip the staged tree.
            cmd='cp -rfL $(SRCS) temp; cd temp; zip -r ../$(OUTS) *',
            local=1,
            )
| Python |
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: jasonstredwick@google.com (Jason Stredwick)
#
# Common build definitions for packages within BITE's client code base.
subinclude('//javascript/externs/builddefs:BUILD');
subinclude('//javascript/closure/builddefs:BUILD');
# Closure compiler flag sets for BITE client builds. The
# CLOSURE_COMPILER_FLAGS_* base lists come from the closure builddefs
# subinclude above.
DEBUG_COMPILER_DEFS = CLOSURE_COMPILER_FLAGS_UNOBFUSCATED + [
    '--generate_exports',
]
OPTIMIZED_COMPILER_DEFS = CLOSURE_COMPILER_FLAGS_STRICT + [
    '--generate_exports',
]
# Strict error checking layered on the debug (unobfuscated) flags.
# NOTE(review): built from DEBUG_COMPILER_DEFS rather than the optimized
# set -- confirm that is intentional for the common build.
COMPILER_DEFS = DEBUG_COMPILER_DEFS + [
    '--aggressive_var_check_level=ERROR',
    '--check_global_names_level=ERROR',
    '--check_provides=ERROR',
    '--jscomp_error=accessControls',
    '--jscomp_error=checkRegExp',
    '--jscomp_error=checkTypes',
    '--jscomp_error=checkVars',
    '--jscomp_error=deprecated',
    '--jscomp_error=fileoverviewTags',
    '--jscomp_error=invalidCasts',
    '--jscomp_error=missingProperties',
    '--jscomp_error=nonStandardJsDocs',
    '--jscomp_error=strictModuleDepCheck',
    '--jscomp_error=undefinedVars',
    '--jscomp_error=unknownDefines',
    '--jscomp_error=visibility',
    '--strict',
]
# CSS compiler flags; permits the vendor-prefixed gradient functions and
# IE-specific syntax used by the extension's stylesheets.
CSS_COMPILER_DEFS = [
    '--add_copyright',
    '--allow_unrecognized_functions',
    '--allowed_non_standard_function=color-stop',
    '--allowed_non_standard_pseudo_type=nth-child',
    '--allowed_non_standard_function=-moz-linear-gradient',
    '--allowed_non_standard_function=-webkit-gradient',
    '--allowed_non_standard_function=from',
    '--allowed_non_standard_function=to',
    '--allowed_non_standard_function=alpha',
    '--allow_ie_function_syntax',
    '--allow_unrecognized_pseudo_types',
    '--simplify_css',
    '--eliminate_dead_styles',
]
| Python |
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build BITE."""
__author__ = ('ralphj@google.com (Julie Ralph)'
'jasonstredwick@google.com (Jason Stredwick)')
import logging
import os
import shutil
import sys
from builddefs.tools import access as tools
from builddefs.tools import base
# Commands used by the build script.
COMMAND_BUILD = 'build'
COMMAND_CLEAN = 'clean'
COMMAND_HELP = 'help'
# Command options.
COMMAND_OPTION_ALL = 'all'
COMMAND_OPTION_EXPUNGE = 'expunge'
COMMAND_OPTION_EXTENSION = 'extension'
COMMAND_OPTION_SERVER = 'server'
COMMAND_OPTION_SERVER_APPENGINE = 'server_appengine'
### Root paths
# The relative path from the build script to where all dependencies are stored.
DEPS_ROOT = 'deps'
# The relative path from the build script to where all outputs are stored.
OUTPUT_ROOT = 'output'
# The relative path from the build script to the extension server generated
# files.
EXTENSION_ROOT = os.path.join(OUTPUT_ROOT, 'extension')
SERVER_APPENGINE_ROOT = os.path.join(OUTPUT_ROOT, 'server-appengine')
# The default server build is the AppEngine one.
SERVER_ROOT = SERVER_APPENGINE_ROOT
# Outline the dependencies for building BITE; how to install and to where.
# Each entry names the version-control tool, its fetch subcommand, the
# source URL, and the local checkout directory.
DEPS = {
    'ace': {
        'tool': 'git',
        'command': 'clone',
        'url': 'git://github.com/ajaxorg/ace.git',
        'output': os.path.join(DEPS_ROOT, 'ace')
    },
    'atoms': {
        'tool': 'svn',
        'command': 'checkout',
        'url': 'http://selenium.googlecode.com/svn/trunk/javascript/atoms',
        'output': os.path.join(DEPS_ROOT, 'selenium-atoms-lib')
    },
    'gdata': {
        'tool': 'hg',
        'command': 'clone',
        'url': 'https://code.google.com/p/gdata-python-client/',
        'output': os.path.join(DEPS_ROOT, 'gdata')
    }
}
class Error(Exception):
    """Base exception for failures raised by this build script."""
def CreateRoots():
    """Ensure the common folders exist and are writable.

    Creates DEPS_ROOT and OUTPUT_ROOT when missing.

    Raises:
        Error: If a path exists but is not a directory, or is not writable.
    """
    paths = [DEPS_ROOT, OUTPUT_ROOT]
    for path in paths:
        if os.path.exists(path) and not os.path.isdir(path):
            # Lazy %-args: let logging do the formatting only when emitted.
            logging.error('%s already exists and is not a directory.', path)
            # Carry the reason in the exception instead of a bare raise.
            raise Error('%s already exists and is not a directory.' % path)
        elif not os.path.exists(path):
            os.mkdir(path)
        if not os.access(path, os.W_OK):
            logging.error('%s is not writable.', path)
            raise Error('%s is not writable.' % path)
def Initialize():
    """Ensure the folders, tools, and library dependencies are present.

    Exits the process when any setup step fails.
    """
    print('Initializing build.')
    try:
        CreateRoots()
        # Tools and library dependencies are checked out under DEPS_ROOT.
        tools.Initialize(DEPS_ROOT)
        InitializeDeps()
    except (base.ToolError, Error):
        print('Exiting ...')
        sys.exit()
def InitializeDeps():
    """Ensure the library dependencies are present.

    Attempts every dependency before failing, so all problems are reported
    in one pass.

    Raises:
        Error: If any dependency could not be checked out.
    """
    had_failure = False
    for dep in DEPS.values():
        print('Initializing library dependency (%s).' % dep['url'])
        try:
            if not os.path.exists(dep['output']):
                checkout_tool = tools.Get(dep['tool'])
                checkout_tool.Execute(
                    [dep['command'], dep['url'], dep['output']], DEPS_ROOT)
                # The tool reported success but produced nothing usable.
                if not os.path.exists(dep['output']):
                    logging.error('...Failed')
                    raise Error
        except (base.ToolError, Error):
            had_failure = True
    if had_failure:
        raise Error
def Build(target):
    """Build the specified target(s).

    Each target is cleaned before being built so the per-target build
    functions can assume an empty output folder.

    Args:
        target: The target to build. (string)
    """
    server_targets = (COMMAND_OPTION_SERVER, COMMAND_OPTION_SERVER_APPENGINE)
    try:
        if target == COMMAND_OPTION_ALL:
            Clean(COMMAND_OPTION_EXTENSION)
            BuildExtension()
            Clean(COMMAND_OPTION_SERVER_APPENGINE)
            BuildServerAppengine()
        elif target == COMMAND_OPTION_EXTENSION:
            Clean(COMMAND_OPTION_EXTENSION)
            BuildExtension()
        elif target in server_targets:
            # TODO(jasonstredwick): Refactor for genericity.
            Clean(COMMAND_OPTION_SERVER_APPENGINE)
            BuildServerAppengine()
        else:
            logging.error('Target (%s) not recognized for build.' % target)
            raise Error
    except Error:
        # Failures were already logged by the failing step; swallow here so
        # the caller can finish with its normal exit path.
        pass
def BuildExtension():
    """Construct the BITE extension.

    Assumes that no material is present in the output folder. Copies static
    resources, compiles the soy templates, then compiles the javascript
    targets into EXTENSION_ROOT. On any failure the partial output is
    removed and the function returns early.
    """
    # Construct extension folder structure.
    os.mkdir(EXTENSION_ROOT)
    if not os.path.exists(EXTENSION_ROOT):
        logging.error('Failed to create output folder for extension.')
        return
    # Move over extension folders.
    extension_folders = ['styles', 'imgs']
    for folder in extension_folders:
        path_src = os.path.join('extension', folder)
        path_dst = os.path.join(EXTENSION_ROOT, folder)
        if os.path.exists(path_dst):
            shutil.rmtree(path_dst)
        shutil.copytree(path_src, path_dst)
    # Move static resources.
    html_path = os.path.join('extension', 'html')
    static_files = [os.path.join(html_path, 'background.html'),
                    os.path.join(html_path, 'popup.html'),
                    os.path.join('extension', 'src', 'options', 'options.html'),
                    os.path.join('extension', 'manifest.json')]
    for static_file in static_files:
        shutil.copy(static_file, EXTENSION_ROOT)
    # Combine tool resources
    # TODO (jasonstredwick): Fix this.
    rpf_path = os.path.join('tools', 'rpf', 'extension')
    static_files = [os.path.join(rpf_path, 'html', 'console.html')]
    for static_file in static_files:
        shutil.copy(static_file, EXTENSION_ROOT)
    static_files = [os.path.join(rpf_path, 'styles', 'recordmodemanager.css')]
    for static_file in static_files:
        shutil.copy(static_file, os.path.join(EXTENSION_ROOT, 'styles'))
    # Copy the required ACE files.
    ace_dst = os.path.join(EXTENSION_ROOT, 'ace')
    ace_src = os.path.join(DEPS['ace']['output'], 'build', 'src')
    shutil.copytree(ace_src, ace_dst)
    # Compile the soy templates.
    genfiles_path = 'genfiles'
    os.mkdir(genfiles_path)
    extension_src_path = os.path.join('extension', 'templates')
    bug_src_path = os.path.join('tools', 'bug', 'extension', 'templates')
    rpf_src_path = os.path.join('tools', 'rpf', 'extension', 'templates')
    # Maps each soy basename to the template directory containing it.
    soy_files = {'popup': extension_src_path,
                 'rpfconsole': rpf_src_path,
                 'rpf_dialogs': rpf_src_path,
                 'locatorsupdater': rpf_src_path,
                 'consoles': bug_src_path,
                 'newbug_console': bug_src_path,
                 'newbug_type_selector': bug_src_path}
    try:
        soy_compiler = tools.Get(tools.SOY_TO_JS_COMPILER)
    except base.ToolError:
        logging.error('Extension build process failed, halting.')
        Clean(COMMAND_OPTION_EXTENSION)
        return
    for filename in soy_files:
        src = os.path.join(soy_files[filename], filename) + '.soy'
        dst = os.path.join(genfiles_path, filename) + '.js'
        params = ['--shouldProvideRequireSoyNamespaces',
                  ('--outputPathFormat %s' % dst),
                  src]
        try:
            soy_compiler.Execute(params, DEPS_ROOT)
            if not os.path.exists(dst):
                raise Error
        except (base.ToolError, Error):
            logging.error('Failed to compile soy file (%s).' % filename)
            shutil.rmtree(genfiles_path)
            Clean(COMMAND_OPTION_EXTENSION)
            return
    # Compile javascript. Maps each source file to its compiled output.
    js_targets = {os.path.join(extension_src_path, 'background.js'):
                      os.path.join(EXTENSION_ROOT, 'background_script.js'),
                  os.path.join(rpf_src_path, 'console.js'):
                      os.path.join(EXTENSION_ROOT, 'console_script.js'),
                  os.path.join(extension_src_path, 'content.js'):
                      os.path.join(EXTENSION_ROOT, 'content_script.js'),
                  os.path.join(extension_src_path, 'elementhelper.js'):
                      os.path.join(EXTENSION_ROOT, 'elementhelper_script.js'),
                  os.path.join(rpf_src_path, 'getactioninfo.js'):
                      os.path.join(EXTENSION_ROOT, 'getactioninfo_script.js'),
                  os.path.join(extension_src_path, 'popup.js'):
                      os.path.join(EXTENSION_ROOT, 'popup_script.js'),
                  os.path.join(extension_src_path, 'options', 'page.js'):
                      os.path.join(EXTENSION_ROOT, 'options_script.js')}
    try:
        closure_builder = tools.Get(tools.CLOSURE_COMPILER)
    except base.ToolError:
        logging.error('Extension build process failed, halting.')
        shutil.rmtree(genfiles_path)
        Clean(COMMAND_OPTION_EXTENSION)
        return
    for target in js_targets:
        src = target
        dst = js_targets[target]
        params = [('--root=%s' % genfiles_path),
                  ('--root=%s' % extension_src_path),
                  ('--root=%s' % rpf_src_path),
                  ('--root=%s' % bug_src_path),
                  ('--root=%s' % 'common'),
                  # TODO (jasonstredwick): Figure out how to link this dep.
                  ('--root=%s' % os.path.join(DEPS_ROOT, 'soy-compiler')),
                  ('--root=%s' % DEPS['atoms']['output']),
                  ('--input=%s' % src),
                  ('--output_file=%s' % dst),
                  ('--output_mode=compiled')]
        try:
            closure_builder.Execute(params, DEPS_ROOT)
            if not os.path.exists(dst):
                raise Error
        except (base.ToolError, Error):
            # Bug fix: previously logged `filename`, a stale loop variable
            # left over from the soy loop, instead of the failing JS source.
            logging.error('Failed to compile JavaScript file (%s).' % src)
            shutil.rmtree(genfiles_path)
            Clean(COMMAND_OPTION_EXTENSION)
            return
    # Clean up generated files.
    shutil.rmtree(genfiles_path)
def BuildServerAppengine():
    """Copy the gData client libraries into the server source tree.

    NOTE(review): these source paths assume a 'gdata-python-client' checkout
    in the working directory, but DEPS above checks gdata out to deps/gdata;
    likewise the 'src/server' destination does not match
    SERVER_APPENGINE_ROOT. Confirm the intended layout before relying on
    this step.
    """
    # Copy gData files to the server.
    shutil.copytree('gdata-python-client/src/gdata', 'src/server/gdata')
    shutil.copytree('gdata-python-client/src/atom', 'src/server/atom')
def Clean(target):
    """Cleans the given target; i.e. remove it.

    Args:
        target: The target to remove. (string)

    Raises:
        Error: If the target is unknown or removal fails.
    """
    def _Remove(path):
        # Removing a path that does not exist is a no-op.
        if os.path.exists(path):
            shutil.rmtree(path)

    try:
        if target == COMMAND_OPTION_EXPUNGE:
            _Remove(DEPS_ROOT)
            _Remove(OUTPUT_ROOT)
            CreateRoots()
        elif target == COMMAND_OPTION_ALL:
            _Remove(OUTPUT_ROOT)
            CreateRoots()
        elif target == COMMAND_OPTION_EXTENSION:
            _Remove(EXTENSION_ROOT)
        elif target == COMMAND_OPTION_SERVER:
            _Remove(SERVER_ROOT)
        elif target == COMMAND_OPTION_SERVER_APPENGINE:
            _Remove(SERVER_APPENGINE_ROOT)
        else:
            logging.error('Target (%s) not recognized for clean.' % target)
            raise Error
    except (OSError, Error):
        logging.error('clean failed; could not remove root folders.')
        raise Error
def Usage():
    """Print a usage message describing the commands and options."""
    lines = [
        'Usage: python build.py <command> <option>',
        '',
        'Available commands:',
        ' %s\t\t\tBuilds the given targets.',
        ' %s\t\t\tCleans up the generated output.',
        ' %s\t\t\tDisplays this usage message.',
        '',
        'Available options:',
        ' %s\t\t\tRemoves all targets, but does not include tools and ',
        ' \t\t\texternal dependencies.',
        ' %s\t\tRemoves all targets and all tools and dependencies.',
        ' \t\t\t(Only applies to clean)',
        ' %s\t\tThe BITE extension.',
        ' %s\t\tThe default server build (AppEngine).',
        ' %s\tThe AppEngine server build.'
    ]
    # Substitute the command names first, then the option names, in the
    # order the placeholders appear above.
    substitutions = (COMMAND_BUILD, COMMAND_CLEAN, COMMAND_HELP,
                     COMMAND_OPTION_ALL, COMMAND_OPTION_EXPUNGE,
                     COMMAND_OPTION_EXTENSION, COMMAND_OPTION_SERVER,
                     COMMAND_OPTION_SERVER_APPENGINE)
    print('\n'.join(lines) % substitutions)
def main():
    """The main entry point for the script."""
    args = sys.argv
    # Require exactly: build.py <command> <option>; otherwise show help.
    if len(args) != 3 or args[1] == COMMAND_HELP:
        Usage()
        sys.exit()
    dispatch = {COMMAND_BUILD: Build, COMMAND_CLEAN: Clean}
    command_name = args[1]
    if command_name not in dispatch:
        Usage()
        sys.exit()
    try:
        Initialize()
        dispatch[command_name](args[2])
    except Error:
        pass
    print('exiting...')


if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The build helper."""
__author__ = ('ralphj@google.com (Julie Ralph)'
'jason.stredwick@gmail.com (Jason Stredwick)')
import logging
import optparse
import os
import shutil
import subprocess
import sys
import time
import urllib
import zipfile
# Common folders.
GENFILES_ROOT = 'genfiles'
OUTPUT_ROOT = 'output'
DEPS_ROOT = 'deps'
# Common roots
BUG_ROOT = os.path.join('tools', 'bugs', 'extension')
RPF_ROOT = os.path.join('tools', 'rpf', 'extension')
# Output paths
EXTENSION_DST = os.path.join(OUTPUT_ROOT, 'extension')
SERVER_DST = os.path.join(OUTPUT_ROOT, 'server')
IMGS_DST = os.path.join(EXTENSION_DST, 'imgs')
OPTIONS_DST = os.path.join(EXTENSION_DST, 'options')
STYLES_DST = os.path.join(EXTENSION_DST, 'styles')
# Keywords for DEPS
CHECKOUT_COMMAND = 'checkout'
ROOT = 'root'
URL = 'url'
# Define dependencies that are checkout from various repositories.
# Each entry maps a dependency name to its local checkout ROOT, its source
# URL, and the shell command template used to fetch it.
DEPS = {
    'ace': {
        ROOT: os.path.join(DEPS_ROOT, 'ace'),
        URL: 'git://github.com/ajaxorg/ace.git',
        CHECKOUT_COMMAND: 'git clone %s %s'
    },
    'gdata-python-client': {
        ROOT: os.path.join(DEPS_ROOT, 'gdata-python-client'),
        URL: 'http://code.google.com/p/gdata-python-client/',
        CHECKOUT_COMMAND: 'hg clone %s %s'
    },
    'selenium-atoms-lib': {
        ROOT: os.path.join(DEPS_ROOT, 'selenium-atoms-lib'),
        URL: 'http://selenium.googlecode.com/svn/trunk/javascript/atoms',
        CHECKOUT_COMMAND: 'svn checkout %s %s'
    },
    'closure-library': {
        ROOT: os.path.join(DEPS_ROOT, 'closure', 'closure-library'),
        URL: 'http://closure-library.googlecode.com/svn/trunk/',
        CHECKOUT_COMMAND: 'svn checkout %s %s'
    },
    'urlnorm': {
        ROOT: os.path.join(DEPS_ROOT, 'urlnorm'),
        URL: 'git://gist.github.com/246089.git',
        CHECKOUT_COMMAND: 'git clone %s %s'
    },
    'mrtaskman': {
        ROOT: os.path.join(DEPS_ROOT, 'mrtaskman'),
        URL: 'http://code.google.com/p/mrtaskman',
        CHECKOUT_COMMAND: 'git clone %s %s'
    }
}
# Location of the Closure compiler jar and the zip it is downloaded from.
CLOSURE_COMPILER_ROOT = os.path.join(DEPS_ROOT, 'closure')
CLOSURE_COMPILER_JAR = os.path.join(CLOSURE_COMPILER_ROOT, 'compiler.jar')
CLOSURE_COMPILER_URL = ('http://closure-compiler.googlecode.com/files/'
                        'compiler-latest.zip')
# Location of the Soy (Closure Templates) compiler jar and its sources.
SOY_COMPILER_ROOT = os.path.join(DEPS_ROOT, 'soy')
SOY_COMPILER_JAR = os.path.join(SOY_COMPILER_ROOT, 'SoyToJsSrcCompiler.jar')
SOY_COMPILER_URL = ('http://closure-templates.googlecode.com/files/'
                    'closure-templates-for-javascript-latest.zip')
SOY_COMPILER_SRC = os.path.join(DEPS_ROOT, 'soy', 'src')
SOYDATA_URL = ('http://closure-templates.googlecode.com/svn/trunk/javascript/'
               'soydata.js')
# Compiling commands.
# Path to the closurebuilder script inside the closure-library checkout.
CLOSURE_COMPILER = os.path.join(DEPS['closure-library'][ROOT], 'closure',
                                'bin', 'build', 'closurebuilder.py')
# Flags forwarded to the Closure compiler: pretty-printed output, generated
# exports, a broad set of jscomp errors, and the extension/rpf/ace externs.
COMPILER_FLAGS = [
    '--compiler_flags=--formatting=pretty_print',
    '--compiler_flags=--generate_exports',
    '--compiler_flags=--js=%s' % os.path.join(
        DEPS['closure-library'][ROOT], 'closure', 'goog', 'deps.js'),
    '--compiler_flags=--jscomp_error=accessControls',
    '--compiler_flags=--jscomp_error=ambiguousFunctionDecl',
    '--compiler_flags=--jscomp_error=checkRegExp',
    '--compiler_flags=--jscomp_error=checkTypes',
    '--compiler_flags=--jscomp_error=checkVars',
    '--compiler_flags=--jscomp_error=constantProperty',
    '--compiler_flags=--jscomp_error=deprecated',
    '--compiler_flags=--jscomp_error=fileoverviewTags',
    '--compiler_flags=--jscomp_error=globalThis',
    '--compiler_flags=--jscomp_error=invalidCasts',
    '--compiler_flags=--jscomp_error=missingProperties',
    '--compiler_flags=--jscomp_error=nonStandardJsDocs',
    '--compiler_flags=--jscomp_error=strictModuleDepCheck',
    '--compiler_flags=--jscomp_error=undefinedVars',
    '--compiler_flags=--jscomp_error=unknownDefines',
    '--compiler_flags=--jscomp_error=visibility',
    ('--compiler_flags=--externs=%s' % os.path.join(
        'common', 'extension', 'externs', 'chrome_extensions.js')),
    ('--compiler_flags=--externs=%s' % os.path.join(
        'common', 'extension', 'externs', 'rpf_externs.js')),
    ('--compiler_flags=--externs=%s' % os.path.join(
        'common', 'extension', 'externs', 'ace_externs.js'))
]
# Command template for the soy compiler; callers fill in the %(input)s and
# %(output)s placeholders (see CompileScript below).
SOY_COMPILER_COMMAND = ' '.join([('java -jar %s' % SOY_COMPILER_JAR),
                                 '--shouldProvideRequireSoyNamespaces',
                                 '--shouldGenerateJsdoc',
                                 '--outputPathFormat %(output)s',
                                 '%(input)s'])
# Command template compiling server-side javascript with closurebuilder;
# callers fill in the %(input)s and %(output)s placeholders.
COMPILE_CLOSURE_COMMAND_FOR_SERVER = ' '.join([
    sys.executable, CLOSURE_COMPILER,
    ('--root=%s' % os.path.join('server', 'scripts')),
    ('--root=%s' % DEPS['closure-library'][ROOT]),
    ('--root=%s' % SOY_COMPILER_SRC),
    ('--root=%s' % GENFILES_ROOT),
    ('--root=%s' % DEPS['selenium-atoms-lib'][ROOT]),
    '--input=%(input)s',
    '--output_mode=compiled',
    '--output_file=%(output)s',
    ('--compiler_jar=%s' % CLOSURE_COMPILER_JAR)] + COMPILER_FLAGS)
class ClosureError(Exception):
  """Raised when build prerequisites (compiler, deps) cannot be set up."""
  pass
def Clean():
  """Remove the build output and generated-files directories if present."""
  for build_dir in (OUTPUT_ROOT, GENFILES_ROOT):
    if os.path.exists(build_dir):
      shutil.rmtree(build_dir)
def CleanExpunge():
  """Delete the downloaded dependencies, then all generated/output files."""
  if os.path.exists(DEPS_ROOT):
    shutil.rmtree(DEPS_ROOT)
  # Clean() handles OUTPUT_ROOT and GENFILES_ROOT.
  Clean()
def CompileScript(filename_base, filepath, suffix_in, suffix_out, command):
  """Compile a script based on the given input file.

  Args:
    filename_base: The base name of the script to compile. (string)
    filepath: The location of the script. (string)
    suffix_in: The suffix to add to the basename for input. (string)
    suffix_out: The suffix to add to the basename for output. (string)
    command: The compile command template; expects %(input)s and
        %(output)s substitutions. (string)

  Returns:
    The process which is executing the command, or None if the output
    already exists (compilation skipped).
  """
  # Renamed from 'input'/'output' to avoid shadowing the builtin input().
  input_path = os.path.join(filepath, ('%s%s' % (filename_base, suffix_in)))
  output_path = os.path.join(
      GENFILES_ROOT, ('%s%s' % (filename_base, suffix_out)))
  # For speed, only compile the script if it is not already compiled.
  if os.path.exists(output_path):
    return
  data = {'input': input_path,
          'output': output_path}
  result = ExecuteCommand(command % data, True)
  if result:
    # NOTE(review): ExecuteCommand always returns a (truthy) Popen object,
    # and the compile runs asynchronously, so the output file normally
    # does not exist yet at this point. Behavior preserved as-is.
    if os.path.exists(output_path):
      os.remove(output_path)
  return result
def ExecuteCommand(command, no_wait=False):
  """Run the given command in a subprocess.

  Args:
    command: A string representing the command to execute; split on
        single spaces to form the argument list.
    no_wait: If True, return immediately without waiting for the command
        to finish.

  Returns:
    The subprocess.Popen object for the launched command.
  """
  args = command.split(' ')
  process = subprocess.Popen(
      args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  if no_wait:
    return process
  captured = process.communicate()
  if process.returncode:
    # Surface the command's stderr when it failed.
    logging.error(captured[1])
  return process
def SetupClosureCompiler():
  """Setup the closure library and compiler.

  Checkout the closure library using svn if it doesn't exist. Also, download
  the closure compiler.

  Raises:
    ClosureError: If the setup fails.
  """
  # Nothing to do when the compiler jar is already present.
  if not os.path.exists(CLOSURE_COMPILER_JAR):
    print('Downloading closure compiler jar file.')
    (compiler_zip, _) = urllib.urlretrieve(CLOSURE_COMPILER_URL)
    archive = zipfile.ZipFile(compiler_zip)
    archive.extract('compiler.jar', CLOSURE_COMPILER_ROOT)
    if not os.path.exists(CLOSURE_COMPILER_JAR):
      logging.error('Could not download the closure compiler jar.')
      raise ClosureError('Could not find the closure compiler.')
def SetupDep(dep_name):
  """Download the dependency to the correct location.

  Args:
    dep_name: The name of the dependency to setup. (string)

  Raises:
    ClosureError: If the checkout did not produce the expected root.
  """
  dep = DEPS[dep_name]
  if os.path.exists(dep[ROOT]):
    return
  ExecuteCommand(dep[CHECKOUT_COMMAND] % (dep[URL], dep[ROOT]))
  # Verify the checkout actually created the dependency root.
  if not os.path.exists(dep[ROOT]):
    logging.error('Could not checkout %s from %s.' % (dep_name, dep[URL]))
    raise ClosureError('Could not set up %s.' % dep_name)
def SetupSoyCompiler():
  """Set up the Soy (Closure Templates) compiler and its support files.

  Downloads the SoyToJsSrcCompiler jar and soyutils_usegoog.js if they are
  missing, plus the soydata.js file that soyutils_usegoog.js depends on.
  (The previous docstring was copy-pasted from SetupClosureCompiler.)

  Raises:
    ClosureError: If any of the downloads fail.
  """
  # Download the soy compiler jar if it doesn't exist.
  soyutils_src = os.path.join(SOY_COMPILER_SRC, 'soyutils_usegoog.js')
  if (not os.path.exists(SOY_COMPILER_JAR) or
      not os.path.exists(soyutils_src)):
    print('Downloading soy compiler and utils.')
    (soy_compiler_zip, _) = urllib.urlretrieve(SOY_COMPILER_URL)
    soy_compiler_zipfile = zipfile.ZipFile(soy_compiler_zip)
    soy_compiler_zipfile.extract('SoyToJsSrcCompiler.jar', SOY_COMPILER_ROOT)
    soy_compiler_zipfile.extract('soyutils_usegoog.js', SOY_COMPILER_SRC)
    if (not os.path.exists(SOY_COMPILER_JAR) or
        not os.path.exists(soyutils_src)):
      logging.error('Could not download the soy compiler jar.')
      raise ClosureError('Could not find the soy compiler.')
  # Download required soydata file, which is required for soyutils_usegoog.js
  # to work.
  soydata_src = os.path.join(SOY_COMPILER_SRC, 'soydata.js')
  if not os.path.exists(soydata_src):
    urllib.urlretrieve(SOYDATA_URL, soydata_src)
    if not os.path.exists(soydata_src):
      logging.error('Could not download soydata.js.')
      # Fixed typo in the error message ('fine' -> 'find').
      raise ClosureError('Could not find soydata.js')
def WaitUntilSubprocessesFinished(ps):
"""Waits until the given sub processes are all finished."""
while True:
status = [p.poll() for p in ps if p != None]
if all([x is not None for x in status]):
for p in ps:
if p != None and p.returncode != 0:
print p.stderr.read()
return
else:
time.sleep(0.2)
def ParseOptions():
  """Parse command-line options and perform standalone build tasks.

  The --clean, --expunge, --serveronly and --deps flags carry out their
  task and then terminate the process via exit(). Otherwise the build
  environment (output directories, dependencies, compilers) is prepared.

  Returns:
    A dict with key 'build_extension_only' indicating whether only the
    extension (and not the server) should be built. (dict)
  """
  result = {'build_extension_only': False}
  usage = 'usage: %prog [options]'
  parser = optparse.OptionParser(usage)
  parser.add_option('--clean', dest='build_clean',
                    action='store_true', default=False,
                    help='Clean the build directories.')
  parser.add_option('--expunge', dest='build_expunge',
                    action='store_true', default=False,
                    help='Clean the build directories and deps.')
  parser.add_option('--deps', dest='build_deps',
                    action='store_true', default=False,
                    help='Download deps.')
  parser.add_option('--serveronly', dest='build_server',
                    action='store_true', default=False,
                    help='Build the server code only.')
  parser.add_option('--extensiononly', dest='build_extension',
                    action='store_true', default=False,
                    help='Build the extension code only.')
  (options, _) = parser.parse_args()
  # Exit if only want to clean.
  if options.build_clean:
    Clean()
    exit()
  elif options.build_expunge:
    CleanExpunge()
    exit()
  elif options.build_server:
    # Server-only build: wipe the previous server output, rebuild, exit.
    server_src = os.path.join(OUTPUT_ROOT, 'server')
    if os.path.exists(server_src):
      shutil.rmtree(server_src)
    if os.path.exists(GENFILES_ROOT):
      shutil.rmtree(GENFILES_ROOT)
    CopyServerFiles()
    exit()
  elif options.build_extension:
    # Extension-only build: wipe the previous extension output and let the
    # caller (main) do the actual build.
    extension_src = os.path.join(OUTPUT_ROOT, 'extension')
    if os.path.exists(extension_src):
      shutil.rmtree(extension_src)
    if os.path.exists(GENFILES_ROOT):
      shutil.rmtree(GENFILES_ROOT)
    result['build_extension_only'] = True
  # Set up the directories that will be built into.
  paths = [GENFILES_ROOT, DEPS_ROOT]
  for path in paths:
    if not os.path.exists(path):
      os.mkdir(path)
  # Get external resources.
  for dep_name in DEPS:
    SetupDep(dep_name)
  SetupClosureCompiler()
  SetupSoyCompiler()
  if options.build_deps:
    exit()
  return result
def CopyAceFiles():
  """Copy the built ACE editor sources into the extension output folder."""
  dst_path = os.path.join(EXTENSION_DST, 'ace')
  src_path = os.path.join(DEPS['ace'][ROOT], 'build', 'src')
  # copytree requires that the destination does not already exist.
  if os.path.exists(dst_path):
    shutil.rmtree(dst_path)
  shutil.copytree(src_path, dst_path)
def CopyServerFiles():
  """Compile server Soy/JS targets and assemble the server bundle.

  Compiles the server Soy templates and Closure JavaScript, then copies
  the server sources plus third-party dependencies (gdata, atom, urlnorm,
  mrtaskman) into SERVER_DST.
  """
  # Create server bundle.
  print('Creating server bundle.')
  # Soy templates to compile; maps base filename -> source directory.
  soy_files = {
      'explore_page': os.path.join('server', 'scripts', 'soys'),
      'set_details_page': os.path.join('server', 'scripts', 'soys'),
      'result_page': os.path.join('server', 'scripts', 'soys'),
      'run_details_settings': os.path.join('server', 'scripts', 'soys'),
      'run_details_results': os.path.join('server', 'scripts', 'soys'),
      'run_details_overview': os.path.join('server', 'scripts', 'soys'),
      'run_details_page': os.path.join('server', 'scripts', 'soys'),
      'set_details_runs': os.path.join('server', 'scripts', 'soys'),
      'project_details_page': os.path.join('server', 'scripts', 'soys'),
      'store': os.path.join('server', 'scripts', 'soys')
  }
  ps = []
  current_time = time.time()
  # Launch all Soy compiles in parallel, then wait for them to finish.
  for soy_filename in soy_files:
    soy_filepath = soy_files[soy_filename]
    ps.append(CompileScript(soy_filename, soy_filepath, '.soy', '.soy.js',
                            SOY_COMPILER_COMMAND))
  WaitUntilSubprocessesFinished(ps)
  ps = []
  # JavaScript
  js_targets = {
      'url_parser': os.path.join('server', 'scripts'),
      'store_edit': os.path.join('server', 'scripts'),
      'store_view': os.path.join('server', 'scripts')
  }
  for target in js_targets:
    target_filepath = js_targets[target]
    ps.append(CompileScript(target, target_filepath, '.js', '_script.js',
                            COMPILE_CLOSURE_COMMAND_FOR_SERVER))
  WaitUntilSubprocessesFinished(ps)
  print 'Totally %s (s) elapsed for server!' % (time.time() - current_time)
  # Copy the server sources and their dependencies into the bundle.
  server_src = 'server'
  shutil.copytree(server_src, SERVER_DST)
  bugs_src = os.path.join('tools', 'bugs', 'server', 'appengine')
  shutil.copytree(bugs_src, os.path.join(SERVER_DST, 'bugs'))
  common_src = os.path.join('common', 'server', 'appengine')
  shutil.copytree(common_src, os.path.join(SERVER_DST, 'common'))
  gdata_src = os.path.join(DEPS['gdata-python-client'][ROOT], 'src', 'gdata')
  shutil.copytree(gdata_src, os.path.join(SERVER_DST, 'gdata'))
  atom_src = os.path.join(DEPS['gdata-python-client'][ROOT], 'src', 'atom')
  shutil.copytree(atom_src, os.path.join(SERVER_DST, 'atom'))
  urlnorm_src = os.path.join(DEPS['urlnorm'][ROOT], 'urlnorm.py')
  shutil.copy(urlnorm_src, os.path.join(SERVER_DST, 'third_party'))
  mrtaskman_root = DEPS['mrtaskman'][ROOT]
  mrtaskman_src = os.path.join(mrtaskman_root, 'server', 'util')
  mrtaskman_dst = os.path.join(SERVER_DST, 'util')
  shutil.copytree(mrtaskman_src, mrtaskman_dst)
  # Install the compiled scripts; url_parser is renamed to client_script.js.
  js_src = os.path.join(GENFILES_ROOT, 'url_parser_script.js')
  js_dst = os.path.join(SERVER_DST, 'scripts', 'client_script.js')
  shutil.copyfile(js_src, js_dst)
  js_src = os.path.join(GENFILES_ROOT, 'store_edit_script.js')
  js_dst = os.path.join(SERVER_DST, 'scripts', 'store_edit_script.js')
  shutil.copyfile(js_src, js_dst)
  js_src = os.path.join(GENFILES_ROOT, 'store_view_script.js')
  js_dst = os.path.join(SERVER_DST, 'scripts', 'store_view_script.js')
  shutil.copyfile(js_src, js_dst)
| Python |
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build the BITE Extension."""
__author__ = ('ralphj@google.com (Julie Ralph)'
'jason.stredwick@gmail.com (Jason Stredwick)')
from buildhelper import *
import logging
import optparse
import os
import shutil
import subprocess
import sys
import time
import urllib
import zipfile
# Command template for compiling one extension JavaScript target with the
# Closure builder; %(input)s and %(output)s are substituted by CompileScript.
COMPILE_CLOSURE_COMMAND = ' '.join([
    sys.executable, CLOSURE_COMPILER,
    ('--root=%s' % os.path.join('common', 'extension')),
    ('--root=%s' % os.path.join('extension', 'src')),
    ('--root=%s' % os.path.join(BUG_ROOT, 'src')),
    ('--root=%s' % os.path.join(RPF_ROOT, 'src', 'libs')),
    ('--root=%s' % DEPS['closure-library'][ROOT]),
    ('--root=%s' % SOY_COMPILER_SRC),
    ('--root=%s' % GENFILES_ROOT),
    ('--root=%s' % DEPS['selenium-atoms-lib'][ROOT]),
    '--output_mode=compiled',
    ('--compiler_jar=%s' % CLOSURE_COMPILER_JAR)] +
    COMPILER_FLAGS +
    ['--input=%(input)s', '--output_file=%(output)s'])
def main():
  """Build the BITE extension bundle (and the server unless suppressed).

  Compiles all Soy templates and Closure JavaScript targets, then
  assembles the unpacked extension under EXTENSION_DST (manifest, styles,
  images, HTML, compiled scripts, ACE editor files).
  """
  result = ParseOptions()
  # Compile the closure scripts.
  # Soy
  soy_files = {
      'popup': os.path.join('extension', 'templates'),
      'consoles': os.path.join(BUG_ROOT, 'templates'),
      'common_ux': os.path.join('common', 'extension', 'ux'),
      'newbug_console': os.path.join(BUG_ROOT, 'templates'),
      'newbug_type_selector': os.path.join(BUG_ROOT, 'templates'),
      'rpfconsole': os.path.join(RPF_ROOT, 'templates'),
      'rpf_dialogs': os.path.join(RPF_ROOT, 'templates'),
      'locatorsupdater': os.path.join(RPF_ROOT, 'templates'),
      'explore': os.path.join('extension', 'src', 'project', 'templates'),
      'general': os.path.join('extension', 'src', 'project', 'templates'),
      'member': os.path.join('extension', 'src', 'project', 'templates'),
      'settings': os.path.join('extension', 'src', 'project', 'templates')
  }
  ps = []
  current_time = time.time()
  # Launch all compiles in parallel and wait for the batch to finish.
  for soy_filename in soy_files:
    soy_filepath = soy_files[soy_filename]
    ps.append(CompileScript(soy_filename, soy_filepath, '.soy', '.soy.js',
                            SOY_COMPILER_COMMAND))
  WaitUntilSubprocessesFinished(ps)
  ps = []
  # JavaScript
  js_targets = {
      'background': os.path.join('extension', 'src', 'bite'),
      'content': os.path.join('extension', 'src', 'bite'),
      'getactioninfo': os.path.join(RPF_ROOT, 'src', 'libs'),
      'console': os.path.join(RPF_ROOT, 'src', 'libs'),
      'elementhelper': os.path.join('common', 'extension', 'dom'),
      'popup': os.path.join('extension', 'src'),
      'page': os.path.join('extension', 'src', 'options')
  }
  for target in js_targets:
    target_filepath = js_targets[target]
    ps.append(CompileScript(target, target_filepath, '.js', '_script.js',
                            COMPILE_CLOSURE_COMMAND))
  WaitUntilSubprocessesFinished(ps)
  print 'Totally %s (s) elapsed!' % (time.time() - current_time)
  # Remove the outputs, so they will be created again.
  if os.path.exists(OUTPUT_ROOT):
    shutil.rmtree(OUTPUT_ROOT)
  os.mkdir(OUTPUT_ROOT)
  # Create extension bundle.
  print('Creating extension bundle.')
  # Create the extension bundle and options path.
  paths = [EXTENSION_DST, OPTIONS_DST, STYLES_DST]
  for path in paths:
    if not os.path.exists(path):
      os.mkdir(path)
  # Manifest
  shutil.copy(os.path.join('extension', 'manifest.json'), EXTENSION_DST)
  # Styles
  styles = [os.path.join('extension', 'styles', 'consoles.css'),
            os.path.join('extension', 'styles', 'options.css'),
            os.path.join('extension', 'styles', 'popup.css'),
            os.path.join('extension', 'styles', 'rpf_console.css'),
            os.path.join(RPF_ROOT, 'styles', 'recordmodemanager.css')]
  for style in styles:
    shutil.copy(style, STYLES_DST)
  # Images
  shutil.copytree(os.path.join('extension', 'imgs'), IMGS_DST)
  # HTML
  html = [os.path.join('extension', 'html', 'background.html'),
          os.path.join('extension', 'html', 'popup.html'),
          os.path.join('extension', 'src', 'options', 'options.html'),
          os.path.join(RPF_ROOT, 'html', 'console.html')]
  for html_file in html:
    shutil.copy(html_file, EXTENSION_DST)
  # Scripts
  # NOTE(review): 'scripts' is assigned but never used below.
  scripts = []
  for target in js_targets:
    shutil.copy(os.path.join(GENFILES_ROOT, ('%s_script.js' % target)),
                EXTENSION_DST)
  shutil.copy(os.path.join('common', 'extension', 'analytics', 'analytics.js'),
              EXTENSION_DST)
  # Changes the name from page_script.js to options_script.js.
  shutil.move(os.path.join(EXTENSION_DST, 'page_script.js'),
              os.path.join(EXTENSION_DST, 'options_script.js'))
  CopyAceFiles()
  if not result['build_extension_only']:
    CopyServerFiles()
# Script entry point.
if __name__ == '__main__':
  main()
| Python |
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for all request handlers.
Provides functionality useful to all request handlers, including extraction and
validation of request parameters.
"""
__author__ = 'alexto@google.com (Alexis O. Torres)'
import json
import logging
import os
import urllib
import webapp2
from google.appengine.api import users
from google.appengine.ext import ereporter
from google.appengine.ext.webapp import template
import root
# Hook App Engine's ereporter into the logging module so logged exceptions
# are recorded for later reporting.
ereporter.register_logger()
class Error(webapp2.HTTPException):
  """Base class for all exceptions defined in this module."""
  # TODO (jason.stredwick): Discover what the removed params were used for;
  # url and fp.
  # NOTE(review): Intentionally does not call webapp2.HTTPException.__init__;
  # BaseHandler.handle_exception reads msg/code/hdrs directly. Confirm
  # before changing.
  def __init__(self, msg=None, code=400, hdrs=None):
    """Base Error class for the BITE server.

    Args:
      msg: The message to send to the user. (string or None)
      code: The status code for the user. (integer)
      hdrs: A map of header information to user. (dict or None)
    """
    self.msg = msg
    self.code = code
    self.hdrs = hdrs
class MissingRequiredParameterError(Error):
  """Raised when the request is missing a required parameter."""
  def __init__(self, parameter_name):
    Error.__init__(
        self, msg='Request missing required parameter: %s' % parameter_name)
class InvalidIntValueError(Error):
  """Raised when a request parameter is expected to be an int, but it isn't."""
  def __init__(self, parameter_name, parameter_value):
    message = ('The specified value for parameter "%s" is not '
               'a valid int: %s' % (parameter_name, parameter_value))
    Error.__init__(self, msg=message)
class BaseHandler(webapp2.RequestHandler):
  """Base class for the application handlers.
  Defines common functionality used by various handlers. As a rule of thumb,
  most code that extracts and validates parameters from the request belongs to
  this class.
  If any of the validations fails, one of the exceptions defined in this module
  is raised; all of which inherits from the Error class, also defined in this
  module.
  The most basic type of retrieval is to retrieve an optional str
  argument from the request. This is accomplished by calling
  GetOptionalParameter, for example:
    value = self.GetOptionalParameter('optional_param_name')
    value = self.GetOptionalParameter('optional_param_name', 'default_value')
  If the parameter is required by the request handler, this can be enforced
  by calling GetRequiredParameter, for example
    value = self.GetRequiredParameter('required_param_name')
  In addition to enforcing whether a parameter is required or not, there are
  variations to enforce the parameter value is of a specific type. Some of
  the methods we have implemented at the moment retrieve an optional int
  and a required URL, for example:
    # Note that 10 is just an optional default value.
    value = self.GetOptionalIntParameter('int_parameter_name', 10)
  """
  def handle_exception(self, exception, debug):
    """webapp2 hook invoked when a handler raises; writes an error response.
    HTTPException instances (including this module's Error subclasses)
    supply the status code, headers and message; any other exception type
    results in a generic 500 response.
    Args:
      exception: The exception being handled.
      debug: Whether the app runs in debug mode. (bool; unused here)
    """
    logging.exception('Exception handled by common.handlers.base.BaseHandler.')
    # If the exception is a HTTPException, use its error code.
    # Otherwise use a generic 500 error code.
    if isinstance(exception, webapp2.HTTPException):
      if exception.hdrs is not None and exception.hdrs:
        for (k, v) in exception.hdrs.items():
          self.response.headers[k] = v
      self.response.set_status(exception.code)
      if exception.msg is not None:
        logging.exception(exception.msg)
        self.response.write(exception.msg)
    else:
      # Logging the exception itself may fail (e.g. on unprintable
      # objects); never let that mask the 500 response.
      try:
        logging.exception(exception)
      except Exception:
        pass
      self.response.set_status(500)
      self.response.write('Unmanaged exception')
  def GetData(self, expected_kind):
    """Retrieve data from the request.
    All bugs api calls that have data will pass that data as a JSON string in
    the body of the request. Retrieve and parse that data for the handler
    that requests it.
    Args:
      expected_kind: The kind of data expected. (bugs.kind.Kind)
    Returns:
      An object holding the data for the request. (dict)
    Raises:
      Error: Raised if the request data is missing or invalid.
    """
    # NOTE(review): raises KeyError if the Content-Type header is absent;
    # confirm whether that case should also map to a 415 response.
    content_type = self.request.headers['Content-Type']
    if 'application/json' not in content_type:
      logging.info('Content-Type: %s', content_type)
      raise Error('Content-Type must be application/json.\n', code=415)
    body = self.request.body.decode('utf-8')
    # NOTE(review): decode() cannot return None, so this check never fires;
    # an empty body instead fails in json.loads below.
    if body is None:
      raise Error('JSON data required in message body.\n', code=400)
    try:
      # The payload may be URL-quoted; unquote before JSON parsing.
      data_json_str = urllib.unquote(body)
      parsed_data = json.loads(data_json_str)
      self.ValidateData(parsed_data, expected_kind)
    except AssertionError, e:
      raise Error('Invalid JSON data.\n%s\n' % e, code=400)
    except Exception, e:
      msg = ('Failure parsing request data.\n'
             'Request data required.\n'
             '%s\n' % e)
      raise Error(msg, code=400)
    return parsed_data
  def GetEmail(self, override_email=None):
    """Get the current user's email if logged in or override if admin.
    Gets the current user's email if they are logged in. If the user is an
    admin and an alternative email is specified in the REST call then given
    email will be used instead of the actual user.
    Args:
      override_email: An alternative email to use rather than the current users
          email; must have admin privilege. (string or None)
    Returns:
      User email, or None if no user is logged in. (string or None)
    """
    current_user = users.get_current_user()
    user_email = None
    if current_user:
      user_email = current_user.email()
    if override_email and users.is_current_user_admin():
      # If current user is an admin allow the overriding of the user_email.
      user_email = override_email
    return user_email
  def GetRequiredParameter(self, parameter_name):
    """Retrieves the value of a required request parameter.
    Args:
      parameter_name: Name of the parameter to get from the request.
    Returns:
      The value of the specified parameter as a str.
    Raises:
      MissingRequiredParameterError: The specified parameter was not found in
          the request (absent or empty).
    """
    str_value = self.GetOptionalParameter(parameter_name)
    if not str_value:
      raise MissingRequiredParameterError(parameter_name)
    return str_value
  def GetOptionalParameter(self, parameter_name, default_value=None):
    """Retrieves the value of an optional request parameter.
    Args:
      parameter_name: Name of the parameter to get from the request.
      default_value: Value to return if the parameter is not found.
    Returns:
      The value of the specified parameter as a str, or default_value
      if the parameter was not present in the request.
    """
    return self.request.get(parameter_name, default_value)
  def GetOptionalIntParameter(self, parameter_name, default_value):
    """Retrieves the value of an optional request parameter.
    Args:
      parameter_name: Name of the parameter to get from the request.
      default_value: Value to return if the parameter is not found.
    Returns:
      An int object with the value of the specified parameter as a str.
    Raises:
      InvalidIntValueError: The value of the specified parameter is
          not a valid integer number.
    """
    str_value = self.GetOptionalParameter(parameter_name)
    # If the following line raises a ValueError, the calling code
    # has a bug where they passed an invalid default_value. We let
    # that exception propagate, causing a 400 response to client and
    # sufficient error logging.
    if not str_value:
      return int(default_value)
    try:
      return int(str_value)
    except ValueError:
      raise InvalidIntValueError(parameter_name, str_value)
  def RenderTemplate(self, name, template_args, path='templates'):
    """Renders the specified django template.
    If path is not specified, the we assume the hander and templates are on
    different folders:
      - root
        - handlers
        - templates
    Args:
      name: Str name of the file template.
      template_args: Dict argument passed to the template.
      path: relative path to the template.
    """
    path = os.path.join(root.ROOT, path, name)
    self.response.out.write(template.render(path, template_args))
  def ValidateData(self, data, expected_kind):
    """Ensure the data is of the right kind by performing using assertions.
    Args:
      data: The data to validate. (dict)
      expected_kind: The expected kind for the data. (bugs.kind.Kind)
    Raises:
      AssertionError: Raised if an assertion is False.
    """
    assert data, 'No data provided.\n'
    assert isinstance(data, dict), 'Data is not a dictionary.\n'
    assert 'kind' in data, 'Data is missing kind information.\n'
    assert data['kind'] == expected_kind, ('Data has invalid kind; [expected='
                                           '%s] != [found=%s].\n' %
                                           (expected_kind, data['kind']))
  def WriteResponse(self, data):
    """Writes a response to the caller and passes on the given data.
    All bugs api calls that return data will have the data converted to a JSON
    string. All data returned must be an object.
    Args:
      data: The object containing data to pass back to the caller. (dict)
    Raises:
      Error: Raised if the data could not be converted to a JSON string.
    """
    try:
      assert data, 'Data required.'
      assert isinstance(data, dict), 'Response data is not a dictionary.'
      self.response.headers['Content-Type'] = 'application/json'
      json.dump(data, self.response.out, indent=2)
      self.response.out.write('\n')
    # NOTE(review): AssertionError already subclasses Exception, so the
    # tuple is redundant; kept as-is to avoid behavior drift.
    except (AssertionError, Exception), e:
      raise Error('Invalid response data.\n%s\n' % e, code=400)
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define utility functions related to class attributes."""
__author__ = 'jason.stredwick@gmail.com (Jason Stredwick)'
def GetPODAttrsValue(obj_def, exceptions=None):
  """Returns a list of values for attributes in POD object definitions.

  This method does not work for instances. It also will not return dunder
  attributes; note that single-underscore names are NOT filtered out.

  Args:
    obj_def: The object definition. (class definition)
    exceptions: Attributes to ignore. (list of string)

  Returns:
    The values of the user-defined, non-callable attributes. (list)
  """
  exceptions = exceptions or []
  if not obj_def:
    return []
  # Bug fix: callable() must be applied to the attribute value, not its
  # name (a str is never callable, so methods used to slip through).
  return [getattr(obj_def, attr) for attr in dir(obj_def)
          if not callable(getattr(obj_def, attr)) and
          not attr.startswith("__") and attr not in exceptions]
def GetPODAttrs(obj_def, exceptions=None):
  """Returns a list of attributes from POD object definitions.

  This method does not work for instances. It also will not return dunder
  attributes; note that single-underscore names are NOT filtered out.

  Args:
    obj_def: The object definition. (class definition)
    exceptions: Attributes to ignore. (list of string)

  Returns:
    The names of the user-defined, non-callable attributes. (list of string)
  """
  exceptions = exceptions or []
  if not obj_def:
    return []
  # Bug fix: callable() must be applied to the attribute value, not its
  # name (a str is never callable, so methods used to slip through).
  return [attr for attr in dir(obj_def)
          if not callable(getattr(obj_def, attr)) and
          not attr.startswith("__") and attr not in exceptions]
| Python |
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build the RPF Extension."""
__author__ = ('ralphj@google.com (Julie Ralph)'
'jason.stredwick@gmail.com (Jason Stredwick)',
'phu@google.com (Po Hu)')
from buildhelper import *
import logging
import optparse
import os
import shutil
import subprocess
import sys
import time
import urllib
import zipfile
# Command template for compiling one RPF JavaScript target with the Closure
# builder; %(input)s and %(output)s are substituted by CompileScript.
COMPILE_CLOSURE_COMMAND = ' '.join([
    sys.executable, CLOSURE_COMPILER,
    ('--root=%s' % os.path.join('common', 'extension')),
    ('--root=%s' % os.path.join(RPF_ROOT, 'src')),
    ('--root=%s' % DEPS['closure-library'][ROOT]),
    ('--root=%s' % SOY_COMPILER_SRC),
    ('--root=%s' % GENFILES_ROOT),
    ('--root=%s' % DEPS['selenium-atoms-lib'][ROOT]),
    '--input=%(input)s',
    '--output_mode=compiled',
    '--output_file=%(output)s',
    ('--compiler_jar=%s' % CLOSURE_COMPILER_JAR)] + COMPILER_FLAGS)
def main():
  """Build the standalone RPF extension bundle.

  Compiles the RPF Soy templates and Closure JavaScript targets, then
  (unless --extensiononly restricted the build) assembles the unpacked
  extension under EXTENSION_DST.
  """
  result = ParseOptions()
  # Compile the closure scripts.
  # Soy
  soy_files = {
      'popup': os.path.join(RPF_ROOT, 'templates'),
      'common_ux': os.path.join('common', 'extension', 'ux'),
      'rpfconsole': os.path.join(RPF_ROOT, 'templates'),
      'rpf_dialogs': os.path.join(RPF_ROOT, 'templates'),
      'locatorsupdater': os.path.join(RPF_ROOT, 'templates')
  }
  ps = []
  current_time = time.time()
  # Launch all compiles in parallel and wait for the batch to finish.
  for soy_filename in soy_files:
    soy_filepath = soy_files[soy_filename]
    ps.append(CompileScript(soy_filename, soy_filepath, '.soy', '.soy.js',
                            SOY_COMPILER_COMMAND))
  WaitUntilSubprocessesFinished(ps)
  ps = []
  # JavaScript
  js_targets = {
      'background': os.path.join(RPF_ROOT, 'src', 'base'),
      'getactioninfo': os.path.join(RPF_ROOT, 'src', 'libs'),
      'console': os.path.join(RPF_ROOT, 'src', 'libs'),
      'elementhelper': os.path.join('common', 'extension', 'dom'),
      'popup': os.path.join(RPF_ROOT, 'src', 'base')
  }
  for target in js_targets:
    target_filepath = js_targets[target]
    ps.append(CompileScript(target, target_filepath, '.js', '_script.js',
                            COMPILE_CLOSURE_COMMAND))
  WaitUntilSubprocessesFinished(ps)
  print 'Totally %s (s) elapsed!' % (time.time() - current_time)
  if not result['build_extension_only']:
    # Remove the outputs, so they will be created again.
    if os.path.exists(OUTPUT_ROOT):
      shutil.rmtree(OUTPUT_ROOT)
    os.mkdir(OUTPUT_ROOT)
  # Create extension bundle.
  print('Creating extension bundle.')
  # Create the extension bundle and options path.
  paths = [EXTENSION_DST, OPTIONS_DST, STYLES_DST]
  for path in paths:
    if not os.path.exists(path):
      os.mkdir(path)
  # Manifest
  shutil.copy(os.path.join(RPF_ROOT, 'manifest.json'), EXTENSION_DST)
  # Styles
  styles = [os.path.join('extension', 'styles', 'consoles.css'),
            os.path.join('extension', 'styles', 'options.css'),
            os.path.join('extension', 'styles', 'popup.css'),
            os.path.join('extension', 'styles', 'rpf_console.css'),
            os.path.join(RPF_ROOT, 'styles', 'recordmodemanager.css')]
  for style in styles:
    shutil.copy(style, STYLES_DST)
  # Images
  shutil.copytree(os.path.join('extension', 'imgs'), IMGS_DST)
  # HTML
  html = [os.path.join('extension', 'html', 'background.html'),
          os.path.join('extension', 'html', 'popup.html'),
          os.path.join('extension', 'src', 'options', 'options.html'),
          os.path.join(RPF_ROOT, 'html', 'console.html')]
  for html_file in html:
    shutil.copy(html_file, EXTENSION_DST)
  # Scripts
  # NOTE(review): 'scripts' is assigned but never used below.
  scripts = []
  for target in js_targets:
    shutil.copy(os.path.join(GENFILES_ROOT, ('%s_script.js' % target)),
                EXTENSION_DST)
  shutil.copy(os.path.join('common', 'extension', 'analytics', 'analytics.js'),
              EXTENSION_DST)
  CopyAceFiles()
  if not result['build_extension_only']:
    CopyServerFiles()
# Script entry point.
if __name__ == '__main__':
  main()
| Python |
from django import forms
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import loader, Context, RequestContext
from django.template.defaultfilters import slugify
def redirect(view, params):
    """Return an HTTP redirect to the named view resolved with params."""
    target_url = reverse(view, None, params)
    return HttpResponseRedirect(target_url)
def render(template, context, request):
    """Render the named template with the given context dict and request.

    The original also built an unused loader.get_template/RequestContext
    pair; those dead locals are removed. The response is unchanged.
    """
    return render_to_response(template, context,
                              context_instance=RequestContext(request))
def serveFile(file, request):
    """Return the raw contents of the file at the given path as a response.

    Fixes two defects: HttpResponse was referenced without being imported
    (NameError at call time), and the file handle was never closed.
    """
    with open(file) as fh:
        return HttpResponse(fh.read())
def sort(list, field):
    """Sort the list of mappings in place, case-insensitively by field."""
    def sort_key(item):
        return item[field].lower()
    list.sort(key=sort_key)
def sortByName(list):
    """Sort the list in place, case-insensitively, by its 'name' field."""
    sort(list, 'name')
def getFileFromPost(request, fileParam):
    """Return the uploaded file named fileParam, or None if not present."""
    uploads = request.FILES
    if fileParam not in uploads:
        return None
    return uploads[fileParam]
| Python |
#!/usr/bin/env python2.4
# Scrape the Simple English Wikipedia pages named in pages.list and save
# each page's raw HTML, prefixed with its category, under raw/.
import os, urllib, time
# Section order in which the pages are retrieved.
CATEGORIES = (
    "General",
    "People",
    "Objects",
    "Dates",
)
# Parse pages.list: a header line looks like --Name--; every following
# line names one page in that section.
fin = open("pages.list", 'r')
categories = {}
current = None
for line in fin:
    line = line.strip()
    if line[:2] == '--' and line[-2:] == '--':
        current = line[2:-2]
        if current not in categories:
            categories[current] = []
    else:
        categories[current].append(line)
fin.close()
# Fetch each page; sections present in pages.list but not listed in
# CATEGORIES are skipped.
for cat in CATEGORIES:
    if cat in categories:
        for page in categories[cat]:
            print "Retrieving page", page, "..."
            url = "http://simple.wikipedia.org/wiki/%s" % (page)
            httpin = urllib.urlopen(url)
            html = httpin.read()
            httpin.close()
            if "ERR_ACCESS_DENIED" in html:
                print " ...access denied :("
            fout = open(os.path.join('raw', page), 'w')
            # NOTE(review): 'CATEOGRY' is misspelled, but downstream
            # parsers may rely on the exact marker -- confirm before fixing.
            fout.write('CATEOGRY:' + cat + '\n')
            fout.write(html)
            fout.close()
            # Throttle requests to be polite to the server.
            time.sleep(2)
| Python |
#!/usr/bin/env python2.4
# Scrape the Simple English Wikipedia pages named in pages.list and save
# each page's raw HTML, prefixed with its category, under raw/.
import os, urllib, time
# Section order in which the pages are retrieved.
CATEGORIES = (
    "General",
    "People",
    "Objects",
    "Dates",
)
# Parse pages.list: a header line looks like --Name--; every following
# line names one page in that section.
fin = open("pages.list", 'r')
categories = {}
current = None
for line in fin:
    line = line.strip()
    if line[:2] == '--' and line[-2:] == '--':
        current = line[2:-2]
        if current not in categories:
            categories[current] = []
    else:
        categories[current].append(line)
fin.close()
# Fetch each page; sections present in pages.list but not listed in
# CATEGORIES are skipped.
for cat in CATEGORIES:
    if cat in categories:
        for page in categories[cat]:
            print "Retrieving page", page, "..."
            url = "http://simple.wikipedia.org/wiki/%s" % (page)
            httpin = urllib.urlopen(url)
            html = httpin.read()
            httpin.close()
            if "ERR_ACCESS_DENIED" in html:
                print " ...access denied :("
            fout = open(os.path.join('raw', page), 'w')
            # NOTE(review): 'CATEOGRY' is misspelled, but downstream
            # parsers may rely on the exact marker -- confirm before fixing.
            fout.write('CATEOGRY:' + cat + '\n')
            fout.write(html)
            fout.close()
            # Throttle requests to be polite to the server.
            time.sleep(2)
| Python |
#!/usr/bin/env python2.4
import os, pickle
import nltk
import knowledge, parse
# Page categories processed by this module.
CATEGORIES = (
    "General",
    "People",
    "Objects",
    "Dates",
)
# Categories to skip.
EXCLUDE = (
    "Bad Cases",
)
# Verb phrase lists used to classify sentences into relations.
CREATION_VERBS = (
    'painted',
    'paints',
    'wrote',
    'writes',
    'built',
    'builds',
    'created',
    'creates',
    'sculpted',
    'sculpts',
    'made',
    'makes',
)
BIRTH_VERBS = (
    'was born',
)
DEATH_VERBS = (
    'died',
    'dies',
    'was killed',
)
def debug(*strings):
    """Debug output hook -- currently disabled, so this is a no-op.

    The original implementation printed its arguments space-separated;
    re-enable by restoring a print loop below the return.
    """
    return
def get_type(node):
    """Return the grammar-type label of a parse node.

    Bare strings are their own label, (label, index) tuples yield the
    label, and nltk Trees are searched for a 'type' feature (falling back
    to "unknown").  Any other node kind yields None implicitly.
    """
    kind = type(node)
    if kind == str:
        return node
    if kind == tuple:
        return node[0]
    if isinstance(node, nltk.tree.Tree):
        # Old nltk API: tree.node holds the production's feature mapping.
        for feature, value in node.node.items():
            if type(feature) == nltk.featstruct.Feature and feature.name == 'type':
                return value
        debug("Type not known...")
        return "unknown"
def get_children(tree):
    """Return (types, nodes): parallel tuples describing tree's children.

    types holds the grammar label of every child (via get_type); nodes
    holds None for plain strings, the word index for (word, index) tuples,
    and the subtree itself for nltk Trees.
    """
    child_types = []
    child_nodes = []
    for child in tree:
        child_types.append(get_type(child))
        kind = type(child)
        if kind == str:
            child_nodes.append(None)
        elif kind == tuple:
            child_nodes.append(child[1])
        elif isinstance(child, nltk.tree.Tree):
            child_nodes.append(child)
        else:
            # Unknown kinds append nothing, so the two tuples can fall
            # out of step (behavior preserved from the original).
            debug("Found child node of type", kind)
    return tuple(child_types), tuple(child_nodes)
def label_tree(tree, index=0):
    """Replace every string leaf of tree, in place, with (word, position).

    Positions number the words left-to-right across the whole tree,
    starting from `index`.  Returns the next unused position so that
    recursive calls can continue the numbering.
    """
    position = index
    for slot, child in enumerate(tree):
        if type(child) == str:
            tree[slot] = (child, position)
            position += 1
        else:
            position = label_tree(child, position)
    return position
def get_word(idx1, idx2):
    """Return the word at position idx2 of the sentence given by idx1."""
    return get_words(idx1)[idx2]

def get_words(index_or_words):
    """Resolve a sentence reference to its word list.

    An int indexes the global parse `data`; anything else is assumed to
    already be the word list and is returned unchanged.
    """
    if type(index_or_words) == int:
        return data['sentences'][index_or_words]['words']
    return index_or_words
def understand_sentence(tree, index_or_words, slots=None):
    """Walk a parsed sentence tree, filling a slot dictionary.

    Args:
        tree: labeled parse tree (see label_tree) for one sentence.
        index_or_words: sentence index into the global parse data, or the
            sentence's own word list.
        slots: partially filled slot dict to extend; None starts fresh.

    Returns:
        The slot dict (subject/verb/object/location/date/start/end), or
        None when the sentence shape is not one of the known patterns.
    """
    slots = slots or {
        'subject': None,
        'verb': None,
        'object': None,
        'location': None,
        'date': None,
        'start': None,
        'end': None,
    }
    debug("Understanding [sentence]...")
    debug(' ', tree.node)
    types, nodes = get_children(tree)
    # Dispatch on the exact child-label sequence; unmatched shapes give
    # up (None) rather than being partially understood.
    if types == ('S', '.'):
        return understand_sentence(nodes[0], index_or_words, slots)
    elif types == ('NP', 'VP'):
        return understand_verbphrase(
            nodes[1],
            index_or_words,
            understand_subject(
                nodes[0],
                index_or_words,
                slots
            )
        )
    elif types == ('PP', 'NP', 'VP'):
        return understand_prepphrase(
            nodes[0],
            index_or_words,
            understand_verbphrase(
                nodes[2],
                index_or_words,
                understand_subject(
                    nodes[1],
                    index_or_words,
                    slots
                )
            )
        )
    elif types == ('NP', ',', 'VP', 'PPE'):
        return understand_prepphrase(
            nodes[3],
            index_or_words,
            understand_verbphrase(
                nodes[2],
                index_or_words,
                understand_subject(
                    nodes[0],
                    index_or_words,
                    slots
                )
            )
        )
    elif types == ('PP', ',', 'NP', 'VP'):
        return understand_prepphrase(
            nodes[0],
            index_or_words,
            understand_verbphrase(
                nodes[3],
                index_or_words,
                understand_subject(
                    nodes[2],
                    index_or_words,
                    slots
                )
            )
        )
    elif types == ('CD', ',', 'NP', 'VP'):
        # A leading bare number ("1564, Shakespeare was born") is read as
        # the sentence's date.
        word = get_word(index_or_words, nodes[0])
        slots['date'] = resolve_date(word)
        return understand_verbphrase(
            nodes[3],
            index_or_words,
            understand_subject(
                nodes[2],
                index_or_words,
                slots
            )
        )
    elif types == ('CD', ',', 'VP'):
        debug("Found CD , VP sentence:", get_words(index_or_words))
        return None
    else:
        return None

def understand_subject(tree, index_or_words, slots):
    """Fill slots['subject'] from an NP subtree; None on failure.

    NOTE(review): this is nearly identical to understand_object and
    understand_prepNP -- only the destination slot differs.
    """
    if not slots: return None
    debug("Understanding [subject]...")
    debug(' ', tree.node)
    types, nodes = get_children(tree)
    if types == ('NP', 'CC', 'NP'):
        # Conjoined subjects are not handled.
        debug("Found NP CC NP subject:", get_words(index_or_words))
        return None
    elif types == ('NP', 'PPE'):
        return understand_prepphrase(
            nodes[1],
            index_or_words,
            understand_subject(
                nodes[0],
                index_or_words,
                slots
            )
        )
    elif types == ('NP', '(', 'ANYTHING', ')'):
        # Parenthesized asides are ignored.
        return understand_subject(
            nodes[0],
            index_or_words,
            slots
        )
    elif types == ('J', 'NP'):
        # Adjectives and determiners are skipped over.
        return understand_subject(
            nodes[1],
            index_or_words,
            slots
        )
    elif types == ('DT', 'NP'):
        return understand_subject(
            nodes[1],
            index_or_words,
            slots
        )
    elif types == ('PS', 'NP'):
        debug("Found PS NP subject:", get_words(index_or_words))
        return None
    elif types == ('N',):
        slots = understand_noun(
            nodes[0],
            index_or_words,
            slots
        )
        if not slots:
            return None
        # understand_noun leaves its result under 'noun'; move it.
        slots['subject'] = slots['noun']
        del slots['noun']
        return slots
    else:
        return None

def understand_object(tree, index_or_words, slots):
    """Fill slots['object'] from an NP subtree; None on failure."""
    if not slots: return None
    debug("Understanding [object]...")
    debug(' ', tree.node)
    types, nodes = get_children(tree)
    if types == ('NP', 'CC', 'NP'):
        debug("Found NP CC NP object:", get_words(index_or_words))
        return None
    elif types == ('NP', 'PPE'):
        # TODO: Something else here?
        return understand_prepphrase(
            nodes[1],
            index_or_words,
            understand_object(
                nodes[0],
                index_or_words,
                slots
            )
        )
    elif types == ('NP', '(', 'ANYTHING', ')'):
        return understand_object(
            nodes[0],
            index_or_words,
            slots
        )
    elif types == ('J', 'NP'):
        return understand_object(
            nodes[1],
            index_or_words,
            slots
        )
    elif types == ('DT', 'NP'):
        return understand_object(
            nodes[1],
            index_or_words,
            slots
        )
    elif types == ('PS', 'NP'):
        debug("Found PS NP object:", get_words(index_or_words))
        return None
    elif types == ('N',):
        slots = understand_noun(
            nodes[0],
            index_or_words,
            slots
        )
        if not slots:
            return None
        slots['object'] = slots['noun']
        del slots['noun']
        return slots
    else:
        return None

def understand_prepNP(tree, index_or_words, slots):
    """Fill slots['aux'] from the NP of a prepositional phrase.

    The caller (understand_prepphrase) reassigns 'aux' to 'location' or
    'date' depending on the preposition.
    """
    if not slots: return None
    debug("Understanding [prepNP]...")
    debug(' ', tree.node)
    types, nodes = get_children(tree)
    if types == ('NP', 'CC', 'NP'):
        debug("Found NP CC NP prepNP:", get_words(index_or_words))
        return None
    elif types == ('NP', 'PPE'):
        # TODO: Something else here?
        return understand_prepphrase(
            nodes[1],
            index_or_words,
            understand_prepNP(
                nodes[0],
                index_or_words,
                slots
            )
        )
    elif types == ('NP', '(', 'ANYTHING', ')'):
        return understand_prepNP(
            nodes[0],
            index_or_words,
            slots
        )
    elif types == ('J', 'NP'):
        return understand_prepNP(
            nodes[1],
            index_or_words,
            slots
        )
    elif types == ('DT', 'NP'):
        return understand_prepNP(
            nodes[1],
            index_or_words,
            slots
        )
    elif types == ('PS', 'NP'):
        debug("Found PS NP prepNP:", get_words(index_or_words))
        return None
    elif types == ('N',):
        slots = understand_noun(
            nodes[0],
            index_or_words,
            slots
        )
        if not slots:
            return None
        slots['aux'] = slots['noun']
        del slots['noun']
        return slots
    else:
        return None
def understand_noun(tree, index_or_words, slots):
    """Resolve a noun subtree into slots['noun'].

    Handles compound nouns (N N), possessives (N POS), pronouns (PRP),
    plain/proper nouns, tagged entities (ENT) and numbers (CD).  Returns
    the updated slots, or None when the noun shape is not understood.
    """
    if not slots: return None
    debug("Understanding [noun]...")
    debug(' ', tree.node)
    types, nodes = get_children(tree)
    if types == ('N', 'N'):
        # Compound noun: resolve both halves, then merge them.
        slots = understand_noun(nodes[0], index_or_words, slots)
        if not slots:
            return None
        noun = slots['noun']
        slots = understand_noun(nodes[1], index_or_words, slots)
        if not slots:
            return None
        slots['noun'] = combine_nouns(noun, slots['noun'])
        return slots
    elif types == ('N', 'POS'):
        # Possessive: keep only the possessor noun for now.
        debug("Found N POS noun:", get_words(index_or_words))
        slots = understand_noun(nodes[0], index_or_words, slots)
        return slots
        # TODO: More here?
    elif types == ('PRP',):
        word = get_word(index_or_words, nodes[0]).lower()
        typ = None
        if word in ('him', 'her', 'he', 'she'):
            typ = "PERSON"
        elif word in ('it', 'that'):
            typ = "THING"
        elif word in ('them', 'they'):
            typ = "PEOPLE"
        # Bug fix: ('those') is just the string 'those', so the original
        # performed a substring test; a one-element tuple is intended.
        elif word in ('those',):
            typ = "THINGS"
        else:
            debug("Found unidentified pronoun", word)
        slots['noun'] = resolve_noun(index_or_words, nodes[0], 'pronoun', typ)
        return slots
    elif types == ('WP',):
        # Wh-pronouns ("who", "what") are not resolvable.
        word = get_word(index_or_words, nodes[0])
        debug("Found WP noun:", word)
        return None
    elif types in (('NN',), ('NNP',), ('NNS',), ('NNPS',)):
        slots['noun'] = resolve_noun(index_or_words, nodes[0], "noun")
        return slots
    elif types == ('ENT',):
        # Entity labels look like "ENT-PERSON": the subtype follows '-'.
        tys, nds = get_children(nodes[0])
        typ = tys[0].split('-')[1]
        slots['noun'] = resolve_noun(index_or_words, nds[0], 'noun', typ)
        return slots
    elif types == ('CD',):
        # Try to read the number as a date; fall back to a plain noun.
        word = get_word(index_or_words, nodes[0])
        slots['noun'] = resolve_date(word)
        if not slots['noun']:
            slots['noun'] = resolve_noun(index_or_words, nodes[0], "noun")
        return slots
    else:
        return None
def understand_verbphrase(tree, index_or_words, slots):
    """Fill verb (and possibly object) slots from a VP subtree.

    Returns the updated slots, or None when the VP shape is unknown.
    """
    if not slots: return None
    debug("Understanding [verbphrase]...")
    debug(' ', tree.node)
    types, nodes = get_children(tree)
    if types == ('V', 'NP'):
        return understand_verb(
            nodes[0],
            index_or_words,
            understand_object(
                nodes[1],
                index_or_words,
                slots
            )
        )
    elif types == ('V', 'RP', 'NP'):
        # Particle verbs ("gave up X"): the particle itself is dropped.
        return understand_verb(
            nodes[0],
            index_or_words,
            understand_object(
                nodes[2],
                index_or_words,
                slots
            )
        )
    elif types == ('V', 'PPE', 'NP'):
        # NOTE(review): the PPE between verb and object is ignored here.
        return understand_verb(
            nodes[0],
            index_or_words,
            understand_object(
                nodes[2],
                index_or_words,
                slots
            )
        )
    elif types == ('V',):
        return understand_verb(
            nodes[0],
            index_or_words,
            slots
        )
    elif types == ('VP', 'PPE'):
        return understand_prepphrase(
            nodes[1],
            index_or_words,
            understand_verbphrase(
                nodes[0],
                index_or_words,
                slots
            )
        )
    elif types == ('R', 'VP'):
        # Adverbs are skipped.
        return understand_verbphrase(
            nodes[1],
            index_or_words,
            slots
        )
    else:
        return None
def understand_verb(tree, index_or_words, slots):
    """Fill slots['verb'] from a V subtree.

    An auxiliary (VBZ/VBD) followed by a verb is prepended to the inner
    verb's text ("was killed"); otherwise the single verb word is used.
    """
    if not slots: return None
    debug("Understanding [verb]...")
    debug(' ', tree.node)
    types, nodes = get_children(tree)
    if types == ('VBZ', 'V')\
    or types == ('VBD', 'V'):
        slots = understand_verb(nodes[1], index_or_words, slots)
        word = get_word(index_or_words, nodes[0])
        slots['verb'] = word + ' ' + slots['verb']
        return slots
    else:
        # NOTE(review): assumes nodes[0] is a word index for any other
        # shape -- confirm the grammar guarantees this.
        word = get_word(index_or_words, nodes[0])
        slots['verb'] = word
        return slots
def understand_prepphrase(tree, index_or_words, slots):
    """Fold a prepositional phrase into the location/date slots.

    Unlike the other understand_* helpers this is best-effort: an
    un-understood PP returns the slots unchanged rather than None.
    """
    if not slots: return None
    debug("Understanding [prepphrase]...")
    debug(' ', tree.node)
    types, nodes = get_children(tree)
    if types == (',', 'PP'):
        return understand_prepphrase(
            nodes[1],
            index_or_words,
            slots
        )
    elif types == ('PP',):
        return understand_prepphrase(
            nodes[0],
            index_or_words,
            slots
        )
    elif types == ('P', 'NP'):
        # Keep the original slots so a failed NP resolution is non-fatal.
        orig = slots
        slots = understand_prepNP(
            nodes[1],
            index_or_words,
            slots
        )
        if not slots:
            return orig
        # Route the resolved NP ('aux') by preposition word.
        tys, nds = get_children(nodes[0])
        word = get_word(index_or_words, nds[0]).lower()
        if word == 'at' and type(slots['aux']) in (str, knowledge.Location):
            slots['location'] = slots['aux']
        elif word in ('in', 'on'):
            if type(slots['aux']) == knowledge.Location:
                slots['location'] = slots['aux']
            elif type(slots['aux']) == knowledge.Date:
                slots['date'] = slots['aux']
            elif type(slots['aux']) == str:
                slots['location'] = slots['aux']
        elif word == 'to':
            slots['location'] = slots['aux']
        return slots
    elif types == ('VBN', 'NP') or types == ('VBG', 'NP'):
        debug("Found V* NP prepphrase:", get_words(index_or_words))
        return slots
    elif types == ('TO', 'VP'):
        debug("Found TO VP prepphrase:", get_words(index_or_words))
        return slots
    else:
        return slots
def resolve_noun(gindex, sindex, typ, subtype=None):
    """Resolve a word to a known fact (or create/keep it as appropriate).

    Args:
        gindex: sentence index or word list (see get_words).
        sindex: word position within the sentence.
        typ: "noun" or "pronoun".
        subtype: optional NE label (PERSON, GPE, LOCATION, ...).

    Returns:
        A knowledge fact, the bare word, or None when unresolvable.
    """
    word = get_word(gindex, sindex)
    debug("Resolving", word, "as", typ, subtype)
    if typ == "noun":
        if subtype == None:
            m = match_fact(word, knowledge.Person)
            if m:
                return m
            return word
        elif subtype in ("PERSON", "GSP"):
            debug("Found a person:", word)
            match = match_fact(word, knowledge.Person)
            # TODO: How sketchy is this?  Multi-word unknown names become
            # brand-new Person facts.
            if not match and ' ' in word:
                return knowledge.Person(
                    name = word,
                    page = page,
                )
            elif not match:
                return word
            else:
                return match
        elif subtype == "ORGANIZATION":
            debug("Found an organization:", word)
            return match_fact(word, knowledge.Group)
        elif subtype in ("LOCATION", "FACILITY", "GPE"):
            debug("Found a location:", word)
            match = match_fact(word, knowledge.Location)
            if not match and subtype == "GPE":
                return knowledge.Location(
                    name = word,
                    page = page,
                )
            # Bug fix: a successful match previously fell through and the
            # function returned None, discarding the located fact.
            return match
        elif subtype in ("TIME", "MONEY", "PERCENT", "DATE"):
            debug("Found an odd noun type:", subtype, "for word", word)
            return None
    elif typ == "pronoun":
        key = find_key(gindex, sindex, subtype)
        if isinstance(key, knowledge.Fact):
            return key
        else:
            return match_fact(key)
    else:
        debug("Bad noun type:", typ)
        return None
def resolve_date(word):
    """Turn a date-like token into a knowledge.Date.

    Recognizes decade/century forms like "1550s"/"1500s" and plain year
    numbers; reuses an already-known Date when one matches.  Returns None
    for anything unparseable.
    """
    debug("Resolving date", word)
    # Robustness: an empty token can't be a date (and would crash the
    # word[-1] check below).
    if not word:
        return None
    m = match_fact(word, knowledge.Date)
    if m:
        return m
    if word[-1] == 's' and len(word) == 5:
        if word[-2] == '0':
            if word[-3] == '0':
                # "1500s" -> a century.
                try:
                    return knowledge.Date(century=int(word[:-1]), page=page)
                except ValueError:
                    return None
            else:
                # "1550s" -> a decade; fall through to None on failure.
                try:
                    return knowledge.Date(decade=int(word[:-1]), page=page)
                except ValueError:
                    pass
        else:
            # Five chars ending in 's' but not a round decade: not a date.
            return None
    else:
        try:
            return knowledge.Date(year=int(word), page=page)
        except ValueError:
            return None
def combine_nouns(first, second):
    """Merge two resolved nouns into one.

    Identical objects collapse to one; two plain strings are joined with
    a space; any other combination is unmergeable and yields None.
    """
    debug("Trying to combine", first, "and", second)
    if first is second:
        return first
    both_plain = type(first) == str and type(second) == str
    if both_plain:
        return ' '.join((first, second))
    return None
def find_key(gindex, sindex, subtype):
    """Stub for pronoun antecedent resolution.

    Always returns None (implicitly) until resolution is implemented,
    which makes resolve_noun fall back to match_fact(None).
    """
    debug("Trying to find key of type", subtype)
    # TODO: Resolution!
def match_fact(key, typ=None):
    """Find the unique known fact whose name matches key.

    Args:
        key: a single word, a multi-word substring, or a list/tuple of
            words that must all appear in the fact's name.
        typ: optional fact class to restrict to; knowledge.Date is matched
            by exact string form instead of by name.

    Returns:
        The single match; on ties, the fact named after the current page;
        otherwise None.  Uses the module globals `facts` and `page`.
    """
    debug("Trying to match fact", key, "of type", typ)
    results = []
    # (Python 2: filter() returns lists, so these can be re-iterated.)
    realfacts = filter(lambda f: isinstance(f, knowledge.Fact), facts)
    dates = filter(lambda f: isinstance(f, knowledge.Date), facts)
    if typ == knowledge.Date:
        for d in dates:
            if key == str(d):
                results.append(d)
    else:
        for f in [_ for _ in realfacts if (typ == None or isinstance(_, typ))]:
            if type(key) in (list, tuple):
                if all(k in f.name.split(' ') for k in key):
                    results.append(f)
            elif type(key) == str:
                # Multi-word keys match as substrings; single words must
                # match a whole word of the name.
                if ' ' in key and key in f.name:
                    results.append(f)
                elif key in f.name.split(' '):
                    results.append(f)
    if len(results) == 1:
        return results[0]
    elif len(results) > 1:
        # Ambiguous: prefer the fact belonging to the page being processed.
        for r in results:
            if r.name.replace(' ', '_') == page:
                return r
        debug(" ...multiple result found for key", key)
        return None
    else:
        debug(" ...no matches found.")
        return None
def setup_known_facts(categories):
    """Seed the global fact store with a Person/Object per listed page.

    Args:
        categories: mapping of section name -> list of page titles, as
            returned by get_categories().  Requires 'People', 'Objects'
            and 'Bad Cases' keys to be present.
    """
    global facts, pages
    for person in categories['People']:
        if person in categories['Bad Cases']:
            continue
        for p in pages:
            if p.url == person:
                thispage = p
        # NOTE(review): if no page matched, `thispage` keeps the previous
        # iteration's value (or is unbound on the first pass) -- assumes
        # every listed person has a Page.  Confirm.
        name = person.replace('_', ' ')
        facts.add(knowledge.Person(
            name=name,
            page=thispage,
        ))
    for obj in categories['Objects']:
        if obj in categories['Bad Cases']:
            continue
        for p in pages:
            if p.url == obj:
                thispage = p
        name = obj.replace('_', ' ')
        facts.add(knowledge.Object(
            name=name,
            page=thispage,
        ))
def get_categories():
    """Parse data/pages.list into {section_name: [page_title, ...]}.

    Section headers look like "--Name--"; every other stripped line
    (including empty ones) is appended to the current section.
    """
    categories = {}
    current = None
    listing = open(os.path.join("data", "pages.list"), 'r')
    for raw_line in listing:
        entry = raw_line.strip()
        is_header = entry[:2] == '--' and entry[-2:] == '--'
        if is_header:
            current = entry[2:-2]
            categories.setdefault(current, [])
        else:
            categories[current].append(entry)
    listing.close()
    return categories
def make_facts(slots):
    """Turn a filled slot dictionary into knowledge facts.

    Resolves string-valued slots against known facts, builds an Event for
    the (subject, verb[, object]) core, wires up location/participation
    links, and records creation/birth/death relations on the people and
    objects involved.  Uses the module globals `facts` and `page`.
    """
    if not slots:
        return
    subject = slots['subject']
    verb = slots['verb']
    object = slots['object']
    date = slots['date']
    start = slots['start']
    end = slots['end']
    location = slots['location']
    # Resolve free-text subject/object against the known-fact store.
    if type(subject) == str:
        m = match_fact(subject)
        if m:
            subject = m
    if type(subject) == knowledge.Person:
        instigator = subject
    else:
        instigator = None
    if type(object) == str:
        m = match_fact(object)
        if m:
            object = m
    if type(object) == knowledge.Person:
        # Bug fix: the participant is the person acted upon (the object);
        # the original assigned `subject` here, mirroring `instigator`.
        participant = object
    else:
        participant = None
    # Unknown locations become new Location facts; unknown dates are dropped.
    if type(location) == str:
        m = match_fact(location, knowledge.Location)
        if m:
            location = m
        else:
            location = knowledge.Location(
                name = location,
                page = page,
            )
    if type(date) == str:
        m = match_fact(date, knowledge.Date)
        if m:
            date = m
        else:
            date = None
    if type(start) == str:
        m = match_fact(start, knowledge.Date)
        if m:
            start = m
        else:
            start = None
    if type(end) == str:
        m = match_fact(end, knowledge.Date)
        if m:
            end = m
        else:
            end = None
    occurrence = None
    # Only a recognized subject fact yields an Event.
    if isinstance(subject, knowledge.Fact) and object:
        if start and end:
            occurrence = knowledge.Event(
                name = str(subject) + ' ' + str(verb) + ' ' + str(object),
                subject = subject,
                start = start,
                end = end,
                where = location,
                verb = verb,
                object = object,
                page = page,
                instigators = set([instigator]),
                participants = set([participant]),
            )
        else:
            occurrence = knowledge.Event(
                name = str(subject) + ' ' + str(verb) + ' ' + str(object),
                when = date,
                where = location,
                subject = subject,
                verb = verb,
                object = object,
                page = page,
                instigators = set([instigator]),
                participants = set([participant]),
            )
    elif isinstance(subject, knowledge.Fact):
        # NOTE(review): in the object-less case the subject is recorded as
        # a participant rather than an instigator -- preserved as-is.
        if start and end:
            occurrence = knowledge.Event(
                name = str(subject) + ' ' + str(verb),
                start = start,
                end = end,
                where = location,
                subject = subject,
                verb = verb,
                object = object,
                page = page,
                participants = set([instigator]),
            )
        else:
            occurrence = knowledge.Event(
                name = str(subject) + ' ' + str(verb),
                when = date,
                where = location,
                subject = subject,
                verb = verb,
                object = object,
                page = page,
                participants = set([instigator]),
            )
    if occurrence:
        # Normalize "only None inside" sets to None.
        if occurrence.instigators == set([None]):
            occurrence.instigators = None
        if occurrence.participants == set([None]):
            occurrence.participants = None
        if location:
            location.events.add(occurrence)
        if instigator:
            instigator.caused.add(occurrence)
        if isinstance(participant, knowledge.Fact):
            participant.participated.add(occurrence)
        # Special verbs establish creation/birth/death relations.
        if occurrence.verb in CREATION_VERBS\
        and isinstance(subject, knowledge.Person)\
        and isinstance(object, knowledge.Object):
            object.creator = subject
            object.creation = occurrence
        if occurrence.verb in BIRTH_VERBS\
        and isinstance(subject, knowledge.Person):
            subject.birth = occurrence
        if occurrence.verb in DEATH_VERBS\
        and isinstance(subject, knowledge.Person):
            subject.death = occurrence
        facts.add(occurrence)
        for f in (subject, object, location, date):
            if (isinstance(f, knowledge.Fact) or isinstance(f, knowledge.Date))\
            and f not in facts:
                facts.add(f)
            else:
                debug("Not adding", f)
def make_easy_facts(date, name, desc, verb):
    """Record one birth/death list entry (e.g. "1564 - Name, painter").

    Args:
        date: date string from the entry.
        name: person's name.
        desc: short description, possibly empty.
        verb: 'was born' or 'died'.

    Temporarily rebinds the global `page` to the person's own page (when
    one exists) so new facts are attributed there.
    """
    global page
    debug("Making easy facts for page", page)
    m = match_fact(name, knowledge.Person)
    if m:
        subject = m
    else:
        # Names containing digits/brackets are usually markup noise -- skip.
        if reduce(lambda a, b: a or b, [c in name for c in '0123456789[]().,']):
            return
        else:
            subject = knowledge.Person(
                name = name,
                page = page,
            )
    m = match_fact(date, knowledge.Date)
    if m:
        date = m
    else:
        # NOTE(review): resolve_date can return None, which would crash
        # date.string() below -- assumes list dates always parse.  Confirm.
        date = resolve_date(date)
    occurrence = None
    oldpage = page
    for p in pages:
        if subject.name.replace(' ', '_') == p.url:
            page = p
    occurrence = knowledge.Event(
        name = str(subject) + ' ' + str(verb) + ' in ' + date.string(),
        subject = subject,
        when = date,
        verb = verb,
        page = page,
        participants = set([subject]),
    )
    description = None
    if desc:
        description = knowledge.Event(
            name = str(subject) + " was a " + desc,
            subject = subject,
            verb = "was a",
            page = page,
            participants = set([subject]),
            object = desc,
        )
        subject.participated.add(description)
    page = oldpage
    # Prefer an existing birth/death with a known year; otherwise attach
    # (or backfill) the dated occurrence.
    if verb == 'was born':
        if subject.birth:
            if subject.birth.start and subject.birth.start.year:
                return
            else:
                subject.birth.start = date
                subject.birth.end = date
                occurrence = None
        else:
            subject.birth = occurrence
    elif verb == 'died':
        if subject.death:
            if subject.death.start and subject.death.start.year:
                return
            else:
                subject.death.start = date
                subject.death.end = date
                occurrence = None
        else:
            subject.death = occurrence
    if occurrence:
        facts.add(occurrence)
    for f in (subject, date, description):
        if (isinstance(f, knowledge.Fact) or isinstance(f, knowledge.Date))\
        and f not in facts:
            facts.add(f)
        else:
            debug("Not adding", f)
def main():
    """Extraction driver: parse every page and populate the fact store.

    Builds the page list from CATEGORIES minus EXCLUDE, seeds the known
    facts, then understands each page's sentences, dated list entries,
    and birth/death lists.  Publishes `facts`, `pages`, `data` and `page`
    as module globals for the helper functions.
    """
    global facts, pages, data, page
    facts = set()
    categories = get_categories()
    # categories = { 'People': [], 'Objects': [], 'Dates': ["1550s"], }
    # categories = {'People': ["Martin_Luther"], }
    # categories = {
    #     'General': [
    #         "Renaissance"
    #     ],
    #     'People': [
    #         "Martin_Luther",
    #         "Johann_Gutenberg",
    #         "Guillaume_Dufay",
    #         "Claudio_Monteverdi",
    #         "Verrocchio",
    #     ],
    #     'Objects': [
    #         "Mona_Lisa",
    #     ],
    # }
    pages = []
    exclude = []
    for c in CATEGORIES:
        if c in categories:
            pages.extend(categories[c])
    for c in EXCLUDE:
        if c in categories:
            exclude.extend(categories[c])
    for e in exclude:
        pages.remove(e)
    for i, p in enumerate(pages):
        pages[i] = knowledge.Page(url=p)
    setup_known_facts(categories)
    for p in pages:
        page = p
        data = parse.get(p.url)
        # Pass 1: free-text sentences.
        for i, d in enumerate(data['sentences']):
            if d['tree']:
                debug('\n')
                debug(d['words'])
                debug(d['tags'])
                tree = d['tree']
                label_tree(tree)
                slots = understand_sentence(tree, i)
                make_facts(slots)
                debug(slots)
        # Pass 2: dated list entries carry either a point date ('when')
        # or a start/end span.
        if data['listed']:
            for d in data['listed']:
                if not d['tree']:
                    continue
                tree = d['tree']
                label_tree(tree)
                date = d['date']
                when = None
                start = None
                end = None
                if 'when' in date:
                    when = resolve_date(date['when'])
                else:
                    start = resolve_date(date['start'])
                    end = resolve_date(date['end'])
                slots = understand_sentence(tree, d['words'])
                if not slots:
                    continue
                if when:
                    slots['date'] = when
                else:
                    slots['start'] = start
                    slots['end'] = end
                make_facts(slots)
                debug(slots)
        # Pass 3: structured birth/death lists.
        if data['births']:
            for date, name, desc in data['births']:
                make_easy_facts(date['when'], name, desc, 'was born')
        if data['deaths']:
            for date, name, desc in data['deaths']:
                make_easy_facts(date['when'], name, desc, 'died')
if __name__ == "__main__":
    main()
    # Attach every extracted fact/date to its source page before saving.
    for f in facts:
        if not f.page:
            debug("Fact has no page:", repr(f))
        else:
            if f not in f.page.facts and f not in f.page.dates:
                if isinstance(f, knowledge.Fact):
                    f.page.add_fact(f)
                elif isinstance(f, knowledge.Date):
                    f.page.add_date(f)
    # NOTE(review): pickling in text mode ('w') works on Python 2 only;
    # switch to 'wb' if this is ever ported.
    fout = open(os.path.join('data', 'known', 'facts.pkl'), 'w')
    obj = facts, pages
    pickle.dump(obj, fout)
    fout.close()
| Python |
###############################################################################
"""
understand.py
this module is offically up and running. though changes may be made the
return type and format is going to stay the same.
input is raw text entered by the user. this is first processed to see if
the user already knew the fact presented. then the input is passed on to
get the noun phrases extracted.
understand.meaning() returns a list of lists (final_noun_phrases). the
first list contains one string with one of the following values to
indicate if the user already knew the fact presented: "yes", "no", or
"unknown". "unknown" only occurs when there is a one word responce that
can't be matched with the hard coded words.
note 1: the package "numpy" needs to be installed for this to work
it is used by nltk for part of speech tagging and can be found at
http://numpy.scipy.org/
"""
import nltk
"""
tag_input()
break input text into tokens and then give them part of speech tags
"""
def tag_input(input_text):
    """Tokenize input_text and attach part-of-speech tags.

    Returns a list of (token, tag) pairs from nltk's default tagger.
    """
    tokens = nltk.word_tokenize(input_text)
    return nltk.pos_tag(tokens)
"""
trim_noun_phrase()
takes in a noun phrase and turns it into a list of strings
"""
def trim_noun_phrase(full_noun_phrase):
    """Strip a leading determiner/possessive and return the bare words.

    Args:
        full_noun_phrase: list of (word, pos_tag) pairs for one NP chunk.

    Returns:
        List of the word strings, without a leading DT/PP$-tagged token.
        An empty chunk yields an empty list (previously an IndexError).
    """
    if not full_noun_phrase:
        return []
    # Drop a leading determiner (DT) or possessive (PP$/PRP$) token.
    if full_noun_phrase[0][1][0] in ('D', 'P'):
        full_noun_phrase = full_noun_phrase[1:]
    return [word for word, _tag in full_noun_phrase]
"""
get_noun_phrase()
extracts and returns noun phrases
"""
def get_noun_phrases (pos_tag_set):
    """Chunk the tagged tokens and return all noun phrases found.

    Args:
        pos_tag_set: list of (word, pos_tag) pairs from tag_input().

    Returns:
        List of noun phrases, each a list of word strings with any leading
        determiner/possessive removed by trim_noun_phrase().
    """
    full_noun_phrase = []
    final_noun_phrases = []
    #the pseudo regexp's for noun phrases (NPs).
    # <DT|PP\$>?: an optional determiner (a,the) or posesive (his, hers).
    #            note: '$' is literal so needs to be backslashed
    # <JJ.*>*    zero or more adjuctives of any type
    # <NN.*>+    one or more nouns of any type
    # <CD>       a number of sorts
    grammar = "NP: {(<DT|PP\$>?<JJ.*>*<NN.*>+)|<CD>}"
    #look for noun phrases and then parse sentence into tree
    chunk_parser = nltk.RegexpParser(grammar)
    chunk_grammar_tree = chunk_parser.parse(pos_tag_set)
    #search the tree for noun phrases.
    #when noun phrase is found trim it and append it to final_noun_phrases
    # NOTE(review): t.node is the old (pre-3.0) nltk Tree API; newer nltk
    # uses t.label() -- confirm the pinned nltk version.
    for subtree in chunk_grammar_tree.subtrees(filter =
                                               lambda t: t.node == 'NP'):
        full_noun_phrase = list(subtree.leaves())
        final_noun_phrases.append(trim_noun_phrase(full_noun_phrase))
    return final_noun_phrases
"""
check_was_fact_known()
first cheks for one word answers likely to be typed as a responce
it is assumed the answer is positive (the user knew the fact) so
only negations are c hecked for.
this wil make lots of false positives (should really do something about
this!!!) if the user enters something not pertanent.
"""
def check_was_fact_known(pos_tag_set):
    """Classify the user's tagged reply.

    Returns ["yes"], ["no"], ["quit"], or an error marker
    [["error"], ["unknown one word answer"]] for an unrecognized one-word
    reply.  Any negative/negation word anywhere in the reply forces
    ["no"], overriding the first-word verdict.
    """
    affirmatives = ("y", "yes", "yea", "yep", "yup", "yeah", "aye",
                    "amen", "duh", "arr")
    negatives = ("n", "no", "nope", "na", "nah", "nay", "nix", "negative")
    negations = ("not", "n't")
    quit_words = ("q", "x", "quit", "exit")
    # Verdict from the first word alone; positive unless told otherwise.
    first = pos_tag_set[0][0]
    if first in affirmatives:
        verdict = ["yes"]
    elif first in negatives:
        verdict = ["no"]
    elif first in quit_words:
        verdict = ["quit"]
    elif len(pos_tag_set) == 1:
        verdict = [["error"], ["unknown one word answer"]]
    else:
        verdict = ["yes"]
    # A negation anywhere overrides everything.
    for token, _tag in pos_tag_set:
        if token in negatives or token in negations:
            return ["no"]
    return verdict
def get_meaning(input_text):
    """Analyze raw user input.

    Returns a flat list whose first element(s) come from
    check_was_fact_known() (the yes/no/quit/error verdict), followed by
    the extracted noun phrases.  Empty input yields an error marker.
    """
    if len(input_text) == 0:
        return [["error"], ["empty string"]]
    tagged = tag_input(input_text)
    #print nltk.ne_chunk(tagged)
    meaning = []
    meaning.extend(check_was_fact_known(tagged))
    meaning.extend(get_noun_phrases(tagged))
    return meaning
| Python |
import os, pickle
# Month names indexed 1-12; index 0 holds a "ZERO" placeholder so that
# MONTHS[month] works directly with 1-based month numbers.
MONTHS = [
    "ZERO",
    "January",
    "February",
    "March",
    "April",
    "May",
    "June",
    "July",
    "August",
    "September",
    "October",
    "November",
    "December",
]
def nonrecursive(function):
    '''
    A decorator constructor for methods that forces them to return '...' if
    they're called recursively on a single object. That is, during the
    expansion of any call to the decorated method, calls to that method on
    the original object will return '...' instead of the function's normal
    return value.
    '''
    # One tracker per decorated function, held in the closure instead of a
    # mutable default argument (which also leaked a `recursion_tracker`
    # parameter into every decorated method's signature).
    active_ids = set()
    def modified(self):
        if id(self) in active_ids:
            return '...'
        active_ids.add(id(self))
        try:
            return function(self)
        finally:
            # Release the id even if function() raises; previously one
            # exception left the id stuck, permanently returning '...'
            # for that object.
            active_ids.remove(id(self))
    return modified
def load_facts():
    """Load the pickled (facts, pages) pair from data/known/facts.pkl.

    Returns:
        Tuple (facts, pages) as written by the extraction pass.
    """
    # Binary mode: pickle data is bytes; text mode would corrupt it on
    # Python 3 and on Windows.  `with` guarantees the handle is closed
    # even when unpickling fails.
    with open(os.path.join('data', 'known', 'facts.pkl'), 'rb') as fin:
        facts, pages = pickle.load(fin)
    return facts, pages
class Page(object):
    """A source wiki page together with the facts and dates mined from it.

    A page is considered "empty" until it holds more than one fact.
    """

    facts = None
    dates = None
    url = None
    empty = True

    def __init__(self, facts=None, dates=None, url=None):
        self.facts = facts if facts else []
        self.dates = dates if dates else []
        self.url = url
        # More than one fact means the page has real content.
        self.empty = len(self.facts) <= 1

    def __repr__(self):
        return ("Page(facts=%r, dates=%r, url=%r)"
                % (self.facts, self.dates, self.url))

    def __str__(self):
        # Wiki URLs use underscores where titles use spaces.
        return str(self.url).replace('_', ' ')

    def add_fact(self, fact):
        if fact in self.facts:
            return
        self.facts.append(fact)
        if len(self.facts) > 1:
            self.empty = False

    def add_date(self, date):
        if date in self.dates:
            return
        self.dates.append(date)

    def remove_fact(self, fact):
        if fact not in self.facts:
            return
        self.facts.remove(fact)
        if len(self.facts) <= 1:
            self.empty = True

    def remove_date(self, date):
        if date in self.dates:
            self.dates.remove(date)
class Fact(object):
    """Base class for every extracted fact.

    Mostly useful for isinstance() tests; carries a name, a preferred
    pronoun, and the page the fact came from.
    """

    name = ""
    pronoun = "it"
    page = None

    def __init__(self, name="unnamed fact", pronoun="it", page=None):
        self.name = name
        self.pronoun = pronoun
        self.page = page

    @nonrecursive
    def __repr__(self):
        return "Fact(name=%r, page=%r)" % (self.name, self.page)

    def __str__(self):
        # Fall back to a placeholder when the fact was never named.
        return str(self.name) if self.name else "a nameless fact"
class Date(object):
    """A point in time at day, month, year, decade, or century resolution.

    Finer-grained fields imply the coarser ones: a year fills in its
    decade, and a decade fills in its century.
    """

    century = None
    decade = None
    year = None
    month = None
    day = None
    page = None

    def __init__(self, day=None, month=None, year=None, decade=None,
                 century=None, page=None):
        self.day = day
        self.month = month
        self.year = year
        self.decade = decade
        self.century = century
        # Derive the coarser granularities.  Floor division (//) keeps
        # these ints under Python 3 too; plain / would yield floats there
        # (identical to / on ints under Python 2).
        if self.year != None:
            self.decade = (self.year // 10) * 10
        if self.decade != None:
            self.century = (self.decade // 100) * 100
        self.page = page

    def __str__(self):
        # Render at the finest granularity available.
        if (self.day):
            return MONTHS[self.month] + " " + str(self.day) + ", " + str(self.year)
        if (self.month):
            return MONTHS[self.month] + " " + str(self.year)
        if (self.year):
            return str(self.year)
        if (self.decade):
            return str(self.decade) + "s"
        else:
            return str(self.century) + "s"

    def string(self):
        """Like str(), but prefixes "the" to decade/century forms."""
        s = str(self)
        if ' ' in s:
            return s
        if s[-1] == 's':
            return "the " + s
        return s

    def within(self, begin, end):
        """True when this date falls inside [begin, end] at its coarsest
        known year resolution."""
        selfyear = self.year or self.decade or self.century
        beginyear = begin.year or begin.decade or begin.century
        endyear = end.year or end.decade or end.century
        return beginyear <= selfyear <= endyear
class Location(Fact):
    """A physical place.

    Tracks the events that occurred here, plus (possibly partial)
    containment relations with other locations -- both the locations this
    one is within and the ones it contains.
    """

    events = None
    contains = None
    within = None

    def __init__(
        self,
        name="unnamed location",
        events=None,
        contains=None,
        within=None,
        pronoun="it",
        page=None,
    ):
        self.name = name
        self.pronoun = pronoun
        self.page = page
        self.events = events or set()
        self.contains = contains or set()
        self.within = within or set()

    @nonrecursive
    def __repr__(self):
        return ("Location(name=%r, events=%r, contains=%r, within=%r, "
                "pronoun=%r)"
                % (self.name, self.events, self.contains, self.within,
                   self.pronoun))
class Person(Fact):
    """A historical personage.

    People have Events for their birth and death, a set of events they
    participated in, and a complementary set of events they caused.
    """

    birth = None
    death = None
    # Declared as None (not set() instances) to match the sibling classes
    # and avoid shared mutable class attributes; __init__ always assigns
    # fresh per-instance sets.
    caused = None
    participated = None

    def __init__(
        self,
        name="unnamed person",
        birth=None,
        death=None,
        caused=None,
        participated=None,
        pronoun="ey",
        page=None,
    ):
        self.name = name
        self.birth = birth
        self.death = death
        self.caused = caused or set()
        self.participated = participated or set()
        self.pronoun = pronoun
        self.page = page

    @nonrecursive
    def __repr__(self):
        return ("Person(name=%r, birth=%r, death=%r, caused=%r, "
                "participated=%r, pronoun=%r)"
                % (self.name, self.birth, self.death, self.caused,
                   self.participated, self.pronoun))
class Event(Fact):
    """An abstract occurrence.

    Carries a start/end time span, the location it occurred in, causal
    links to other events, and the facts that instigated or took part in
    it, along with the (subject, verb, object) triple it was mined from.
    """

    start = None
    end = None
    location = None
    causes = None
    results = None
    instigators = None
    participants = None
    subject = None
    verb = None
    object = None

    def __init__(
        self,
        name="unnamed event",
        start=None,
        end=None,
        when=None,
        where=None,
        causes=None,
        results=None,
        instigators=None,
        participants=None,
        subject=None,
        verb=None,
        object=None,
        pronoun="it",
        page=None,
    ):
        self.name = name
        # An explicit start/end span wins; otherwise `when` makes a point
        # event; otherwise the timing stays None.
        if start or end:
            self.start, self.end = start, end
        elif when:
            self.start = self.end = when
        self.location = where
        self.causes = causes or set()
        self.results = results or set()
        self.instigators = instigators or set()
        self.participants = participants or set()
        self.subject = subject
        self.verb = verb
        self.object = object
        self.pronoun = pronoun
        self.page = page

    @nonrecursive
    def __repr__(self):
        return ("Event(name=%r, start=%r, end=%r, location=%r, causes=%r, "
                "results=%r, instigators=%r, participants=%r, subject=%r, "
                "verb=%r, object=%r, pronoun=%r)"
                % (self.name, self.start, self.end, self.location,
                   self.causes, self.results, self.instigators,
                   self.participants, self.subject, self.verb, self.object,
                   self.pronoun))
class Group(Fact):
    """Any organizational unit of people; keeps a set of members."""

    members = None

    def __init__(
        self,
        name="unnamed group",
        members=None,
        pronoun="they",
        page=None,
    ):
        self.name = name
        self.pronoun = pronoun
        self.page = page
        self.members = members or set()

    @nonrecursive
    def __repr__(self):
        return ("Group(name=%r, members=%r, pronoun=%r)"
                % (self.name, self.members, self.pronoun))
class Object(Fact):
    """
    An Object is an inanimate artifact (presumably of historical significance).
    Objects have a creator (which might be a Person or Group), an Event that
    signifies their creation, and a location.
    """
    creation = None
    creator = None
    location = None
    def __init__(
        self,
        name="unnamed object",
        creation=None,
        creator=None,
        location=None,
        pronoun="it",
        page=None,
    ):
        self.name = name
        self.creation = creation
        self.creator = creator
        self.location = location
        self.pronoun = pronoun
        self.page = page
    @nonrecursive
    def __repr__(self):
        # Bug fix: this previously reported "Group(...)" -- a copy-paste
        # slip from the Group class above.
        return "Object(name=" + repr(self.name) + ", " +\
            "creation=" + repr(self.creation) + ", " +\
            "creator=" + repr(self.creator) + ", " +\
            "location=" + repr(self.location) + ", " +\
            "pronoun=" + repr(self.pronoun) + ")"
| Python |
# Entry point: start an interactive conversation session.  Ctrl-C exits
# cleanly via sys.exit() instead of printing a traceback.
import manage, sys

manager = manage.Manager()
try:
    manager.converse()
except KeyboardInterrupt:
    sys.exit()
from knowledge import *
x = Event(
name="a terrible tragedy",
)
print type(x)
y = Person(
name="Bond, James Bond",
birth=Event(
name="the birth of 'none'",
when=Date(1564, 3, 4)
),
)
x.instigators.add(y)
print [str(i) for i in x.instigators]
print x.instigators.pop().birth.start
print [str(i) for i in x.instigators]
| Python |
# OGDEN - last updated for NodeBox 1.9.4
# Author: Tom De Smedt <tomdesmedt@organisms.be>
# See LICENSE.txt for details.
# Based on Charles K. Ogden list of basic English words:
# http://ogden.basic-english.org
import os

# Load Ogden's word list: one word per line, followed by its
# part-of-speech tags, space separated.
path = os.path.join(os.path.dirname(__file__), "ogden_2000.txt")
words = open(path).readlines()
words = [x.split(" ") for x in words]
# Case-insensitive sort.  BUG FIX: b[0].lower must be *called* --
# the original compared a string against the bound method object,
# yielding an arbitrary ordering.
words.sort(lambda a, b: cmp(a[0].lower(), b[0].lower()))
nouns = [word for word, tags in words if "NN" in tags]
verbs = [word for word, tags in words if "VB" in tags]
adjectives = [word for word, tags in words if "JJ" in tags]
adverbs = [word for word, tags in words if "RB" in tags]
words = [word for word, tags in words]
| Python |
# VERB - last updated for NodeBox 1rc7
# Author: Tom De Smedt <tomdesmedt@organisms.be>
# See LICENSE.txt for details.
# The verb.txt morphology was adapted from the XTAG morph_english.flat:
# http://www.cis.upenn.edu/~xtag/
# Each verb and its tenses is a list,
# indexed according to the following keys:
# Maps a human-readable tense name to the column index of that form in
# a verb's row of verb.txt (see verb_tenses below).  Negated forms,
# where a verb has them, live at index + len(verb_tenses_keys).
verb_tenses_keys = {
    "infinitive"           : 0,
    "1st singular present" : 1,
    "2nd singular present" : 2,
    "3rd singular present" : 3,
    "present plural"       : 4,
    "present participle"   : 5,
    "1st singular past"    : 6,
    "2nd singular past"    : 7,
    "3rd singular past"    : 8,
    "past plural"          : 9,
    "past"                 : 10,
    "past participle"      : 11
}
# Shorthand spellings accepted anywhere a full tense name is expected.
verb_tenses_aliases = {
    "inf"     : "infinitive",
    "1sgpres" : "1st singular present",
    "2sgpres" : "2nd singular present",
    "3sgpres" : "3rd singular present",
    "pl"      : "present plural",
    "prog"    : "present participle",
    "1sgpast" : "1st singular past",
    "2sgpast" : "2nd singular past",
    "3sgpast" : "3rd singular past",
    "pastpl"  : "past plural",
    "ppart"   : "past participle"
}
# Each verb has morphs for infinitive,
# 3rd singular present, present participle,
# past and past participle.
# Verbs like "be" have other morphs as well
# (i.e. I am, you are, she is, they aren't)
# Additionally, the following verbs can be negated:
# be, can, do, will, must, have, may, need, dare, ought.
# verb_tenses maps an infinitive to its full comma-separated row of
# inflected forms from verb.txt; columns are indexed through
# verb_tenses_keys above.
verb_tenses = {}
import os
path = os.path.join(os.path.dirname(__file__), "verb.txt")
data = open(path).readlines()
# Iterate lines directly instead of the original range(len(data))
# index loop -- same rows, clearer intent.
for line in data:
    row = line.strip().split(",")
    verb_tenses[row[0]] = row
# Each verb can be lemmatised:
# inflected morphs of the verb point
# to its infinitive in this dictionary.
verb_lemmas = {}
for infinitive in verb_tenses:
    for tense in verb_tenses[infinitive]:
        if tense != "":
            verb_lemmas[tense] = infinitive
def verb_infinitive(v):
    """Returns the uninflected form of the verb, or "" when the form
    is not in the lexicon.
    """
    # dict.get replaces the original bare try/except, which silently
    # swallowed *every* error, not just a missing key.
    return verb_lemmas.get(v, "")
def verb_conjugate(v, tense="infinitive", negate=False):
    """Inflects the verb to the given tense.
    For example: be
    present: I am, you are, she is,
    present participle: being,
    past: I was, you were, he was,
    past participle: been,
    negated present: I am not, you aren't, it isn't.
    """
    infinitive = verb_infinitive(v)
    column = verb_tenses_keys[tense]
    if negate is True:
        # Negated forms are stored in a second bank of columns.
        column += len(verb_tenses_keys)
    return verb_tenses[infinitive][column]
def verb_present(v, person="", negate=False):
    """Inflects the verb in the present tense.
    The person can be specified with 1, 2, 3, "1st", "2nd", "3rd", "plural", "*".
    Some verbs like be, have, must, can be negated.
    """
    # Normalise "1st"/"2nd"/"3rd"/"plural"/"pl" down to "1"/"2"/"3"/"*".
    person = str(person).replace("pl", "*").strip("stndrgural")
    tense_by_person = {
        "1": "1st singular present",
        "2": "2nd singular present",
        "3": "3rd singular present",
        "*": "present plural",
    }
    tense = tense_by_person.get(person)
    if tense is not None:
        inflected = verb_conjugate(v, tense, negate)
        if inflected != "":
            return inflected
    # Fall back to the uninflected form when no person-specific form exists.
    return verb_conjugate(v, "infinitive", negate)
def verb_present_participle(v):
    """Inflects the verb in the present participle.
    For example:
        give -> giving, be -> being, swim -> swimming
    """
    return verb_conjugate(v, tense="present participle")
def verb_past(v, person="", negate=False):
    """Inflects the verb in the past tense.
    The person can be specified with 1, 2, 3, "1st", "2nd", "3rd", "plural", "*".
    Some verbs like be, have, must, can be negated.
    For example:
        give -> gave, be -> was, swim -> swam
    """
    # Normalise "1st"/"2nd"/"3rd"/"plural"/"pl" down to "1"/"2"/"3"/"*".
    person = str(person).replace("pl", "*").strip("stndrgural")
    tense_by_person = {
        "1": "1st singular past",
        "2": "2nd singular past",
        "3": "3rd singular past",
        "*": "past plural",
    }
    tense = tense_by_person.get(person)
    if tense is not None:
        inflected = verb_conjugate(v, tense, negate)
        if inflected != "":
            return inflected
    # Fall back to the generic past form when no person-specific form exists.
    return verb_conjugate(v, "past", negate)
def verb_past_participle(v):
    """Inflects the verb in the past participle.
    For example:
        give -> given, be -> been, swim -> swum
    """
    # Doc fix: the original docstring said "present participle" -- a
    # copy/paste slip from verb_present_participle() above.
    return verb_conjugate(v, "past participle")
def verb_all_tenses():
    """Returns all possible verb tenses."""
    return list(verb_tenses_keys)
def verb_tense(v):
    """Returns a string from verb_tenses_keys representing the verb's tense.
    For example:
        given -> "past participle"
    Returns None when the form cannot be matched.
    """
    infinitive = verb_infinitive(v)
    a = verb_tenses[infinitive]
    n = len(verb_tenses_keys)
    for tense in verb_tenses_keys:
        if a[verb_tenses_keys[tense]] == v:
            return tense
        # Negated forms occupy a second bank of columns; rows for verbs
        # that cannot be negated are shorter, so guard the index to
        # avoid the IndexError the original could raise.
        negated_index = verb_tenses_keys[tense] + n
        if negated_index < len(a) and a[negated_index] == v:
            return tense
def verb_is_tense(v, tense, negated=False):
    """Checks whether the verb is in the given tense."""
    # Expand shorthand like "ppart" to its full tense name first.
    full_name = verb_tenses_aliases.get(tense, tense)
    return verb_tense(v) == full_name
def verb_is_present(v, person="", negated=False):
    """Checks whether the verb is in the present tense."""
    person = str(person).replace("*", "plural")
    tense = verb_tense(v)
    # Guard clauses replace the original nested conditionals.
    if tense is None or "present" not in tense or person not in tense:
        return False
    if negated is False:
        return True
    return "n't" in v or " not" in v
def verb_is_present_participle(v):
    """Checks whether the verb is in present participle."""
    return verb_tense(v) == "present participle"
def verb_is_past(v, person="", negated=False):
    """Checks whether the verb is in the past tense."""
    person = str(person).replace("*", "plural")
    tense = verb_tense(v)
    # Guard clauses replace the original nested conditionals.
    if tense is None or "past" not in tense or person not in tense:
        return False
    if negated is False:
        return True
    return "n't" in v or " not" in v
def verb_is_past_participle(v):
    """Checks whether the verb is in past participle."""
    return verb_tense(v) == "past participle"
#print verb_present("have", person=3)
#print verb_present_participle("swim")
#print verb_past("swim")
#print verb_past_participle("give")
#print verb_tense("given")
#print verb_is_tense("am", "1st singular present")
#print verb_is_present("am", person=1, negated=False)
#print verb_is_present_participle("doing") | Python |
# Module wordnet.py
#
# Original author: Oliver Steele <steele@osteele.com>
# Project Page: http://sourceforge.net/projects/pywordnet
#
# Copyright (c) 1998-2004 by Oliver Steele. Use is permitted under
# the Artistic License
# <http://www.opensource.org/licenses/artistic-license.html>
"""Utility functions to use with the wordnet module.
Usage
-----
>>> dog = N['dog'][0]
# (First 10) adjectives that are transitively SIMILAR to the main sense of 'red'
>>> closure(ADJ['red'][0], SIMILAR)[:10]
['red' in {adjective: red, reddish, ruddy, blood-red, carmine, cerise, cherry, cherry-red, crimson, ruby, ruby-red, scarlet}, {adjective: chromatic}, {adjective: amber, brownish-yellow, yellow-brown}, {adjective: amethyst}, {adjective: aureate, gilded, gilt, gold, golden}, {adjective: azure, cerulean, sky-blue, bright blue}, {adjective: blue, bluish, blueish, light-blue, dark-blue, blue-black}, {adjective: bluish green, blue-green, cyan, teal}, {adjective: blushful, rosy}, {adjective: bottle-green}]
>>> # Adjectives that are transitively SIMILAR to any of the senses of 'red'
>>> #flatten1(map(lambda sense:closure(sense, SIMILAR), ADJ['red'])) # too verbose
>>> # Hyponyms of the main sense of 'dog'(n.) that are homophonous with verbs
>>> filter(lambda sense:V.get(sense.form), flatten1(map(lambda e:e.getSenses(), hyponyms(N['dog'][0]))))
['dog' in {noun: dog, domestic dog, Canis familiaris}, 'pooch' in {noun: pooch, doggie, doggy, barker, bow-wow}, 'toy' in {noun: toy dog, toy}, 'hound' in {noun: hound, hound dog}, 'basset' in {noun: basset, basset hound}, 'cocker' in {noun: cocker spaniel, English cocker spaniel, cocker}, 'bulldog' in {noun: bulldog, English bulldog}]
>>> # Find the senses of 'raise'(v.) and 'lower'(v.) that are antonyms
>>> filter(lambda p:p[0] in p[1].pointerTargets(ANTONYM), product(V['raise'].getSenses(), V['lower'].getSenses()))
[('raise' in {verb: raise, lift, elevate, get up, bring up}, 'lower' in {verb: lower, take down, let down, get down, bring down})]
"""
__author__ = "Oliver Steele <steele@osteele.com>"
__version__ = "2.0"
from wordnet import *
#
# Domain utilities
#
def _requireSource(entity):
    """Raise TypeError unless `entity` can originate pointers (i.e. is a
    Sense or Synset).  Words are rejected with a hint to index a sense
    first.

    Modernised from the Python 2-only statement form ``raise TypeError,
    msg`` and backquote repr; the message text is unchanged.
    """
    if not hasattr(entity, 'pointers'):
        if isinstance(entity, Word):
            raise TypeError(repr(entity) + " is not a Sense or Synset. Try " + repr(entity) + "[0] instead.")
        else:
            raise TypeError(repr(entity) + " is not a Sense or Synset")
def tree(source, pointerType):
    """Return a nested list tracing pointerType links out from source:
    the source itself followed by one subtree per pointer target.  For a
    Word, return one tree per sense.
    >>> dog = N['dog'][0]
    >>> from pprint import pprint
    >>> pprint(tree(dog, HYPERNYM))
    ['dog' in {noun: dog, domestic dog, Canis familiaris},
     [{noun: canine, canid},
      [{noun: carnivore},
       [{noun: placental, placental mammal, eutherian, eutherian mammal},
        [{noun: mammal},
         [{noun: vertebrate, craniate},
          [{noun: chordate},
           [{noun: animal, animate being, beast, brute, creature, fauna},
            [{noun: organism, being},
             [{noun: living thing, animate thing},
              [{noun: object, physical object}, [{noun: entity}]]]]]]]]]]]]
    >>> #pprint(tree(dog, HYPONYM)) # too verbose to include here
    """
    if isinstance(source, Word):
        return [tree(sense, pointerType) for sense in source.getSenses()]
    _requireSource(source)
    branches = [tree(target, pointerType)
                for target in source.pointerTargets(pointerType)]
    return [source] + branches
def closure(source, pointerType, accumulator=None):
    """Return the transitive closure of source under the pointerType
    relationship. If source is a Word, return the union of the
    closures of its senses.
    >>> dog = N['dog'][0]
    >>> closure(dog, HYPERNYM)
    ['dog' in {noun: dog, domestic dog, Canis familiaris}, {noun: canine, canid}, {noun: carnivore}, {noun: placental, placental mammal, eutherian, eutherian mammal}, {noun: mammal}, {noun: vertebrate, craniate}, {noun: chordate}, {noun: animal, animate being, beast, brute, creature, fauna}, {noun: organism, being}, {noun: living thing, animate thing}, {noun: object, physical object}, {noun: entity}]
    """
    if isinstance(source, Word):
        # BUG FIX: the original mapped tree() here, which returns nested
        # lists -- not the flat "union of the closures of its senses"
        # the docstring promises.  Recurse into closure() instead.
        return reduce(union, map(lambda s, t=pointerType:closure(s,t), source.getSenses()))
    _requireSource(source)
    if accumulator is None:
        accumulator = []
    if source not in accumulator:
        accumulator.append(source)
        for target in source.pointerTargets(pointerType):
            closure(target, pointerType, accumulator)
    return accumulator
def hyponyms(source):
    """Return source together with everything transitively below it via
    HYPONYM pointers; for a Word, the union over all of its senses."""
    return closure(source, HYPONYM)
def hypernyms(source):
    """Return source together with everything transitively above it via
    HYPERNYM pointers; for a Word, the union over all of its senses."""
    return closure(source, HYPERNYM)
def meet(a, b, pointerType=HYPERNYM):
    """Return the meet of a and b under the pointerType relationship.
    >>> meet(N['dog'][0], N['cat'][0])
    {noun: carnivore}
    >>> meet(N['dog'][0], N['person'][0])
    {noun: organism, being}
    >>> meet(N['thought'][0], N['belief'][0])
    {noun: content, cognitive content, mental object}
    """
    # The first element shared by both closures; None when disjoint.
    shared = intersection(closure(a, pointerType), closure(b, pointerType))
    if shared:
        return shared[0]
    return None
#
# String Utility Functions
#
def startsWith(str, prefix):
    """Return true iff _str_ starts with _prefix_.
    >>> startsWith('unclear', 'un')
    1
    """
    # Delegate to the built-in, which handles the edge cases (prefix
    # longer than the string, empty prefix) the same way the slice did.
    return str.startswith(prefix)
def endsWith(str, suffix):
    """Return true iff _str_ ends with _suffix_.
    >>> endsWith('clearly', 'ly')
    1
    """
    # BUG FIX: the slice form str[-len(suffix):] broke for an empty
    # suffix (-0 slices the whole string, so the result was false);
    # str.endswith('') is correctly true.
    return str.endswith(suffix)
def equalsIgnoreCase(a, b):
    """Return true iff a and b have the same lowercase representation.
    >>> equalsIgnoreCase('dog', 'Dog')
    1
    >>> equalsIgnoreCase('dOg', 'DOG')
    1
    """
    # test a == b first as an optimization where they're equal; the
    # str.lower() method replaces the deprecated string.lower() function.
    return a == b or a.lower() == b.lower()
#
# Sequence Utility Functions
#
def issequence(item):
    """Return true iff _item_ is a Sequence (a List, String, or Tuple).
    >>> issequence((1,2))
    1
    >>> issequence([1,2])
    1
    >>> issequence('12')
    1
    >>> issequence(1)
    0
    """
    # The builtin type objects are exactly what types.ListType,
    # StringType and TupleType aliased in Python 2; spelling them
    # directly keeps the exact-type semantics and also runs on Python 3.
    return type(item) in (list, str, tuple)
def intersection(u, v):
    """Return the intersection of _u_ and _v_.
    >>> intersection((1,2,3), (2,3,4))
    [2, 3]
    """
    # Keeps u's order (and any duplicates u contains), as before.
    return [element for element in u if element in v]
def union(u, v):
    """Return the union of _u_ and _v_, as a list in first-seen order.
    >>> union((1,2,3), (2,3,4))
    [1, 2, 3, 4]
    """
    # list(u) always builds a fresh list, so the original
    # "if w is u: copy" branch was dead code and has been removed.
    w = list(u)
    for e in v:
        if e not in w:
            w.append(e)
    return w
def product(u, v):
    """Return the Cartesian product of u and v.
    >>> product("123", "abc")
    [('1', 'a'), ('1', 'b'), ('1', 'c'), ('2', 'a'), ('2', 'b'), ('2', 'c'), ('3', 'a'), ('3', 'b'), ('3', 'c')]
    """
    # Same pair order as the original map/lambda/flatten1 pipeline.
    return [(a, b) for a in u for b in v]
def removeDuplicates(sequence):
    """Return a copy of _sequence_ with equal items removed.
    >>> removeDuplicates("this is a test")
    ['t', 'h', 'i', 's', ' ', 'a', 'e']
    >>> removeDuplicates(map(lambda tuple:apply(meet, tuple), product(N['story'].getSenses(), N['joke'].getSenses())))
    [{noun: message, content, subject matter, substance}, None, {noun: abstraction}, {noun: communication}]
    """
    # Membership uses ==, so this works for unhashable items too.
    seen = []
    for element in sequence:
        if element not in seen:
            seen.append(element)
    return seen
#
# Tree Utility Functions
#
def flatten1(sequence):
    """Flatten one level: splice the elements of any list or tuple in
    _sequence_ into the result; append every other item unchanged."""
    # The builtin tuple/list type objects are what types.TupleType and
    # ListType aliased in Python 2 -- same exact-type check, Python 3
    # compatible.
    accumulator = []
    for item in sequence:
        if type(item) == tuple:
            item = list(item)
        if type(item) == list:
            accumulator.extend(item)
        else:
            accumulator.append(item)
    return accumulator
#
# WordNet utilities
#
# (old, new) character rewrites tried, in order, by getIndex() below.
GET_INDEX_SUBSTITUTIONS = ((' ', '-'), ('-', ' '), ('-', ''), (' ', ''), ('.', ''))

def getIndex(form, pos='noun'):
    """Search for _form_ in the index file corresponding to
    _pos_. getIndex applies to _form_ an algorithm that replaces
    underscores with hyphens, hyphens with underscores, removes
    hyphens and underscores, and removes periods in an attempt to find
    a form of the string that is an exact match for an entry in the
    index file corresponding to _pos_. getWord() is called on each
    transformed string until a match is found or all the different
    strings have been tried. It returns a Word or None."""
    def trySubstitutions(trySubstitutions, form, substitutions, lookup=1, dictionary=dictionaryFor(pos)):
        if lookup and dictionary.has_key(form):
            return dictionary[form]
        elif substitutions:
            (old, new) = substitutions[0]
            # BUG FIX: the original read `substitute = string.replace(...)
            # and substitute != form`, referencing `substitute` before
            # assignment (NameError).  Compute the substitution, then
            # discard it when it changed nothing.
            substitute = string.replace(form, old, new)
            if substitute == form:
                substitute = None
            if substitute and dictionary.has_key(substitute):
                return dictionary[substitute]
            return trySubstitutions(trySubstitutions, form, substitutions[1:], lookup=0) or \
                   (substitute and trySubstitutions(trySubstitutions, substitute, substitutions[1:]))
    # BUG FIX: the original passed the undefined name `returnMatch` here,
    # raising NameError on every call.
    return trySubstitutions(trySubstitutions, form, GET_INDEX_SUBSTITUTIONS)
# Suffix-rewrite rules tried, in order, by morphy() when uninflecting a
# form: ('ses', 's') means "replace a trailing 'ses' with 's'".
# ADVERB has no productive inflections, hence the empty list.
MORPHOLOGICAL_SUBSTITUTIONS = {
    NOUN:
      [('s', ''),
       ('ses', 's'),
       ('ves', 'f'),
       ('xes', 'x'),
       ('zes', 'z'),
       ('ches', 'ch'),
       ('shes', 'sh'),
       ('men', 'man'),
       ('ies', 'y')],
    VERB:
      [('s', ''),
       ('ies', 'y'),
       ('es', 'e'),
       ('es', ''),
       ('ed', 'e'),
       ('ed', ''),
       ('ing', 'e'),
       ('ing', '')],
    ADJECTIVE:
      [('er', ''),
       ('est', ''),
       ('er', 'e'),
       ('est', 'e')],
    ADVERB: []}
def morphy(form, pos='noun', collect=0):
    """Recursively uninflect _form_, and return the first form found
    in the dictionary. If _collect_ is true, a sequence of all forms
    is returned, instead of just the first one.
    >>> morphy('dogs')
    'dog'
    >>> morphy('churches')
    'church'
    >>> morphy('aardwolves')
    'aardwolf'
    >>> morphy('abaci')
    'abacus'
    >>> morphy('hardrock', 'adv')
    """
    from wordnet import _normalizePOS, _dictionaryFor
    pos = _normalizePOS(pos)
    # Irregular forms live in per-POS exception files, e.g. noun.exc.
    fname = os.path.join(WNSEARCHDIR, {NOUN: 'noun', VERB: 'verb', ADJECTIVE: 'adj', ADVERB: 'adv'}[pos] + '.exc')
    excfile = open(fname)
    substitutions = MORPHOLOGICAL_SUBSTITUTIONS[pos]
    # The helper receives itself as its first argument -- a workaround
    # for the lack of nested closures in Python < 2.1.  The default
    # arguments snapshot per-call state; `collection=[]` is safe because
    # this def re-executes (fresh list) on every morphy() call, and the
    # list is deliberately shared across the recursion when collecting.
    def trySubstitutions(trySubstitutions, # workaround for lack of nested closures in Python < 2.1
                         form, # reduced form
                         substitutions, # remaining substitutions
                         lookup=1,
                         dictionary=_dictionaryFor(pos),
                         excfile=excfile,
                         collect=collect,
                         collection=[]):
        import string
        # Consult the (sorted) exception file first; a hit rewrites the
        # form to the lemma recorded after the first space.
        exceptions = binarySearchFile(excfile, form)
        if exceptions:
            form = exceptions[string.find(exceptions, ' ')+1:-1]
        if lookup and dictionary.has_key(form):
            if collect:
                collection.append(form)
            else:
                return form
        elif substitutions:
            # Try the next suffix rule, recursing both with and without
            # the substitution applied.
            old, new = substitutions[0]
            substitutions = substitutions[1:]
            substitute = None
            if endsWith(form, old):
                substitute = form[:-len(old)] + new
                #if dictionary.has_key(substitute):
                # return substitute
            form = trySubstitutions(trySubstitutions, form, substitutions) or \
                   (substitute and trySubstitutions(trySubstitutions, substitute, substitutions))
            return (collect and collection) or form
        elif collect:
            return collection
    return trySubstitutions(trySubstitutions, form, substitutions)
#
# Testing
#
def _test(reset=0):
    """Run this module's doctests.  Pass reset=1 after a reload() to
    clear doctest's bookkeeping, which would otherwise complain."""
    import doctest, wntools
    if reset:
        doctest.master = None
    return doctest.testmod(wntools)
| Python |
# Module wordnet.py
#
# Original author: Oliver Steele <steele@osteele.com>
# Project Page: http://sourceforge.net/projects/pywordnet
#
# Copyright (c) 1998-2004 by Oliver Steele. Use is permitted under
# the Artistic License
# <http://www.opensource.org/licenses/artistic-license.html>
"""An OO interface to the WordNet database.
Usage
-----
>>> from wordnet import *
>>> # Retrieve words from the database
>>> N['dog']
dog(n.)
>>> V['dog']
dog(v.)
>>> ADJ['clear']
clear(adj.)
>>> ADV['clearly']
clearly(adv.)
>>> # Examine a word's senses and pointers:
>>> N['dog'].getSenses()
('dog' in {noun: dog, domestic dog, Canis familiaris}, 'dog' in {noun: frump, dog}, 'dog' in {noun: dog}, 'dog' in {noun: cad, bounder, blackguard, dog, hound, heel}, 'dog' in {noun: frank, frankfurter, hotdog, hot dog, dog, wiener, wienerwurst, weenie}, 'dog' in {noun: pawl, detent, click, dog}, 'dog' in {noun: andiron, firedog, dog, dog-iron})
>>> # Extract the first sense
>>> dog = N['dog'][0] # aka N['dog'].getSenses()[0]
>>> dog
'dog' in {noun: dog, domestic dog, Canis familiaris}
>>> dog.getPointers()[:5]
(hypernym -> {noun: canine, canid}, member meronym -> {noun: Canis, genus Canis}, member meronym -> {noun: pack}, hyponym -> {noun: pooch, doggie, doggy, barker, bow-wow}, hyponym -> {noun: cur, mongrel, mutt})
>>> dog.getPointerTargets(MEMBER_MERONYM)
[{noun: Canis, genus Canis}, {noun: pack}]
"""
__author__ = "Oliver Steele <steele@osteele.com>"
__version__ = "2.0.1"
import string
import os
from os import environ
from types import IntType, ListType, StringType, TupleType
#
# Configuration variables
#
# Root of the WordNet installation.  Overridable via the WNHOME
# environment variable, with per-platform fallbacks keyed on os.name.
WNHOME = environ.get('WNHOME', {
    'mac': ":",
    'dos': "C:\\wn16",
    'nt': "C:\\Program Files\\WordNet\\2.0"}
    .get(os.name, "/usr/local/wordnet2.0"))
# Directory containing the index and data files ("dict", or "Database"
# on classic Mac OS); overridable via WNSEARCHDIR.
WNSEARCHDIR = environ.get('WNSEARCHDIR', os.path.join(WNHOME, {'mac': "Database"}.get(os.name, "dict")))
ReadableRepresentations = 1
"""If true, repr(word), repr(sense), and repr(synset) return
human-readable strings instead of strings that evaluate to an object
equal to the argument.
This breaks the contract for repr, but it makes the system much more
usable from the command line."""
# Nonzero enables tracing of index lookups (debugging aid).
_TraceLookups = 0
_FILE_OPEN_MODE = os.name in ('dos', 'nt') and 'rb' or 'r' # work around a Windows Python bug
#
# Enumerated types
#
NOUN = 'noun'
VERB = 'verb'
ADJECTIVE = 'adjective'
ADVERB = 'adverb'
PartsOfSpeech = (NOUN, VERB, ADJECTIVE, ADVERB)
ANTONYM = 'antonym'
HYPERNYM = 'hypernym'
HYPONYM = 'hyponym'
ATTRIBUTE = 'attribute'
ALSO_SEE = 'also see'
ENTAILMENT = 'entailment'
CAUSE = 'cause'
VERB_GROUP = 'verb group'
MEMBER_MERONYM = 'member meronym'
SUBSTANCE_MERONYM = 'substance meronym'
PART_MERONYM = 'part meronym'
MEMBER_HOLONYM = 'member holonym'
SUBSTANCE_HOLONYM = 'substance holonym'
PART_HOLONYM = 'part holonym'
SIMILAR = 'similar'
PARTICIPLE_OF = 'participle of'
PERTAINYM = 'pertainym'
# New in wn 2.0:
FRAMES = 'frames'
CLASSIF_CATEGORY = 'domain category'
CLASSIF_USAGE = 'domain usage'
CLASSIF_REGIONAL = 'domain regional'
CLASS_CATEGORY = 'class category'
CLASS_USAGE = 'class usage'
CLASS_REGIONAL = 'class regional'
POINTER_TYPES = (
ANTONYM,
HYPERNYM,
HYPONYM,
ATTRIBUTE,
ALSO_SEE,
ENTAILMENT,
CAUSE,
VERB_GROUP,
MEMBER_MERONYM,
SUBSTANCE_MERONYM,
PART_MERONYM,
MEMBER_HOLONYM,
SUBSTANCE_HOLONYM,
PART_HOLONYM,
SIMILAR,
PARTICIPLE_OF,
PERTAINYM,
# New in wn 2.0:
FRAMES,
CLASSIF_CATEGORY,
CLASSIF_USAGE,
CLASSIF_REGIONAL,
CLASS_CATEGORY,
CLASS_USAGE,
CLASS_REGIONAL,
)
ATTRIBUTIVE = 'attributive'
PREDICATIVE = 'predicative'
IMMEDIATE_POSTNOMINAL = 'immediate postnominal'
ADJECTIVE_POSITIONS = (ATTRIBUTIVE, PREDICATIVE, IMMEDIATE_POSTNOMINAL, None)
VERB_FRAME_STRINGS = (
None,
"Something %s",
"Somebody %s",
"It is %sing",
"Something is %sing PP",
"Something %s something Adjective/Noun",
"Something %s Adjective/Noun",
"Somebody %s Adjective",
"Somebody %s something",
"Somebody %s somebody",
"Something %s somebody",
"Something %s something",
"Something %s to somebody",
"Somebody %s on something",
"Somebody %s somebody something",
"Somebody %s something to somebody",
"Somebody %s something from somebody",
"Somebody %s somebody with something",
"Somebody %s somebody of something",
"Somebody %s something on somebody",
"Somebody %s somebody PP",
"Somebody %s something PP",
"Somebody %s PP",
"Somebody's (body part) %s",
"Somebody %s somebody to INFINITIVE",
"Somebody %s somebody INFINITIVE",
"Somebody %s that CLAUSE",
"Somebody %s to somebody",
"Somebody %s to INFINITIVE",
"Somebody %s whether INFINITIVE",
"Somebody %s somebody into V-ing something",
"Somebody %s something with something",
"Somebody %s INFINITIVE",
"Somebody %s VERB-ing",
"It %s that CLAUSE",
"Something %s INFINITIVE")
#
# Domain classes
#
class Word:
    """An index into the database.
    Each word has one or more Senses, which can be accessed via
    ``word.getSenses()`` or through the index notation, ``word[n]``.
    Fields
    ------
    form : string
        The orthographic representation of the word.
    pos : string
        The part of speech -- one of NOUN, VERB, ADJECTIVE, ADVERB.
    string : string
        Same as form (for compatability with version 1.0).
    taggedSenseCount : integer
        The number of senses that are tagged.
    Examples
    --------
    >>> N['dog'].pos
    'noun'
    >>> N['dog'].form
    'dog'
    >>> N['dog'].taggedSenseCount
    1
    """
    def __init__(self, line):
        """Initialize the word from a line of a WN POS file."""
        tokens = string.split(line)
        # The integer fields begin after the variable-length list of
        # pointer symbols, whose length is given by tokens[3].
        ints = map(int, tokens[int(tokens[3]) + 4:])
        self.form = string.replace(tokens[0], '_', ' ')
        "Orthographic representation of the word."
        self.pos = _normalizePOS(tokens[1])
        "Part of speech. One of NOUN, VERB, ADJECTIVE, ADVERB."
        self.taggedSenseCount = ints[1]
        "Number of senses that are tagged."
        self._synsetOffsets = ints[2:ints[0]+2]
    def getPointers(self, pointerType=None):
        """Pointers connect senses and synsets, not words.
        Try word[0].getPointers() instead."""
        # BUG FIX: the original raised the docstring itself (a bare
        # string), which is not a legal exception on Python >= 2.6.
        raise TypeError(self.getPointers.__doc__)
    def getPointerTargets(self, pointerType=None):
        """Pointers connect senses and synsets, not words.
        Try word[0].getPointerTargets() instead."""
        # BUG FIX: string raise as above, and the original raised
        # getPointers' docstring instead of this method's own.
        raise TypeError(self.getPointerTargets.__doc__)
    def getSenses(self):
        """Return a sequence of senses.
        >>> N['dog'].getSenses()
        ('dog' in {noun: dog, domestic dog, Canis familiaris}, 'dog' in {noun: frump, dog}, 'dog' in {noun: dog}, 'dog' in {noun: cad, bounder, blackguard, dog, hound, heel}, 'dog' in {noun: frank, frankfurter, hotdog, hot dog, dog, wiener, wienerwurst, weenie}, 'dog' in {noun: pawl, detent, click, dog}, 'dog' in {noun: andiron, firedog, dog, dog-iron})
        """
        # Lazily resolve the synset offsets recorded by __init__ into
        # Sense objects, caching the tuple on first access.
        if not hasattr(self, '_senses'):
            def getSense(offset, pos=self.pos, form=self.form):
                return getSynset(pos, offset)[form]
            self._senses = tuple(map(getSense, self._synsetOffsets))
            del self._synsetOffsets
        return self._senses
    # Deprecated. Present for backwards compatability.
    def senses(self):
        # BUG FIX: previously called the nonexistent self.getSense().
        return self.getSenses()
    def isTagged(self):
        """Return 1 if any sense is tagged.
        >>> N['dog'].isTagged()
        1
        """
        return self.taggedSenseCount > 0
    def getAdjectivePositions(self):
        """Return a sequence of adjective positions that this word can
        appear in. These are elements of ADJECTIVE_POSITIONS.
        >>> ADJ['clear'].getAdjectivePositions()
        [None, 'predicative']
        """
        # A dict is used as a set of the positions seen across senses.
        positions = {}
        for sense in self.getSenses():
            positions[sense.position] = 1
        return positions.keys()
    adjectivePositions = getAdjectivePositions # backwards compatability
    def __cmp__(self, other):
        """
        >>> N['cat'] < N['dog']
        1
        >>> N['dog'] < V['dog']
        1
        """
        return _compareInstances(self, other, ('pos', 'form'))
    def __str__(self):
        """Return a human-readable representation.
        >>> str(N['dog'])
        'dog(n.)'
        """
        abbrs = {NOUN: 'n.', VERB: 'v.', ADJECTIVE: 'adj.', ADVERB: 'adv.'}
        return self.form + "(" + abbrs[self.pos] + ")"
    def __repr__(self):
        """If ReadableRepresentations is true, return a human-readable
        representation, e.g. 'dog(n.)'.
        If ReadableRepresentations is false, return a machine-readable
        representation, e.g. "getWord('dog', 'noun')".
        """
        if ReadableRepresentations:
            return str(self)
        # repr() replaces the deprecated backquote syntax; same output.
        return "getWord" + repr((self.form, self.pos))
    #
    # Sequence protocol (a Word's elements are its Senses)
    #
    def __nonzero__(self):
        return 1
    def __len__(self):
        return len(self.getSenses())
    def __getitem__(self, index):
        return self.getSenses()[index]
    def __getslice__(self, i, j):
        return self.getSenses()[i:j]
class Synset:
    """A set of synonyms that share a common meaning.
    Each synonym contains one or more Senses, which represent a
    specific sense of a specific word. Senses can be retrieved via
    synset.getSenses() or through the index notations synset[0],
    synset[string], or synset[word]. Synsets also originate zero or
    more typed pointers, which can be accessed via
    synset.getPointers() or synset.getPointers(pointerType). The
    targets of a synset pointer can be retrieved via
    synset.getPointerTargets() or
    synset.getPointerTargets(pointerType), which are equivalent to
    map(Pointer.target, synset.getPointerTargets(...)).
    Fields
    ------
    pos : string
        The part of speech -- one of NOUN, VERB, ADJECTIVE, ADVERB.
    offset : integer
        An integer offset into the part-of-speech file. Together
        with pos, this can be used as a unique id.
    gloss : string
        A gloss for the sense.
    verbFrames : [integer]
        A sequence of integers that index into
        VERB_FRAME_STRINGS. These list the verb frames that any
        Sense in this synset participates in. (See also
        Sense.verbFrames.) Defined only for verbs.
        >>> V['think'][0].synset.verbFrames
        (5, 9)
    """
    def __init__(self, pos, offset, line):
        "Initialize the synset from a line off a WN synset file."
        self.pos = pos
        "part of speech -- one of NOUN, VERB, ADJECTIVE, ADVERB."
        self.offset = offset
        """integer offset into the part-of-speech file. Together
        with pos, this can be used as a unique id."""
        # Everything before '|' is whitespace-separated record data;
        # everything after it is the human-readable gloss.
        tokens = string.split(line[:string.index(line, '|')])
        self.ssType = tokens[2]
        self.gloss = string.strip(line[string.index(line, '|') + 1:])
        self.lexname = Lexname.lexnames[int(tokens[1])]
        # tokens[3] is the sense count in hex; each sense uses 2 tokens
        # (form, id), so consume 2 tokens per sense.
        (self._senseTuples, remainder) = _partition(tokens[4:], 2, string.atoi(tokens[3], 16))
        # Next a pointer count followed by 4 tokens per pointer.
        (self._pointerTuples, remainder) = _partition(remainder[1:], 4, int(remainder[0]))
        if pos == VERB:
            # Verb records append frame data: 3 tokens per frame.
            (vfTuples, remainder) = _partition(remainder[1:], 3, int(remainder[0]))
            def extractVerbFrames(index, vfTuples):
                # A frame whose (hex) sense index is 0 applies to every
                # sense in the synset; otherwise only to sense `index`.
                return tuple(map(lambda t:string.atoi(t[1]), filter(lambda t,i=index:string.atoi(t[2],16) in (0, i), vfTuples)))
            senseVerbFrames = []
            for index in range(1, len(self._senseTuples) + 1):
                senseVerbFrames.append(extractVerbFrames(index, vfTuples))
            self._senseVerbFrames = senseVerbFrames
            self.verbFrames = tuple(extractVerbFrames(None, vfTuples))
            """A sequence of integers that index into
            VERB_FRAME_STRINGS. These list the verb frames that any
            Sense in this synset participates in. (See also
            Sense.verbFrames.) Defined only for verbs."""
    def getSenses(self):
        """Return a sequence of Senses.
        >>> N['dog'][0].getSenses()
        ('dog' in {noun: dog, domestic dog, Canis familiaris},)
        """
        # Senses are built lazily from the tuples parsed in __init__ and
        # cached; the raw tuples are deleted once converted.
        if not hasattr(self, '_senses'):
            def loadSense(senseTuple, verbFrames=None, synset=self):
                return Sense(synset, senseTuple, verbFrames)
            if self.pos == VERB:
                self._senses = tuple(map(loadSense, self._senseTuples, self._senseVerbFrames))
                del self._senseVerbFrames
            else:
                self._senses = tuple(map(loadSense, self._senseTuples))
            del self._senseTuples
        return self._senses
    senses = getSenses
    def getPointers(self, pointerType=None):
        """Return a sequence of Pointers.
        If pointerType is specified, only pointers of that type are
        returned. In this case, pointerType should be an element of
        POINTER_TYPES.
        >>> N['dog'][0].getPointers()[:5]
        (hypernym -> {noun: canine, canid}, member meronym -> {noun: Canis, genus Canis}, member meronym -> {noun: pack}, hyponym -> {noun: pooch, doggie, doggy, barker, bow-wow}, hyponym -> {noun: cur, mongrel, mutt})
        >>> N['dog'][0].getPointers(HYPERNYM)
        (hypernym -> {noun: canine, canid},)
        """
        # Pointers are likewise built lazily from the parsed tuples.
        if not hasattr(self, '_pointers'):
            def loadPointer(tuple, synset=self):
                return Pointer(synset.offset, tuple)
            self._pointers = tuple(map(loadPointer, self._pointerTuples))
            del self._pointerTuples
        if pointerType == None:
            return self._pointers
        else:
            _requirePointerType(pointerType)
            return filter(lambda pointer, type=pointerType: pointer.type == type, self._pointers)
    pointers = getPointers # backwards compatability
    def getPointerTargets(self, pointerType=None):
        """Return a sequence of Senses or Synsets.
        If pointerType is specified, only targets of pointers of that
        type are returned. In this case, pointerType should be an
        element of POINTER_TYPES.
        >>> N['dog'][0].getPointerTargets()[:5]
        [{noun: canine, canid}, {noun: Canis, genus Canis}, {noun: pack}, {noun: pooch, doggie, doggy, barker, bow-wow}, {noun: cur, mongrel, mutt}]
        >>> N['dog'][0].getPointerTargets(HYPERNYM)
        [{noun: canine, canid}]
        """
        return map(Pointer.target, self.getPointers(pointerType))
    pointerTargets = getPointerTargets # backwards compatability
    def isTagged(self):
        """Return 1 if any sense is tagged.
        >>> N['dog'][0].isTagged()
        1
        >>> N['dog'][1].isTagged()
        0
        """
        return len(filter(Sense.isTagged, self.getSenses())) > 0
    def __str__(self):
        """Return a human-readable representation.
        >>> str(N['dog'][0].synset)
        '{noun: dog, domestic dog, Canis familiaris}'
        """
        return "{" + self.pos + ": " + string.joinfields(map(lambda sense:sense.form, self.getSenses()), ", ") + "}"
    def __repr__(self):
        """If ReadableRepresentations is true, return a human-readable
        representation, e.g. 'dog(n.)'.
        If ReadableRepresentations is false, return a machine-readable
        representation, e.g. "getSynset(pos, 1234)".
        """
        if ReadableRepresentations:
            return str(self)
        return "getSynset" + `(self.pos, self.offset)`
    def __cmp__(self, other):
        return _compareInstances(self, other, ('pos', 'offset'))
    #
    # Sequence protocol (a Synset's elements are its senses).
    #
    def __nonzero__(self):
        return 1
    def __len__(self):
        """
        >>> len(N['dog'][0].synset)
        3
        """
        return len(self.getSenses())
    def __getitem__(self, idx):
        """
        >>> N['dog'][0].synset[0] == N['dog'][0]
        1
        >>> N['dog'][0].synset['dog'] == N['dog'][0]
        1
        >>> N['dog'][0].synset[N['dog']] == N['dog'][0]
        1
        >>> N['cat'][6]
        'cat' in {noun: big cat, cat}
        """
        # A Word indexes by its form; a string indexes by exact form
        # first, then case-insensitively.
        senses = self.getSenses()
        if isinstance(idx, Word):
            idx = idx.form
        if isinstance(idx, StringType):
            idx = _index(idx, map(lambda sense:sense.form, senses)) or \
                  _index(idx, map(lambda sense:sense.form, senses), _equalsIgnoreCase)
        return senses[idx]
    def __getslice__(self, i, j):
        return self.getSenses()[i:j]
class Sense:
    """A specific meaning of a specific word -- the intersection of a Word and a Synset.
    Fields
    ------
    form : string
        The orthographic representation of the Word this is a Sense of.
    pos : string
        The part of speech -- one of NOUN, VERB, ADJECTIVE, ADVERB
    string : string
        The same as form (for compatability with version 1.0).
    synset : Synset
        The Synset that this Sense is a sense of.
    verbFrames : [integer]
        A sequence of integers that index into
        VERB_FRAME_STRINGS. These list the verb frames that this
        Sense partipates in. Defined only for verbs.
    >>> decide = V['decide'][0].synset # first synset for 'decide'
    >>> decide[0].verbFrames
    (8, 2, 26, 29)
    >>> decide[1].verbFrames
    (8, 2)
    >>> decide[2].verbFrames
    (8, 26, 29)
    """
    def __init__(self, synset, senseTuple, verbFrames=None):
        """Initialize a sense from a synset's (form, idString) senseTuple."""
        # The synset is stored by key (pos, offset) rather than by object
        # reference, to avoid creating a circular reference between
        # Senses and Synsets that would prevent the vm from
        # garbage-collecting them.  See __getattr__, which resolves the
        # 'synset' attribute on demand.
        self.pos = synset.pos                # part of speech
        self.synsetOffset = synset.offset    # key used to re-fetch the synset
        # Verb-frame indices into VERB_FRAME_STRINGS; verbs only.
        self.verbFrames = verbFrames
        (form, idString) = senseTuple
        self.position = None
        if '(' in form:
            # Adjectives may carry a position marker: '(a)', '(p)' or '(ip)'.
            index = form.index('(')
            key = form[index + 1:-1]
            form = form[:index]
            if key == 'a':
                self.position = ATTRIBUTIVE
            elif key == 'p':
                self.position = PREDICATIVE
            elif key == 'ip':
                self.position = IMMEDIATE_POSTNOMINAL
            else:
                # Was a (long-illegal) Python 2 string exception; raise a
                # real exception type instead.
                raise TypeError("unknown attribute " + key)
        # Database files encode spaces in multi-word forms as underscores.
        self.form = form.replace('_', ' ')
    def __getattr__(self, name):
        # 'synset' is a computed ("virtual") slot -- see the note in
        # __init__ about avoiding a circular Sense <-> Synset reference.
        if name == 'synset':
            return getSynset(self.pos, self.synsetOffset)
        elif name == 'lexname':
            return self.synset.lexname
        else:
            raise AttributeError(name)
    def __str__(self):
        """Return a human-readable representation.
        >>> str(N['dog'])
        'dog(n.)'
        """
        return repr(self.form) + " in " + str(self.synset)
    def __repr__(self):
        """If ReadableRepresentations is true, return a human-readable
        representation, e.g. 'dog(n.)'.
        If ReadableRepresentations is false, return a machine-readable
        representation, e.g. "getWord('dog', 'noun')".
        """
        if ReadableRepresentations:
            return str(self)
        return "%s[%s]" % (repr(self.synset), repr(self.form))
    def getPointers(self, pointerType=None):
        """Return a sequence of Pointers.
        If pointerType is specified, only pointers of that type are
        returned. In this case, pointerType should be an element of
        POINTER_TYPES.
        >>> N['dog'][0].getPointers()[:5]
        (hypernym -> {noun: canine, canid}, member meronym -> {noun: Canis, genus Canis}, member meronym -> {noun: pack}, hyponym -> {noun: pooch, doggie, doggy, barker, bow-wow}, hyponym -> {noun: cur, mongrel, mutt})
        >>> N['dog'][0].getPointers(HYPERNYM)
        (hypernym -> {noun: canine, canid},)
        """
        senseIndex = _index(self, self.synset.getSenses())
        def pointsFromThisSense(pointer, selfIndex=senseIndex):
            # sourceIndex 0 means the pointer applies to the whole synset;
            # otherwise it is a 1-based index of the source sense.
            return pointer.sourceIndex == 0 or pointer.sourceIndex - 1 == selfIndex
        return filter(pointsFromThisSense, self.synset.getPointers(pointerType))
    pointers = getPointers # backwards compatability
    def getPointerTargets(self, pointerType=None):
        """Return a sequence of Senses or Synsets.
        If pointerType is specified, only targets of pointers of that
        type are returned. In this case, pointerType should be an
        element of POINTER_TYPES.
        >>> N['dog'][0].getPointerTargets()[:5]
        [{noun: canine, canid}, {noun: Canis, genus Canis}, {noun: pack}, {noun: pooch, doggie, doggy, barker, bow-wow}, {noun: cur, mongrel, mutt}]
        >>> N['dog'][0].getPointerTargets(HYPERNYM)
        [{noun: canine, canid}]
        """
        return map(Pointer.target, self.getPointers(pointerType))
    pointerTargets = getPointerTargets # backwards compatability
    def getSenses(self):
        # A Sense acts as a one-element sequence of itself, mirroring
        # Synset.getSenses() so both types can be used interchangeably.
        return self,
    senses = getSenses # backwards compatability
    def isTagged(self):
        """Return 1 if any sense is tagged.
        >>> N['dog'][0].isTagged()
        1
        >>> N['dog'][1].isTagged()
        0
        """
        word = self.word()
        # Tagged senses sort first within a word's sense list.
        return _index(self, word.getSenses()) < word.taggedSenseCount
    def getWord(self):
        return getWord(self.form, self.pos)
    word = getWord # backwards compatability
    def __cmp__(self, other):
        # Python 2 ordering: first by synset, then by position within the
        # synset's sense list (compared by form).
        def senseIndex(sense, synset=self.synset):
            return _index(sense, synset.getSenses(), testfn=lambda a,b: a.form == b.form)
        return _compareInstances(self, other, ('synset',)) or cmp(senseIndex(self), senseIndex(other))
class Pointer:
    """ A typed directional relationship between Senses or Synsets.
    Fields
    ------
    type : string
        One of POINTER_TYPES.
    pos : string
        The part of speech -- one of NOUN, VERB, ADJECTIVE, ADVERB.
    """
    # Maps the raw pointer symbols found in the WordNet data files to the
    # symbolic POINTER_TYPES constants defined at module level.
    _POINTER_TYPE_TABLE = {
        '!': ANTONYM,
        '@': HYPERNYM,
        '~': HYPONYM,
        '~i': HYPONYM,  # Tom De Smedt, 2006:
        '@i': HYPERNYM, # yields a KeyError otherwise
        '=': ATTRIBUTE,
        '^': ALSO_SEE,
        '*': ENTAILMENT,
        '>': CAUSE,
        '$': VERB_GROUP,
        '#m': MEMBER_MERONYM,
        '#s': SUBSTANCE_MERONYM,
        '#p': PART_MERONYM,
        '%m': MEMBER_HOLONYM,
        '%s': SUBSTANCE_HOLONYM,
        '%p': PART_HOLONYM,
        '&': SIMILAR,
        '<': PARTICIPLE_OF,
        '\\': PERTAINYM,
        # New in wn 2.0:
        '+': FRAMES,
        ';c': CLASSIF_CATEGORY,
        ';u': CLASSIF_USAGE,
        ';r': CLASSIF_REGIONAL,
        '-c': CLASS_CATEGORY,
        '-u': CLASS_USAGE,
        '-r': CLASS_REGIONAL
        }
    def __init__(self, sourceOffset, pointerTuple):
        # pointerTuple comes from a parsed data-file record:
        # (symbol, target-offset, pos, source/target sense indices).
        (type, offset, pos, indices) = pointerTuple
        self.type = Pointer._POINTER_TYPE_TABLE[type]
        """One of POINTER_TYPES."""
        self.sourceOffset = sourceOffset
        self.targetOffset = int(offset)
        self.pos = _normalizePOS(pos)
        """part of speech -- one of NOUN, VERB, ADJECTIVE, ADVERB"""
        # indices is a 4-digit hex field: high byte = source sense index,
        # low byte = target sense index; 0 means "the whole synset".
        indices = string.atoi(indices, 16)
        self.sourceIndex = indices >> 8
        self.targetIndex = indices & 255
    def getSource(self):
        # A non-zero index selects one Sense (1-based) within the synset.
        synset = getSynset(self.pos, self.sourceOffset)
        if self.sourceIndex:
            return synset[self.sourceIndex - 1]
        else:
            return synset
    source = getSource # backwards compatability
    def getTarget(self):
        synset = getSynset(self.pos, self.targetOffset)
        if self.targetIndex:
            return synset[self.targetIndex - 1]
        else:
            return synset
    target = getTarget # backwards compatability
    def __str__(self):
        return self.type + " -> " + str(self.target())
    def __repr__(self):
        if ReadableRepresentations:
            return str(self)
        return "<" + str(self) + ">"
    def __cmp__(self, other):
        # Order by (pos, sourceOffset) first, then by position within the
        # source synset's pointer list (Python 2 cmp protocol).
        diff = _compareInstances(self, other, ('pos', 'sourceOffset'))
        if diff:
            return diff
        synset = self.source()
        def pointerIndex(sense, synset=synset):
            return _index(sense, synset.getPointers(), testfn=lambda a,b: not _compareInstances(a, b, ('type', 'sourceIndex', 'targetIndex')))
        return cmp(pointerIndex(self), pointerIndex(other))
# Loading the lexnames
# Klaus Ries <ries@cs.cmu.edu>
class Lexname:
    """A lexicographer-file name (e.g. noun.animal) paired with its part
    of speech.  Instances self-register in two class-level registries:
    Lexname.dict (name -> instance) and Lexname.lexnames (file order)."""
    # NOTE: 'dict' shadows the builtin, but renaming would break callers.
    dict = {}
    lexnames = []
    def __init__(self,name,category):
        self.name = name
        self.category = category
        # Self-registration: merely constructing a Lexname records it.
        Lexname.dict[name] = self
        Lexname.lexnames.append(self)
    def __str__(self):
        return self.name
def setupLexnames():
    """Populate the Lexname registry from the WNSEARCHDIR/lexnames file.

    Each line is "<index> <name> <pos-number>"; the 1-based pos-number
    indexes PartsOfSpeech.
    """
    f = open(WNSEARCHDIR + '/lexnames')
    try:
        for l in f.readlines():
            # str.split replaces the deprecated string.split module call.
            i, name, category = l.split()
            Lexname(name, PartsOfSpeech[int(category) - 1])
    finally:
        # The original leaked the file handle; close it explicitly.
        f.close()
setupLexnames()
#
# Dictionary
#
class Dictionary:
    """A Dictionary contains all the Words in a given part of speech.
    This module defines four dictionaries, bound to N, V, ADJ, and ADV.
    Indexing a dictionary by a string retrieves the word named by that
    string, e.g. dict['dog']. Indexing by an integer n retrieves the
    nth word, e.g. dict[0]. Access by an arbitrary integer is very
    slow except in the special case where the words are accessed
    sequentially; this is to support the use of dictionaries as the
    range of a for statement and as the sequence argument to map and
    filter.
    Example
    -------
    >>> N['dog']
    dog(n.)
    Fields
    ------
    pos : string
        The part of speech -- one of NOUN, VERB, ADJECTIVE, ADVERB.
    """
    def __init__(self, pos, filenameroot):
        self.pos = pos
        """part of speech -- one of NOUN, VERB, ADJECTIVE, ADVERB"""
        # The index file supports key lookup; the data file holds synset
        # records addressed by byte offset.
        self.indexFile = _IndexFile(pos, filenameroot)
        self.dataFile = open(_dataFilePathname(filenameroot), _FILE_OPEN_MODE)
    def __repr__(self):
        # The four canonical instances print as their module variable name.
        dictionaryVariables = {N: 'N', V: 'V', ADJ: 'ADJ', ADV: 'ADV'}
        if dictionaryVariables.get(self):
            return self.__module__ + "." + dictionaryVariables[self]
        return "<%s.%s instance for %s>" % (self.__module__, "Dictionary", self.pos)
    def getWord(self, form, line=None):
        # Index keys are lowercase with spaces encoded as underscores.
        key = string.replace(string.lower(form), ' ', '_')
        pos = self.pos
        # loader runs only on a cache miss; a pre-fetched index line may
        # be passed in to skip the index lookup.
        def loader(key=key, line=line, indexFile=self.indexFile):
            line = line or indexFile.get(key)
            return line and Word(line)
        word = _entityCache.get((pos, key), loader)
        if word:
            return word
        else:
            raise KeyError, "%s is not in the %s database" % (`form`, `pos`)
    def getSynset(self, offset):
        pos = self.pos
        # loader parses the synset record at this byte offset on a miss.
        def loader(pos=pos, offset=offset, dataFile=self.dataFile):
            return Synset(pos, offset, _lineAt(dataFile, offset))
        return _entityCache.get((pos, offset), loader)
    def _buildIndexCacheFile(self):
        self.indexFile._buildIndexCacheFile()
    #
    # Sequence protocol (a Dictionary's items are its Words)
    #
    def __nonzero__(self):
        """Return false. (This is to avoid scanning the whole index file
        to compute len when a Dictionary is used in test position.)
        >>> N and 'true'
        'true'
        """
        return 1
    def __len__(self):
        """Return the number of index entries.
        >>> len(ADJ)
        21435
        """
        # Cached after the first (expensive, whole-file) scan.
        if not hasattr(self, 'length'):
            self.length = len(self.indexFile)
        return self.length
    def __getslice__(self, a, b):
        results = []
        if type(a) == type('') and type(b) == type(''):
            # NOTE(review): string-bounded slices were never implemented;
            # this raises a Python 2 string exception.
            raise "unimplemented"
        elif type(a) == type(1) and type(b) == type(1):
            for i in range(a, b):
                results.append(self[i])
        else:
            raise TypeError
        return results
    def __getitem__(self, index):
        """If index is a String, return the Word whose form is
        index.  If index is an integer n, return the Word
        indexed by the n'th Word in the Index file.
        >>> N['dog']
        dog(n.)
        >>> N[0]
        'hood(n.)
        """
        if isinstance(index, StringType):
            return self.getWord(index)
        elif isinstance(index, IntType):
            # Fetch the nth index line, then look the word up by its key
            # (the first space-delimited field, underscores decoded).
            line = self.indexFile[index]
            return self.getWord(string.replace(line[:string.find(line, ' ')], '_', ' '), line)
        else:
            raise TypeError, "%s is not a String or Int" % `index`
    #
    # Dictionary protocol
    #
    # a Dictionary's values are its words, keyed by their form
    #
    def get(self, key, default=None):
        """Return the Word whose form is _key_, or _default_.
        >>> N.get('dog')
        dog(n.)
        >>> N.get('inu')
        """
        try:
            return self[key]
        except LookupError:
            return default
    def keys(self):
        """Return a sorted list of strings that index words in this
        dictionary."""
        return self.indexFile.keys()
    def has_key(self, form):
        """Return true iff the argument indexes a word in this dictionary.
        >>> N.has_key('dog')
        1
        >>> N.has_key('inu')
        0
        """
        return self.indexFile.has_key(form)
    #
    # Testing
    #
    def _testKeys(self):
        """Verify that index lookup can find each word in the index file."""
        print "Testing: ", self
        file = open(self.indexFile.file.name, _FILE_OPEN_MODE)
        counter = 0
        while 1:
            line = file.readline()
            if line == '': break
            # Header lines in the index file begin with a space.
            if line[0] != ' ':
                key = string.replace(line[:string.find(line, ' ')], '_', ' ')
                if (counter % 1000) == 0:
                    print "%s..." % (key,),
                    import sys
                    sys.stdout.flush()
                counter = counter + 1
                self[key]
        file.close()
        print "done."
class _IndexFile:
"""An _IndexFile is an implementation class that presents a
Sequence and Dictionary interface to a sorted index file."""
def __init__(self, pos, filenameroot):
self.pos = pos
self.file = open(_indexFilePathname(filenameroot), _FILE_OPEN_MODE)
self.offsetLineCache = {} # Table of (pathname, offset) -> (line, nextOffset)
self.rewind()
self.shelfname = os.path.join(WNSEARCHDIR, pos + ".pyidx")
try:
# Tom De Smedt, 2006
# Possible error on Mac OS X.
#import shelve
#self.indexCache = shelve.open(self.shelfname, 'r')
pass
except:
pass
def rewind(self):
self.file.seek(0)
while 1:
offset = self.file.tell()
line = self.file.readline()
if (line[0] != ' '):
break
self.nextIndex = 0
self.nextOffset = offset
#
# Sequence protocol (an _IndexFile's items are its lines)
#
def __nonzero__(self):
return 1
def __len__(self):
if hasattr(self, 'indexCache'):
return len(self.indexCache)
self.rewind()
lines = 0
while 1:
line = self.file.readline()
if line == "":
break
lines = lines + 1
return lines
def __nonzero__(self):
return 1
def __getitem__(self, index):
if isinstance(index, StringType):
if hasattr(self, 'indexCache'):
return self.indexCache[index]
return binarySearchFile(self.file, index, self.offsetLineCache, 8)
elif isinstance(index, IntType):
if hasattr(self, 'indexCache'):
return self.get(self.keys[index])
if index < self.nextIndex:
self.rewind()
while self.nextIndex <= index:
self.file.seek(self.nextOffset)
line = self.file.readline()
if line == "":
raise IndexError, "index out of range"
self.nextIndex = self.nextIndex + 1
self.nextOffset = self.file.tell()
return line
else:
raise TypeError, "%s is not a String or Int" % `index`
#
# Dictionary protocol
#
# (an _IndexFile's values are its lines, keyed by the first word)
#
def get(self, key, default=None):
try:
return self[key]
except LookupError:
return default
def keys(self):
if hasattr(self, 'indexCache'):
keys = self.indexCache.keys()
keys.sort()
return keys
else:
keys = []
self.rewind()
while 1:
line = self.file.readline()
if not line: break
key = line.split(' ', 1)[0]
keys.append(key.replace('_', ' '))
return keys
def has_key(self, key):
key = key.replace(' ', '_') # test case: V['haze over']
if hasattr(self, 'indexCache'):
return self.indexCache.has_key(key)
return self.get(key) != None
#
# Index file
#
def _buildIndexCacheFile(self):
import shelve
import os
print "Building %s:" % (self.shelfname,),
tempname = self.shelfname + ".temp"
try:
indexCache = shelve.open(tempname)
self.rewind()
count = 0
while 1:
offset, line = self.file.tell(), self.file.readline()
if not line: break
key = line[:string.find(line, ' ')]
if (count % 1000) == 0:
print "%s..." % (key,),
import sys
sys.stdout.flush()
indexCache[key] = line
count = count + 1
indexCache.close()
os.rename(tempname, self.shelfname)
finally:
try: os.remove(tempname)
except: pass
print "done."
self.indexCache = shelve.open(self.shelfname, 'r')
#
# Lookup functions
#
def getWord(form, pos='noun'):
    "Return a word with the given lexical form and pos."
    return _dictionaryFor(pos).getWord(form)
def getSense(form, pos='noun', senseno=0):
    "Lookup a sense by its sense number. Used by repr(sense)."
    return getWord(form, pos)[senseno]
def getSynset(pos, offset):
    "Lookup a synset by its offset. Used by repr(synset)."
    return _dictionaryFor(pos).getSynset(offset)
# Lowercase aliases kept for backward compatibility with version 1.0.
getword, getsense, getsynset = getWord, getSense, getSynset
#
# Private utilities
#
def _requirePointerType(pointerType):
    """Validate that pointerType is one of POINTER_TYPES and return it.

    Raises TypeError otherwise.  (Call-form raise and repr() replace the
    Python 2-only comma-raise and backquote syntax; the message is
    unchanged.)
    """
    if pointerType not in POINTER_TYPES:
        raise TypeError(repr(pointerType) + " is not a pointer type")
    return pointerType
def _compareInstances(a, b, fields):
    """"Return -1, 0, or 1 according to a comparison first by type,
    then by class, and finally by each of fields.""" # " <- for emacs
    # Python 2 cmp()-style three-way comparison, used by the __cmp__
    # methods of Word/Sense/Synset/Pointer.
    if not hasattr(b, '__class__'):
        return cmp(type(a), type(b))
    elif a.__class__ != b.__class__:
        return cmp(a.__class__, b.__class__)
    for field in fields:
        diff = cmp(getattr(a, field), getattr(b, field))
        if diff:
            return diff
    return 0
def _equalsIgnoreCase(a, b):
"""Return true iff a and b have the same lowercase representation.
>>> _equalsIgnoreCase('dog', 'Dog')
1
>>> _equalsIgnoreCase('dOg', 'DOG')
1
"""
return a == b or string.lower(a) == string.lower(b)
#
# File utilities
#
def _dataFilePathname(filenameroot):
    """Return the pathname of the WordNet data file for filenameroot.

    On DOS/Windows the short 8.3-style name (foo.dat) is preferred when
    it exists; otherwise the Unix name (data.foo) is used.
    """
    if os.name in ('dos', 'nt'):
        candidate = os.path.join(WNSEARCHDIR, filenameroot + ".dat")
        if os.path.exists(candidate):
            return candidate
    return os.path.join(WNSEARCHDIR, "data." + filenameroot)
def _indexFilePathname(filenameroot):
    """Return the pathname of the WordNet index file for filenameroot.

    On DOS/Windows the short 8.3-style name (foo.idx) is preferred when
    it exists; otherwise the Unix name (index.foo) is used.
    """
    if os.name in ('dos', 'nt'):
        candidate = os.path.join(WNSEARCHDIR, filenameroot + ".idx")
        if os.path.exists(candidate):
            return candidate
    return os.path.join(WNSEARCHDIR, "index." + filenameroot)
def binarySearchFile(file, key, cache=None, cacheDepth=-1):
    """Return the line of a sorted index file whose first field is key,
    or None if no such line exists.

    A space is appended to key so that only whole-field matches count.
    The search is by byte offset: seek near the midpoint, discard the
    partial line at the seek position, and compare the next full line.
    cache maps probe midpoints to (offset, line) for the first
    cacheDepth levels of the search.
    """
    from stat import ST_SIZE
    if cache is None:
        # Fresh dict per call; the original used a shared mutable default
        # argument.  Callers that pass an explicit cache are unaffected,
        # and with the default cacheDepth=-1 nothing was cached anyway.
        cache = {}
    key = key + ' '
    keylen = len(key)
    start, end = 0, os.stat(file.name)[ST_SIZE]
    currentDepth = 0
    while start < end:
        lastState = start, end
        middle = (start + end) // 2  # // keeps byte offsets integral
        if cache.get(middle):
            offset, line = cache[middle]
        else:
            file.seek(max(0, middle - 1))
            if middle > 0:
                # Discard the partial line at the seek point.
                file.readline()
            offset, line = file.tell(), file.readline()
            if currentDepth < cacheDepth:
                cache[middle] = (offset, line)
        if offset > end:
            assert end != middle - 1, "infinite loop"
            end = middle - 1
        elif line[:keylen] == key:
            return line
        elif line > key:
            assert end != middle - 1, "infinite loop"
            end = middle - 1
        elif line < key:
            start = offset + len(line) - 1
        currentDepth = currentDepth + 1
        thisState = start, end
        if lastState == thisState:
            # detects the condition where we're searching past the end
            # of the file, which is otherwise difficult to detect
            return None
    return None
def _lineAt(file, offset):
    # Random-access read: return the line beginning at a known byte offset.
    file.seek(offset)
    return file.readline()
#
# Sequence Utility Functions
#
def _index(key, sequence, testfn=None, keyfn=None):
"""Return the index of key within sequence, using testfn for
comparison and transforming items of sequence by keyfn first.
>>> _index('e', 'hello')
1
>>> _index('E', 'hello', testfn=_equalsIgnoreCase)
1
>>> _index('x', 'hello')
"""
index = 0
for element in sequence:
value = element
if keyfn:
value = keyfn(value)
if (not testfn and value == key) or (testfn and testfn(value, key)):
return index
index = index + 1
return None
def _partition(sequence, size, count):
"""Partition sequence into count subsequences of size
length, and a remainder.
Return (partitions, remainder), where partitions is a sequence of
count subsequences of cardinality count, and
apply(append, partitions) + remainder == sequence."""
partitions = []
for index in range(0, size * count, size):
partitions.append(sequence[index:index + size])
return (partitions, sequence[size * count:])
#
# Cache management
#
# Some kind of cache is necessary since Sense -> Synset references are
# stored by key, and it's nice not to have to cons a new copy of a
# Synset that's been paged in each time a Sense's synset is retrieved.
# Ideally, we'd use a weak dict, but there aren't any. A strong dict
# reintroduces the problem that eliminating the Sense <-> Synset
# circularity was intended to resolve: every entity ever seen is
# preserved forever, making operations that iterate over the entire
# database prohibitive.
#
# The LRUCache approximates a weak dict in the case where temporal
# locality is good.
class _LRUCache:
""" A cache of values such that least recently used element is
flushed when the cache fills.
Private fields
--------------
entities
a dict from key -> (value, timestamp)
history
is a dict from timestamp -> key
nextTimeStamp
is the timestamp to use with the next value that's added.
oldestTimeStamp
The timestamp of the oldest element (the next one to remove),
or slightly lower than that.
This lets us retrieve the key given the timestamp, and the
timestamp given the key. (Also the value given either one.)
That's necessary so that we can reorder the history given a key,
and also manipulate the values dict given a timestamp. #
I haven't tried changing history to a List. An earlier
implementation of history as a List was slower than what's here,
but the two implementations aren't directly comparable."""
def __init__(this, capacity):
this.capacity = capacity
this.clear()
def clear(this):
this.values = {}
this.history = {}
this.oldestTimestamp = 0
this.nextTimestamp = 1
def removeOldestEntry(this):
while this.oldestTimestamp < this.nextTimestamp:
if this.history.get(this.oldestTimestamp):
key = this.history[this.oldestTimestamp]
del this.history[this.oldestTimestamp]
del this.values[key]
return
this.oldestTimestamp = this.oldestTimestamp + 1
def setCapacity(this, capacity):
if capacity == 0:
this.clear()
else:
this.capacity = capacity
while len(this.values) > this.capacity:
this.removeOldestEntry()
def get(this, key, loadfn=None):
value = None
if this.values:
pair = this.values.get(key)
if pair:
(value, timestamp) = pair
del this.history[timestamp]
if value == None:
value = loadfn and loadfn()
if this.values != None:
timestamp = this.nextTimestamp
this.nextTimestamp = this.nextTimestamp + 1
this.values[key] = (value, timestamp)
this.history[timestamp] = key
if len(this.values) > this.capacity:
this.removeOldestEntry()
return value
class _NullCache:
"""A NullCache implements the Cache interface (the interface that
LRUCache implements), but doesn't store any values."""
def clear():
pass
def get(this, key, loadfn=None):
return loadfn and loadfn()
# Global entity cache mapping (pos, key) -> Word/Synset; see the cache
# management discussion above.
DEFAULT_CACHE_CAPACITY = 1000
_entityCache = _LRUCache(DEFAULT_CACHE_CAPACITY)
def disableCache():
    """Disable the entity cache."""
    # Fixed: without the global declaration the assignment only bound a
    # local variable and the module-level cache was never replaced.
    global _entityCache
    _entityCache = _NullCache()
def enableCache():
    """Enable the entity cache."""
    # Fixed three defects: missing global declaration, reference to the
    # undefined name LRUCache (the class is _LRUCache), and use of an
    # undefined variable `size` (use the documented default capacity).
    global _entityCache
    if not isinstance(_entityCache, _LRUCache):
        _entityCache = _LRUCache(DEFAULT_CACHE_CAPACITY)
def clearCache():
    """Clear the entity cache."""
    # Works for both _LRUCache and _NullCache.
    _entityCache.clear()
def setCacheCapacity(capacity=DEFAULT_CACHE_CAPACITY):
    """Set the capacity of the entity cache."""
    # Re-enable first in case the cache had been disabled.
    enableCache()
    _entityCache.setCapacity(capacity)
setCacheSize = setCacheCapacity # for compatability with version 1.0
#
# POS Dictionaries (must be initialized after file utilities)
#
# The four POS dictionaries, created at import time -- the WordNet data
# files must be present when this module is loaded.
N = Dictionary(NOUN, 'noun')
V = Dictionary(VERB, 'verb')
ADJ = Dictionary(ADJECTIVE, 'adj')
ADV = Dictionary(ADVERB, 'adv')
Dictionaries = (N, V, ADJ, ADV)
#
# Part-of-speech tag normalization tables (must be initialized after
# POS dictionaries)
#
# Normalization tables: assorted POS spellings (and the Dictionary
# objects themselves) -> canonical POS constant, and constant -> its
# Dictionary.  Rebuilt by _initializePOSTables().
_POSNormalizationTable = {}
_POStoDictionaryTable = {}
def _initializePOSTables():
    global _POSNormalizationTable, _POStoDictionaryTable
    _POSNormalizationTable = {}
    _POStoDictionaryTable = {}
    for pos, abbreviations in (
        (NOUN, "noun n n."),
        (VERB, "verb v v."),
        (ADJECTIVE, "adjective adj adj. a s"),
        (ADVERB, "adverb adv adv. r")):
        tokens = string.split(abbreviations)
        for token in tokens:
            # Register both the lowercase and uppercase spellings.
            _POSNormalizationTable[token] = pos
            _POSNormalizationTable[string.upper(token)] = pos
    for dict in Dictionaries:
        _POSNormalizationTable[dict] = dict.pos
        _POStoDictionaryTable[dict.pos] = dict
_initializePOSTables()
def _normalizePOS(pos):
    """Return the canonical part-of-speech constant for pos, which may
    be any accepted spelling ('n', 'noun', 'N.', a Dictionary, ...).

    Raises TypeError for unrecognized values.  (Call-form raise and
    repr() replace the Python 2-only comma-raise and backquotes; the
    message text is unchanged.)
    """
    norm = _POSNormalizationTable.get(pos)
    if norm:
        return norm
    raise TypeError(repr(pos) + " is not a part of speech type")
def _dictionaryFor(pos):
    # Resolve any accepted POS spelling to its Dictionary instance.
    pos = _normalizePOS(pos)
    dict = _POStoDictionaryTable.get(pos)
    if dict == None:
        raise RuntimeError, "The " + `pos` + " dictionary has not been created"
    return dict
def buildIndexFiles():
    # Pre-build the shelve-based .pyidx caches for every POS dictionary.
    for dict in Dictionaries:
        dict._buildIndexCacheFile()
#
# Testing
#
def _testKeys():
    #This is slow, so don't do it as part of the normal test procedure.
    # Exercises index lookup for every word in every POS dictionary.
    for dictionary in Dictionaries:
        dictionary._testKeys()
def _test(reset=0):
    """Run the module doctests; pass reset=1 after a reload."""
    import doctest, wordnet
    if reset:
        doctest.master = None # This keeps doctest from complaining after a reload.
    return doctest.testmod(wordnet)
| Python |
# Distutils packaging script for PyWordNet.
from distutils.core import setup
setup(name="pywordnet",
      version="2.0.1",
      description="An interface to the WordNet database.",
      author="Oliver Steele",
      author_email="steele@osteele.com",
      url="http://pywordnet.sourceforge.net",
      py_modules=["wordnet", "wntools", "concordance"],
      # doc_files=["README.txt", "CHANGES.txt", "docs"]
      )
| Python |
# coding: utf-8
# WORDNET - last updated for NodeBox 1.9.2
# Author: Tom De Smedt <tomdesmedt@organisms.be>
# Copyright (c) 2007 Tom De Smedt.
# See LICENSE.txt for details.
# All other files are from PyWordNet by Oliver Steele and WordNet 2:
# http://osteele.com/projects/pywordnet/
# http://wordnet.princeton.edu
#This tells wordnet.py where to look the WordNet dictionary.
import os
pywordnet_path = os.path.join(os.path.dirname(__file__), "wordnet2")
os.environ["WNHOME"] = pywordnet_path
import wordnet as wn
import wntools
import re
# Module-level handles to the four PyWordNet part-of-speech dictionaries.
NOUNS = wn.N
VERBS = wn.V
ADJECTIVES = wn.ADJ
ADVERBS = wn.ADV
# (regex, replacement) pairs used by _normalize() to reduce common
# accented letters to ASCII before querying WordNet.
ignore_accents = [
    ("á|ä|â|å|à", "a"),
    ("é|ë|ê|è", "e"),
    ("í|ï|î|ì", "i"),
    ("ó|ö|ô|ø|ò", "o"),
    ("ú|ü|û|ù", "u"),
    ("ÿ|ý", "y"),
    ("š", "s"),
    ("ç", "c"),  # fixed: was ("ç", "ç"), a no-op self-mapping
    ("ñ", "n")
]
def _normalize(s):
    """Best-effort conversion of s to a plain byte string with common
    accented letters reduced to ASCII (WordNet does not take unicode).
    Integers (e.g. sense indices) pass through untouched.
    """
    if isinstance(s, int):
        return s
    # Narrowed from bare except: a bare except also swallows
    # KeyboardInterrupt/SystemExit.
    try:
        s = str(s)
    except Exception:
        try:
            s = s.encode("utf-8")
        except Exception:
            pass  # best effort: leave s as-is
    for pattern, replacement in ignore_accents:
        s = re.sub(pattern, replacement, s)
    return s
def _synset(q, sense=0, pos=NOUNS):
    """Queries WordNet for q.
    The query function always returns data related to
    the sense of q.
    Example: the word "tree" has the following senses:
    [["tree"],
     ["tree", "tree diagram"],
     ["Tree", "Sir Herbert Beerbohm Tree"]]
    Setting sense=0 would interpret "tree" as "a tree in a wood".
    Returns None when q is unknown or sense is out of range.
    """
    # Narrowed from bare except; unknown word (KeyError) or bad sense
    # index (IndexError) both mean "no result".
    try:
        return pos[_normalize(q)][sense]
    except Exception:
        return None
def _parse(data):
"""_parses data from PyWordnet to lists-in-lists.
Human-readable strings from PyWordnet are
converted to a list. This list contains lists.
Each of these contains a series of words in the same "sense".
Example: [["fly", "wing"], ["travel", "go", "move", "locomote"]]
"""
if not isinstance(data, (list, tuple)):
data = [data]
return [
[word.strip(" ") for word in m.split(",")]
# Parse text between : and }
for m in re.findall("\:(.*?)\}", str(data))
]
def senses(q, pos=NOUNS):
    """Returns all senses for q.
    """
    # Bare except mirrors _synset(): an unknown word yields [].
    try: return _parse(pos[_normalize(q)].getSenses())
    except:
        return []
def count_senses(q, pos=NOUNS):
    """ Returns the number of senses/interpretations of q.
    Example:
    for i in range(noun.count_senses(q)):
        print noun.gloss(q, sense=i)
    """
    return len(senses(q, pos))
def gloss(q, sense=0, pos=NOUNS):
    """Returns a description text for q.
    Example: gloss("glass") returns
    "a brittle transparent solid with irregular atomic structure".
    Returns "" when q is unknown.
    """
    s = _synset(q, sense, pos)
    if not s:
        return ""
    return s.synset.gloss
def lexname(q, sense=0, pos=NOUNS):
    """Returns a type of q.
    Example: lexname("bee") returns "animal".
    """
    s = _synset(q, sense, pos)
    if not s:
        return ""
    # Lexname strings look like "noun.animal"; keep only the category.
    data = str(s.lexname)
    data = data[data.index(".")+1:]
    if data == "Tops":
        # "Tops" is the catch-all lexicographer file; fall back to q itself.
        return q
    return data
def hyponym(q, sense=0, pos=NOUNS):
    """Returns the implementation of q.
    This can usually be considered as an "example" of q.
    Example: hyponym("train") returns
    [["boat train"], ["car train"], ["freight train", "rattler"],
    ["hospital train"], ["mail train"], ["passenger train"], ["streamliner"],
    ["subway train"]].
    """
    s = _synset(q, sense, pos)
    if not s:
        return []
    # Direct (one-level) hyponym pointers only; see hyponyms() for the tree.
    return _parse(s.getPointers(wn.HYPONYM))
def hyponyms(q, sense=0, pos=NOUNS):
    """Returns all hyponyms of q.
    """
    s = _synset(q, sense, pos)
    if not s:
        return []
    # Flatten the whole recursive hyponym tree into a single list.
    return _parse(flatten(wntools.tree(s, wn.HYPONYM)))
def hypernym(q, sense=0, pos=NOUNS):
    """Returns the abstraction of q.
    This can usually be considered as a class to which q belongs.
    Example: hypernym("train") returns [["public transport"]].
    """
    s = _synset(q, sense, pos)
    if not s:
        return []
    return _parse(s.getPointers(wn.HYPERNYM))
def hypernyms(q, sense=0, pos=NOUNS):
    """Returns all hypernyms of q.
    """
    s = _synset(q, sense, pos)
    if not s:
        return []
    # Flatten the whole recursive hypernym tree into a single list.
    return _parse(flatten(wntools.tree(s, wn.HYPERNYM)))
def antonym(q, sense=0, pos=NOUNS):
    """Returns the opposite of q.
    Example: antonym("death") returns
    [["birth", "nativity", "nascency", "nascence"]].
    """
    s = _synset(q, sense, pos)
    if not s:
        return []
    return _parse(s.getPointers(wn.ANTONYM))
def holonym(q, sense=0, pos=NOUNS):
    """Returns the components of q.
    Example: holonym("house") returns
    [["library"], ["loft", "attic", "garret"], ["porch"], ["study"]]
    """
    s = _synset(q, sense, pos)
    if not s:
        return []
    return _parse(s.getPointers(wn.PART_HOLONYM))
def meronym(q, sense=0, pos=NOUNS):
    """Returns the collection of many q"s.
    That of which q is a member.
    Example: meronym("tree") returns [["forest", "wood", "woods"]].
    """
    s = _synset(q, sense, pos)
    if not s:
        return []
    return _parse(s.getPointers(wn.MEMBER_MERONYM))
def meet(q1, q2, sense1=0, sense2=0, pos=NOUNS):
    """Returns what q1 and q2 have in common.
    (The nearest common hypernym ancestor, per wntools.meet.)
    """
    s1 = _synset(q1, sense1, pos)
    s2 = _synset(q2, sense2, pos)
    if not s1: return []
    if not s2: return []
    return _parse(wntools.meet(s1, s2))
def flatten(tree):
    """Flattens a tree to a list, in place, and returns it.
    Example: ["one", ["two", ["three", ["four"]]]]
    becomes: ["one", "two", "three", "four"]
    """
    i = 0
    while i < len(tree):
        # Fixed: guard i < len(tree) inside the inner loop.  The original
        # only broke out when the list became completely empty, so e.g.
        # flatten([1, []]) raised IndexError after popping the trailing
        # empty list.
        while i < len(tree) and isinstance(tree[i], (list, tuple)):
            if not tree[i]:
                tree.pop(i)
            else:
                # Splice the nested sequence's items in at position i.
                tree[i:i + 1] = list(tree[i])
        i += 1
    return tree
def absurd_gloss(q, sense=0, pos=NOUNS, up=3, down=2):
    """
    Attempts to simulate humor:
    takes an abstract interpretation of the word,
    and takes random examples of that abstract;
    one of these is to be the description of the word.
    The returned gloss is thus not purely random,
    it is still faintly related to the given word.
    """
    from random import random, choice
    # Climb `up` hypernym steps, then descend `down` random hyponym steps.
    def _up(path):
        p = hypernym(path, sense, pos)
        if p: return p[0][0]
        return path
    def _down(path):
        p = hyponym(path, sense, pos)
        if p: return choice(p)[0]
        return path
    for i in range(up): q = _up(q)
    for i in range(down): q = _down(q)
    return gloss(q)
# Membership tests: does q occur (after accent normalization) in the
# given part-of-speech dictionary?
def is_noun(q):
    return NOUNS.has_key(_normalize(q))
def is_verb(q):
    return VERBS.has_key(_normalize(q))
def is_adjective(q):
    return ADJECTIVES.has_key(_normalize(q))
def is_adverb(q):
    return ADVERBS.has_key(_normalize(q))
# Whole-dictionary accessors (the Dictionary objects are iterable).
def all_nouns() : return NOUNS
def all_verbs() : return VERBS
def all_adjectives() : return ADJECTIVES
def all_adverbs() : return ADVERBS
def _meta_create_shortcuts():
    """ Writes and compiles shortcut commands.
    For example: a noun_hyponym() command
    is created that has the following definition:
    def noun_hyponym(q, sense=0):
        return hyponym(q, sense, pos=NOUNS)
    When the loop has executed you'll have comparable
    verb_, adjective_ and adverb_ shortcuts
    for each WordNet command.
    """
    def_prefixes = ["noun", "verb", "adjective", "adverb"]
    defs = ["count_senses", "senses", "gloss", "lexname",
            "hyponym", "hyponyms", "hypernym", "hypernyms",
            "antonym", "meronym", "holonym", "meet", "absurd_gloss"]
    for p in def_prefixes:
        for f in defs:
            # Each wrapped command has one of three parameter shapes.
            if f == "count_senses" \
            or f == "senses":
                params1 = "q"
                params2 = "q"
            elif f == "meet":
                params1 = "q1, q2, sense1=0, sense2=0"
                params2 = "q1, q2, sense1, sense2"
            else:
                params1 = "q, sense=0"
                params2 = "q, sense"
            # Generate e.g. "global noun_gloss\ndef noun_gloss(q, sense=0):
            #     return gloss(q, sense, pos=NOUNS)" and execute it; the
            # global declaration publishes the def at module level.
            # NOTE(review): exec of generated source -- safe here because
            # the inputs are the hard-coded lists above, but do not feed
            # external strings into this.
            code = "global "+p+"_"+f+"\n"
            code += "def "+p+"_"+f+"("+params1+"):\n"
            code += "    return "+f+"("+params2+", pos="+p.upper()+"S)"
            eval(compile(code, "<string>", "exec"))
            #print code
_meta_create_shortcuts()
#print len(all_adverbs())
#print [str(x).rstrip("(n.)") for x in all_nouns()[:20]]
#print noun_lexname("fear")
#print noun_holonym("fish")
#print adjective_gloss("weak")
#print verb_antonym("sleep") | Python |
#!/usr/bin/env python2.4
#
# A Test Driver for Doctest
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
#
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
"""
A driver for testing interactive python examples in text files and
docstrings. This doctest driver performs three functions:
- checking: Runs the interactive examples, and reports any examples
whose actual output does not match their expected output.
- debugging: Runs the interactive examples, and enters the debugger
whenever an example's actual output does not match its expected
output.
- updating: Runs the interactive examples, and replaces the expected
output with the actual output whenever they don't match. This is
used to update the output for new or out-of-date examples.
A number of other flags can be given; call the driver with the
`--help` option for a complete list.
"""
import os, os.path, sys, unittest, pdb, bdb, re, tempfile, traceback
from doctest import *
from doctest import DocTestCase
from optparse import OptionParser, OptionGroup, Option
from StringIO import StringIO
__version__ = '0.1'
###########################################################################
# Utility Functions
###########################################################################
# These are copied from doctest; I don't import them because they're
# private. See the versions in doctest for docstrings, etc.
class _OutputRedirectingPdb(pdb.Pdb):
def __init__(self, out):
self.__out = out
pdb.Pdb.__init__(self)
def trace_dispatch(self, *args):
save_stdout = sys.stdout
sys.stdout = self.__out
pdb.Pdb.trace_dispatch(self, *args)
sys.stdout = save_stdout
def _exception_traceback(exc_info):
excout = StringIO()
exc_type, exc_val, exc_tb = exc_info
traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
return excout.getvalue()
class _SpoofOut(StringIO):
def getvalue(self):
result = StringIO.getvalue(self)
if result and not result.endswith("\n"):
result += "\n"
if hasattr(self, "softspace"):
del self.softspace
return result
def truncate(self, size=None):
StringIO.truncate(self, size)
if hasattr(self, "softspace"):
del self.softspace
###########################################################################
# Update Runner
###########################################################################
class UpdateRunner(DocTestRunner):
    """
    A subclass of `DocTestRunner` that checks the output of each
    example, and replaces the expected output with the actual output
    for any examples that fail.

    `UpdateRunner` can be used:
      - To automatically fill in the expected output for new examples.
      - To correct examples whose output has become out-of-date.

    However, care must be taken not to update an example's expected
    output with an incorrect value.
    """
    def __init__(self, verbose=False, mark_updates=False):
        # If true, a "# [!!] OUTPUT AUTOMATICALLY UPDATED [!!]" marker is
        # inserted above every example whose output gets replaced.
        self._mark_updates = mark_updates
        DocTestRunner.__init__(self, verbose=verbose)

    def run(self, test, compileflags=None, out=None, clear_globs=True):
        """Run *test*; then rewrite test.docstring in place so each failing
        example's expected output is replaced by the actual output that was
        recorded by the report_* hooks.  Returns (failures, tries)."""
        # Maps each failing example to its replacement expected output.
        self._new_want = {}
        (f,t) = DocTestRunner.run(self, test, compileflags, out, clear_globs)
        # Update the test's docstring, and the lineno's of the
        # examples, by breaking it into lines and replacing the old
        # expected outputs with the new expected outputs.
        old_lines = test.docstring.split('\n')
        new_lines = []
        lineno = 0
        offset = 0
        for example in test.examples:
            # Copy the lines up through the start of the example's
            # output from old_lines to new_lines.
            got_start = example.lineno + example.source.count('\n')
            new_lines += old_lines[lineno:got_start]
            lineno = got_start
            # Do a sanity check to make sure we're at the right lineno
            # (In particular, check that the example's expected output
            # appears in old_lines where we expect it to appear.)
            if example.want:
                assert (example.want.split('\n')[0] ==
                        old_lines[lineno][example.indent:]), \
                       'Line number mismatch at %d' % lineno
            # Skip over the old expected output.
            old_len = example.want.count('\n')
            lineno += old_len
            # Mark any changes we make.
            if self._mark_updates and example in self._new_want:
                new_lines.append(' '*example.indent + '... ' +
                                 '# [!!] OUTPUT AUTOMATICALLY UPDATED [!!]')
            # Add the new expected output.
            new_want = self._new_want.get(example, example.want)
            if new_want:
                new_want = '\n'.join([' '*example.indent+l
                                      for l in new_want[:-1].split('\n')])
                new_lines.append(new_want)
            # Update the example's want & lineno fields.  (Fixed comment
            # typo: was "lieno".)
            example.want = new_want
            example.lineno += offset
            offset += example.want.count('\n') - old_len
        # Add any remaining lines
        new_lines += old_lines[lineno:]
        # Update the test's docstring.
        test.docstring = '\n'.join(new_lines)
        # Return failures & tries
        return (f,t)

    def report_start(self, out, test, example):
        pass

    def report_success(self, out, test, example, got):
        pass

    def report_unexpected_exception(self, out, test, example, exc_info):
        # Record the traceback as the example's new expected output.
        replacement = _exception_traceback(exc_info)
        self._new_want[example] = replacement
        if self._verbose:
            self._report_replacement(out, test, example, replacement)

    def report_failure(self, out, test, example, got):
        # Record the actual output as the example's new expected output.
        self._new_want[example] = got
        if self._verbose:
            self._report_replacement(out, test, example, got)

    def _report_replacement(self, out, test, example, replacement):
        """Write a human-readable description of a planned replacement."""
        want = '\n'.join(['  '+l for l in example.want.split('\n')[:-1]])
        repl = '\n'.join(['  '+l for l in replacement.split('\n')[:-1]])
        if want and repl:
            diff = 'Replacing:\n%s\nWith:\n%s\n' % (want, repl)
        elif want:
            diff = 'Removing:\n%s\n' % want
        elif repl:
            diff = 'Adding:\n%s\n' % repl
        else:
            # Bug fix: `diff` was previously unbound on this path, raising
            # UnboundLocalError when both strings were empty.
            diff = ''
        out(self._header(test, example) + diff)

    DIVIDER = '-'*70

    def _header(self, test, example):
        """Return a location header identifying *example* within *test*."""
        if test.filename is None:
            tag = ("On line #%s of %s" %
                   (example.lineno+1, test.name))
        elif test.lineno is None:
            tag = ("On line #%s of %s in %s" %
                   (example.lineno+1, test.name, test.filename))
        else:
            lineno = test.lineno+example.lineno+1
            tag = ("On line #%s of %s (%s)" %
                   (lineno, test.filename, test.name))
        source_lines = example.source.rstrip().split('\n')
        return (self.DIVIDER + '\n' + tag + '\n' +
                ' >>> %s\n' % source_lines[0] +
                ''.join([' ... %s\n' % l for l in source_lines[1:]]))
###########################################################################
# Debugger
###########################################################################
def _indent(s, indent=4):
return re.sub('(?m)^(?!$)', indent*' ', s)
import keyword, token, tokenize
class Debugger:
# Just using this for reporting:
runner = DocTestRunner()
def __init__(self, checker=None, set_trace=None):
if checker is None:
checker = OutputChecker()
self.checker = checker
if set_trace is None:
set_trace = pdb.Pdb().set_trace
self.set_trace = set_trace
def _check_output(self, example):
want = example.want
optionflags = self._get_optionflags(example)
got = sys.stdout.getvalue()
sys.stdout.truncate(0)
if not self.checker.check_output(want, got, optionflags):
self.runner.report_failure(self.save_stdout.write,
self.test, example, got)
return False
else:
return True
def _check_exception(self, example):
want_exc_msg = example.exc_msg
optionflags = self._get_optionflags(example)
exc_info = sys.exc_info()
got_exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
if not self.checker.check_output(want_exc_msg, got_exc_msg,
optionflags):
got = _exception_traceback(exc_info)
self.runner.report_failure(self.save_stdout.write,
self.test, example, got)
return False
else:
return True
def _print_if_not_none(self, *args):
if args == (None,):
pass
elif len(args) == 1:
print `args[0]`
else:
print `args` # not quite right: >>> 1,
def _comment_line(self, line):
"Return a commented form of the given line"
line = line.rstrip()
if line:
return '# '+line
else:
return '#'
def _script_from_examples(self, s):
output = []
examplenum = 0
for piece in DocTestParser().parse(s):
if isinstance(piece, Example):
self._script_from_example(piece, examplenum, output)
examplenum += 1
else:
# Add non-example text.
output += [self._comment_line(l)
for l in piece.split('\n')[:-1]]
# Combine the output, and return it.
return '\n'.join(output)
_CHK_OUT = 'if not CHECK_OUTPUT(__examples__[%d]): __set_trace__()'
_CHK_EXC = 'if not CHECK_EXCEPTION(__examples__[%d]): __set_trace__()'
def _script_from_example(self, example, i, output):
source = self._simulate_compile_singlemode(example.source)[:-1]
if example.exc_msg is None:
output.append(source)
output.append(self._CHK_OUT % i)
else:
output.append('try:')
output.append(_indent(source))
output.append(' '+self._CHK_OUT % i)
output.append('except:')
output.append(' '+self._CHK_EXC % i)
def _simulate_compile_singlemode(self, s):
# Calculate line offsets
lines = [0, 0]
pos = 0
while 1:
pos = s.find('\n', pos)+1
if not pos: break
lines.append(pos)
lines.append(len(s))
oldpos = 0
parenlevel = 0
deflevel = 0
output = []
stmt = []
text = StringIO(s)
tok_gen = tokenize.generate_tokens(text.readline)
for toktype, tok, (srow,scol), (erow,ecol), line in tok_gen:
newpos = lines[srow] + scol
stmt.append(s[oldpos:newpos])
if tok != '':
stmt.append(tok)
oldpos = newpos + len(tok)
# Update the paren level.
if tok in '([{':
parenlevel += 1
if tok in '}])':
parenlevel -= 1
if tok in ('def', 'class') and deflevel == 0:
deflevel = 1
if deflevel and toktype == token.INDENT:
deflevel += 1
if deflevel and toktype == token.DEDENT:
deflevel -= 1
# Are we starting a statement?
if ((toktype in (token.NEWLINE, tokenize.NL, tokenize.COMMENT,
token.INDENT, token.ENDMARKER) or
tok==':') and parenlevel == 0):
if deflevel == 0 and self._is_expr(stmt[1:-2]):
output += stmt[0]
output.append('__print__((')
output += stmt[1:-2]
output.append('))')
output += stmt[-2:]
else:
output += stmt
stmt = []
return ''.join(output)
def _is_expr(self, stmt):
stmt = [t for t in stmt if t]
if not stmt:
return False
# An assignment signifies a non-exception, *unless* it
# appears inside of parens (eg, ``f(x=1)``.)
parenlevel = 0
for tok in stmt:
if tok in '([{': parenlevel += 1
if tok in '}])': parenlevel -= 1
if (parenlevel == 0 and
tok in ('=', '+=', '-=', '*=', '/=', '%=', '&=', '+=',
'^=', '<<=', '>>=', '**=', '//=')):
return False
# Any keywords *except* "not", "or", "and", "lambda", "in", "is"
# signifies a non-expression.
if stmt[0] in ("assert", "break", "class", "continue", "def",
"del", "elif", "else", "except", "exec",
"finally", "for", "from", "global", "if",
"import", "pass", "print", "raise", "return",
"try", "while", "yield"):
return False
return True
def _get_optionflags(self, example):
optionflags = 0
for (flag, val) in example.options.items():
if val:
optionflags |= flag
else:
optionflags &= ~flag
return optionflags
def debug(self, test, pm=False):
self.test = test
# Save the old stdout
self.save_stdout = sys.stdout
# Convert the source docstring to a script.
script = self._script_from_examples(test.docstring)
# Create a debugger.
debugger = _OutputRedirectingPdb(sys.stdout)
# Patch pdb.set_trace to restore sys.stdout during interactive
# debugging (so it's not still redirected to self._fakeout).
save_set_trace = pdb.set_trace
pdb.set_trace = debugger.set_trace
# Write the script to a temporary file. Note that
# tempfile.NameTemporaryFile() cannot be used. As the docs
# say, a file so created cannot be opened by name a second
# time on modern Windows boxes, and execfile() needs to open
# it.
srcfilename = tempfile.mktemp(".py", "doctestdebug_")
f = open(srcfilename, 'w')
f.write(script)
f.close()
# Set up the globals
test.globs['CHECK_OUTPUT'] = self._check_output
test.globs['CHECK_EXCEPTION'] = self._check_exception
test.globs['__print__'] = self._print_if_not_none
test.globs['__set_trace__'] = debugger.set_trace
test.globs['__examples__'] = self.test.examples
try:
if pm is False:
debugger.run("execfile(%r)" % srcfilename,
test.globs, test.globs)
else:
try:
sys.stdout = _SpoofOut()
try:
execfile(srcfilename, test.globs)
except bdb.BdbQuit:
return
except:
sys.stdout = self.save_stdout
exc_info = sys.exc_info()
exc_msg = traceback.format_exception_only(
exc_info[0], exc_info[1])[-1]
self.save_stdout.write(self.runner.DIVIDER+'\n')
self.save_stdout.write('Unexpected exception:\n' +
_indent(exc_msg))
raise
#self.post_mortem(debugger, exc_info[2])
finally:
sys.stdout = self.save_stdout
finally:
sys.set_trace = save_set_trace
os.remove(srcfilename)
def post_mortem(self, debugger, t):
debugger.reset()
while t.tb_next is not None:
t = t.tb_next
debugger.interaction(t.tb_frame, t)
###########################################################################
# Helper functions
###########################################################################
# Name can be:
# - The filename of a text file
# - The filename of a python file
# - The dotted name of a python module
# Return a list of test!
def find(name):
    """Return a list of DocTests for *name*.

    *name* may be the filename of a text file, the filename of a python
    file, or the dotted name of a python module; an optional ':testname'
    suffix restricts the result to matching tests.
    """
    # Split off an optional test-name qualifier.
    testname = None
    if ':' in name:
        (name, testname) = name.split(':')
    if not os.path.exists(name):
        # Not an existing file: treat it as a dotted module name.
        module = import_from_name(name)
    else:
        filename = os.path.normpath(os.path.abspath(name))
        ext = os.path.splitext(filename)[-1]
        if ext[-3:] != '.py' and ext[-4:-1] != '.py':
            # A text file: parse the whole file as a single doctest.
            if testname is not None:
                raise ValueError("test names can't be specified "
                                 "for text files")
            text = open(filename).read()
            return [DocTestParser().get_doctest(text, {}, name, filename, 0)]
        # A python file; import it.  Make sure to set the path correctly.
        basedir, modname = find_module_from_filename(filename)
        orig_path = sys.path[:]
        try:
            sys.path.insert(0, basedir)
            module = import_from_name(modname)
        finally:
            sys.path[:] = orig_path
    # Find tests.
    tests = DocTestFinder().find(module)
    if testname is not None:
        testname = '%s.%s' % (module.__name__, testname)
        tests = [t for t in tests if t.name.startswith(testname)]
        if len(tests) == 0:
            raise ValueError("test not found")
    return tests
def import_from_name(name):
    """Import and return the module named by dotted *name*.

    Any error raised during import is converted to a ValueError so that
    callers can report it uniformly.
    """
    try:
        # fromlist=['*'] makes __import__ return the leaf module rather
        # than the top-level package.
        return __import__(name, globals(), locals(), ['*'])
    except Exception, e:
        raise ValueError, str(e)
    except:
        # Catches raisers of non-Exception objects (e.g. old-style string
        # exceptions), which the clause above would miss.
        raise ValueError, 'Error importing %r' % name
def find_module_from_filename(filename):
    """
    Given a filename, return a tuple `(basedir, module)`, where
    `module` is the module's dotted name, and `basedir` is the directory
    it should be loaded from (this directory should be added to the
    path to import it).  Packages are handled correctly.
    """
    basedir, basefile = os.path.split(filename)
    module_name = os.path.splitext(basefile)[0]
    # A package's __init__ module is imported via its directory name
    # (don't use __init__ as the module name).
    if module_name == '__init__':
        basedir, module_name = os.path.split(basedir)
    # If it's contained inside a package, walk up through the enclosing
    # packages collecting their names, and find the true base dir.
    init_base = os.path.join(basedir, '__init__')
    if (os.path.exists(init_base + '.py') or
        os.path.exists(init_base + '.pyc') or
        os.path.exists(init_base + '.pyw')):
        parts = []
        while os.path.exists(os.path.join(basedir, '__init__.py')):
            basedir, leaf = os.path.split(basedir)
            if leaf == '':
                break
            parts.append(leaf)
        parts.reverse()
        module_name = '.'.join(parts + [module_name])
    return (basedir, module_name)
###########################################################################
# Basic Actions
###########################################################################
def run(names, optionflags, verbosity):
    """Check the doctest examples for each name in *names*.

    Builds a unittest suite of DocTestCases and runs it with a
    TextTestRunner at the given *verbosity*.  Names that cannot be
    resolved are reported on stderr and skipped.
    """
    suite = unittest.TestSuite()
    for name in names:
        try:
            for test in find(name):
                suite.addTest(DocTestCase(test, optionflags))
        except ValueError, e:
            print >>sys.stderr, ('%s: Error processing %s -- %s' %
                                 (sys.argv[0], name, e))
    unittest.TextTestRunner(verbosity=verbosity).run(suite)
def debug(names, optionflags, verbosity, pm=True):
debugger = Debugger()
for name in names:
try:
for test in find(name):
debugger.debug(test, pm)
except ValueError, e:
raise
print >>sys.stderr, ('%s: Error processing %s -- %s' %
(sys.argv[0], name, e))
def update(names, optionflags, verbosity):
parser = DocTestParser()
runner = UpdateRunner(verbose=True)
for name in names:
try:
# Make sure we're running on a text file.
tests = find(name)
if len(tests) != 1 or tests[0].lineno != 0:
raise ValueError('update can only be used with text files')
test = tests[0]
# Run the updater!
(failures, tries) = runner.run(test)
# Confirm the changes.
if failures == 0:
print 'No updates needed!'
else:
print '*'*70
print '%d examples updated.' % failures
print '-'*70
sys.stdout.write('Accept updates? [y/N] ')
sys.stdout.flush()
if sys.stdin.readline().lower().strip() in ('y', 'yes'):
# Make a backup of the original contents.
backup = test.filename+'.bak'
print 'Renaming %s -> %s' % (name, backup)
os.rename(test.filename, backup)
# Write the new contents.
print 'Writing updated version to %s' % test.filename
out = open(test.filename, 'w')
out.write(test.docstring)
out.close()
else:
print 'Updates rejected!'
except ValueError, e:
raise
print >>sys.stderr, ('%s: Error processing %s -- %s' %
(sys.argv[0], name, e))
###########################################################################
# Main script
###########################################################################
# Action options
# Module-level optparse Option instances, assembled into the parser by
# main().  The three action options share dest="action"; --check is the
# default action.
CHECK_OPT = Option("--check",
                   action="store_const", dest="action", const="check",
                   default="check",
                   help="Verify the output of the doctest examples in the "
                   "given files.")
UPDATE_OPT = Option("--update", "-u",
                    action="store_const", dest="action", const="update",
                    help="Update the expected output for new or out-of-date "
                    "doctest examples in the given files. In "
                    "particular, find every example whose actual output "
                    "does not match its expected output; and replace its "
                    "expected output with its actual output. You will "
                    "be asked to verify the changes before they are "
                    "written back to the file; be sure to check them over "
                    "carefully, to ensure that you don't accidentally "
                    "create broken test cases.")
DEBUG_OPT = Option("--debug",
                   action="store_const", dest="action", const="debug",
                   help="Verify the output of the doctest examples in the "
                   "given files. If any example fails, then enter the "
                   "python debugger.")
# Reporting options
VERBOSE_OPT = Option("-v", "--verbose",
                     action="count", dest="verbosity", default=1,
                     help="Increase verbosity.")
QUIET_OPT = Option("-q", "--quiet",
                   action="store_const", dest="verbosity", const=0,
                   help="Decrease verbosity.")
UDIFF_OPT = Option("--udiff", '-d',
                   action="store_const", dest="udiff", const=1, default=0,
                   help="Display test failures using unified diffs.")
CDIFF_OPT = Option("--cdiff",
                   action="store_const", dest="cdiff", const=1, default=0,
                   help="Display test failures using context diffs.")
NDIFF_OPT = Option("--ndiff",
                   action="store_const", dest="ndiff", const=1, default=0,
                   help="Display test failures using ndiffs.")
# Output Comparison options
ELLIPSIS_OPT = Option("--ellipsis",
                      action="store_const", dest="ellipsis", const=1, default=0,
                      help="Allow \"...\" to be used for ellipsis in the "
                      "expected output.")
NORMWS_OPT = Option("--normalize_whitespace",
                    action="store_const", dest="normws", const=1, default=0,
                    help="Ignore whitespace differences between "
                    "the expected output and the actual output.")
def main():
    """Command-line entry point: parse options and dispatch the action."""
    # Create the option parser.
    optparser = OptionParser(usage='%prog [options] NAME ...',
                             version="Edloper's Doctest Driver, "
                                     "version %s" % __version__)
    action_group = OptionGroup(optparser, 'Actions (default=check)')
    action_group.add_options([CHECK_OPT, UPDATE_OPT, DEBUG_OPT])
    optparser.add_option_group(action_group)
    reporting_group = OptionGroup(optparser, 'Reporting')
    reporting_group.add_options([VERBOSE_OPT, QUIET_OPT,
                                 UDIFF_OPT, CDIFF_OPT, NDIFF_OPT])
    optparser.add_option_group(reporting_group)
    compare_group = OptionGroup(optparser, 'Output Comparison')
    compare_group.add_options([ELLIPSIS_OPT, NORMWS_OPT])
    optparser.add_option_group(compare_group)
    # Extract optionflags and the list of file names.
    optionvals, names = optparser.parse_args()
    if len(names) == 0:
        optparser.error("No files specified")
    # Bug fix: --ndiff was parsed (NDIFF_OPT) but never folded into
    # optionflags, so REPORT_NDIFF had no effect.
    optionflags = (optionvals.udiff * REPORT_UDIFF |
                   optionvals.cdiff * REPORT_CDIFF |
                   optionvals.ndiff * REPORT_NDIFF |
                   optionvals.ellipsis * ELLIPSIS |
                   optionvals.normws * NORMALIZE_WHITESPACE)
    # Perform the requested action.
    if optionvals.action == 'check':
        run(names, optionflags, optionvals.verbosity)
    elif optionvals.action == 'update':
        update(names, optionflags, optionvals.verbosity)
    elif optionvals.action == 'debug':
        debug(names, optionflags, optionvals.verbosity)
    else:
        optparser.error('INTERNAL ERROR: Bad action %s' % optionvals.action)
if __name__ == '__main__': main()
| Python |
#!/usr/bin/env python2.4
#
# A Test Driver for Doctest
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
#
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
"""
A driver for testing interactive python examples in text files and
docstrings. This doctest driver performs three functions:
- checking: Runs the interactive examples, and reports any examples
whose actual output does not match their expected output.
- debugging: Runs the interactive examples, and enters the debugger
whenever an example's actual output does not match its expected
output.
- updating: Runs the interactive examples, and replaces the expected
output with the actual output whenever they don't match. This is
used to update the output for new or out-of-date examples.
A number of other flags can be given; call the driver with the
`--help` option for a complete list.
"""
import os, os.path, sys, unittest, pdb, bdb, re, tempfile, traceback
from doctest import *
from doctest import DocTestCase
from optparse import OptionParser, OptionGroup, Option
from StringIO import StringIO
__version__ = '0.1'
###########################################################################
# Utility Functions
###########################################################################
# These are copied from doctest; I don't import them because they're
# private. See the versions in doctest for docstrings, etc.
class _OutputRedirectingPdb(pdb.Pdb):
def __init__(self, out):
self.__out = out
pdb.Pdb.__init__(self)
def trace_dispatch(self, *args):
save_stdout = sys.stdout
sys.stdout = self.__out
pdb.Pdb.trace_dispatch(self, *args)
sys.stdout = save_stdout
def _exception_traceback(exc_info):
excout = StringIO()
exc_type, exc_val, exc_tb = exc_info
traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
return excout.getvalue()
class _SpoofOut(StringIO):
def getvalue(self):
result = StringIO.getvalue(self)
if result and not result.endswith("\n"):
result += "\n"
if hasattr(self, "softspace"):
del self.softspace
return result
def truncate(self, size=None):
StringIO.truncate(self, size)
if hasattr(self, "softspace"):
del self.softspace
###########################################################################
# Update Runner
###########################################################################
class UpdateRunner(DocTestRunner):
    """
    A subclass of `DocTestRunner` that checks the output of each
    example, and replaces the expected output with the actual output
    for any examples that fail.

    `UpdateRunner` can be used:
      - To automatically fill in the expected output for new examples.
      - To correct examples whose output has become out-of-date.

    However, care must be taken not to update an example's expected
    output with an incorrect value.
    """
    def __init__(self, verbose=False, mark_updates=False):
        # If true, a "# [!!] OUTPUT AUTOMATICALLY UPDATED [!!]" marker is
        # inserted above every example whose output gets replaced.
        self._mark_updates = mark_updates
        DocTestRunner.__init__(self, verbose=verbose)

    def run(self, test, compileflags=None, out=None, clear_globs=True):
        """Run *test*; then rewrite test.docstring in place so each failing
        example's expected output is replaced by the actual output that was
        recorded by the report_* hooks.  Returns (failures, tries)."""
        # Maps each failing example to its replacement expected output.
        self._new_want = {}
        (f,t) = DocTestRunner.run(self, test, compileflags, out, clear_globs)
        # Update the test's docstring, and the lineno's of the
        # examples, by breaking it into lines and replacing the old
        # expected outputs with the new expected outputs.
        old_lines = test.docstring.split('\n')
        new_lines = []
        lineno = 0
        offset = 0
        for example in test.examples:
            # Copy the lines up through the start of the example's
            # output from old_lines to new_lines.
            got_start = example.lineno + example.source.count('\n')
            new_lines += old_lines[lineno:got_start]
            lineno = got_start
            # Do a sanity check to make sure we're at the right lineno
            # (In particular, check that the example's expected output
            # appears in old_lines where we expect it to appear.)
            if example.want:
                assert (example.want.split('\n')[0] ==
                        old_lines[lineno][example.indent:]), \
                       'Line number mismatch at %d' % lineno
            # Skip over the old expected output.
            old_len = example.want.count('\n')
            lineno += old_len
            # Mark any changes we make.
            if self._mark_updates and example in self._new_want:
                new_lines.append(' '*example.indent + '... ' +
                                 '# [!!] OUTPUT AUTOMATICALLY UPDATED [!!]')
            # Add the new expected output.
            new_want = self._new_want.get(example, example.want)
            if new_want:
                new_want = '\n'.join([' '*example.indent+l
                                      for l in new_want[:-1].split('\n')])
                new_lines.append(new_want)
            # Update the example's want & lineno fields.  (Fixed comment
            # typo: was "lieno".)
            example.want = new_want
            example.lineno += offset
            offset += example.want.count('\n') - old_len
        # Add any remaining lines
        new_lines += old_lines[lineno:]
        # Update the test's docstring.
        test.docstring = '\n'.join(new_lines)
        # Return failures & tries
        return (f,t)

    def report_start(self, out, test, example):
        pass

    def report_success(self, out, test, example, got):
        pass

    def report_unexpected_exception(self, out, test, example, exc_info):
        # Record the traceback as the example's new expected output.
        replacement = _exception_traceback(exc_info)
        self._new_want[example] = replacement
        if self._verbose:
            self._report_replacement(out, test, example, replacement)

    def report_failure(self, out, test, example, got):
        # Record the actual output as the example's new expected output.
        self._new_want[example] = got
        if self._verbose:
            self._report_replacement(out, test, example, got)

    def _report_replacement(self, out, test, example, replacement):
        """Write a human-readable description of a planned replacement."""
        want = '\n'.join(['  '+l for l in example.want.split('\n')[:-1]])
        repl = '\n'.join(['  '+l for l in replacement.split('\n')[:-1]])
        if want and repl:
            diff = 'Replacing:\n%s\nWith:\n%s\n' % (want, repl)
        elif want:
            diff = 'Removing:\n%s\n' % want
        elif repl:
            diff = 'Adding:\n%s\n' % repl
        else:
            # Bug fix: `diff` was previously unbound on this path, raising
            # UnboundLocalError when both strings were empty.
            diff = ''
        out(self._header(test, example) + diff)

    DIVIDER = '-'*70

    def _header(self, test, example):
        """Return a location header identifying *example* within *test*."""
        if test.filename is None:
            tag = ("On line #%s of %s" %
                   (example.lineno+1, test.name))
        elif test.lineno is None:
            tag = ("On line #%s of %s in %s" %
                   (example.lineno+1, test.name, test.filename))
        else:
            lineno = test.lineno+example.lineno+1
            tag = ("On line #%s of %s (%s)" %
                   (lineno, test.filename, test.name))
        source_lines = example.source.rstrip().split('\n')
        return (self.DIVIDER + '\n' + tag + '\n' +
                ' >>> %s\n' % source_lines[0] +
                ''.join([' ... %s\n' % l for l in source_lines[1:]]))
###########################################################################
# Debugger
###########################################################################
def _indent(s, indent=4):
return re.sub('(?m)^(?!$)', indent*' ', s)
import keyword, token, tokenize
class Debugger:
# Just using this for reporting:
runner = DocTestRunner()
def __init__(self, checker=None, set_trace=None):
if checker is None:
checker = OutputChecker()
self.checker = checker
if set_trace is None:
set_trace = pdb.Pdb().set_trace
self.set_trace = set_trace
def _check_output(self, example):
want = example.want
optionflags = self._get_optionflags(example)
got = sys.stdout.getvalue()
sys.stdout.truncate(0)
if not self.checker.check_output(want, got, optionflags):
self.runner.report_failure(self.save_stdout.write,
self.test, example, got)
return False
else:
return True
def _check_exception(self, example):
want_exc_msg = example.exc_msg
optionflags = self._get_optionflags(example)
exc_info = sys.exc_info()
got_exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
if not self.checker.check_output(want_exc_msg, got_exc_msg,
optionflags):
got = _exception_traceback(exc_info)
self.runner.report_failure(self.save_stdout.write,
self.test, example, got)
return False
else:
return True
def _print_if_not_none(self, *args):
if args == (None,):
pass
elif len(args) == 1:
print `args[0]`
else:
print `args` # not quite right: >>> 1,
def _comment_line(self, line):
"Return a commented form of the given line"
line = line.rstrip()
if line:
return '# '+line
else:
return '#'
def _script_from_examples(self, s):
output = []
examplenum = 0
for piece in DocTestParser().parse(s):
if isinstance(piece, Example):
self._script_from_example(piece, examplenum, output)
examplenum += 1
else:
# Add non-example text.
output += [self._comment_line(l)
for l in piece.split('\n')[:-1]]
# Combine the output, and return it.
return '\n'.join(output)
_CHK_OUT = 'if not CHECK_OUTPUT(__examples__[%d]): __set_trace__()'
_CHK_EXC = 'if not CHECK_EXCEPTION(__examples__[%d]): __set_trace__()'
def _script_from_example(self, example, i, output):
source = self._simulate_compile_singlemode(example.source)[:-1]
if example.exc_msg is None:
output.append(source)
output.append(self._CHK_OUT % i)
else:
output.append('try:')
output.append(_indent(source))
output.append(' '+self._CHK_OUT % i)
output.append('except:')
output.append(' '+self._CHK_EXC % i)
def _simulate_compile_singlemode(self, s):
# Calculate line offsets
lines = [0, 0]
pos = 0
while 1:
pos = s.find('\n', pos)+1
if not pos: break
lines.append(pos)
lines.append(len(s))
oldpos = 0
parenlevel = 0
deflevel = 0
output = []
stmt = []
text = StringIO(s)
tok_gen = tokenize.generate_tokens(text.readline)
for toktype, tok, (srow,scol), (erow,ecol), line in tok_gen:
newpos = lines[srow] + scol
stmt.append(s[oldpos:newpos])
if tok != '':
stmt.append(tok)
oldpos = newpos + len(tok)
# Update the paren level.
if tok in '([{':
parenlevel += 1
if tok in '}])':
parenlevel -= 1
if tok in ('def', 'class') and deflevel == 0:
deflevel = 1
if deflevel and toktype == token.INDENT:
deflevel += 1
if deflevel and toktype == token.DEDENT:
deflevel -= 1
# Are we starting a statement?
if ((toktype in (token.NEWLINE, tokenize.NL, tokenize.COMMENT,
token.INDENT, token.ENDMARKER) or
tok==':') and parenlevel == 0):
if deflevel == 0 and self._is_expr(stmt[1:-2]):
output += stmt[0]
output.append('__print__((')
output += stmt[1:-2]
output.append('))')
output += stmt[-2:]
else:
output += stmt
stmt = []
return ''.join(output)
def _is_expr(self, stmt):
stmt = [t for t in stmt if t]
if not stmt:
return False
# An assignment signifies a non-exception, *unless* it
# appears inside of parens (eg, ``f(x=1)``.)
parenlevel = 0
for tok in stmt:
if tok in '([{': parenlevel += 1
if tok in '}])': parenlevel -= 1
if (parenlevel == 0 and
tok in ('=', '+=', '-=', '*=', '/=', '%=', '&=', '+=',
'^=', '<<=', '>>=', '**=', '//=')):
return False
# Any keywords *except* "not", "or", "and", "lambda", "in", "is"
# signifies a non-expression.
if stmt[0] in ("assert", "break", "class", "continue", "def",
"del", "elif", "else", "except", "exec",
"finally", "for", "from", "global", "if",
"import", "pass", "print", "raise", "return",
"try", "while", "yield"):
return False
return True
def _get_optionflags(self, example):
optionflags = 0
for (flag, val) in example.options.items():
if val:
optionflags |= flag
else:
optionflags &= ~flag
return optionflags
def debug(self, test, pm=False):
self.test = test
# Save the old stdout
self.save_stdout = sys.stdout
# Convert the source docstring to a script.
script = self._script_from_examples(test.docstring)
# Create a debugger.
debugger = _OutputRedirectingPdb(sys.stdout)
# Patch pdb.set_trace to restore sys.stdout during interactive
# debugging (so it's not still redirected to self._fakeout).
save_set_trace = pdb.set_trace
pdb.set_trace = debugger.set_trace
# Write the script to a temporary file. Note that
# tempfile.NameTemporaryFile() cannot be used. As the docs
# say, a file so created cannot be opened by name a second
# time on modern Windows boxes, and execfile() needs to open
# it.
srcfilename = tempfile.mktemp(".py", "doctestdebug_")
f = open(srcfilename, 'w')
f.write(script)
f.close()
# Set up the globals
test.globs['CHECK_OUTPUT'] = self._check_output
test.globs['CHECK_EXCEPTION'] = self._check_exception
test.globs['__print__'] = self._print_if_not_none
test.globs['__set_trace__'] = debugger.set_trace
test.globs['__examples__'] = self.test.examples
try:
if pm is False:
debugger.run("execfile(%r)" % srcfilename,
test.globs, test.globs)
else:
try:
sys.stdout = _SpoofOut()
try:
execfile(srcfilename, test.globs)
except bdb.BdbQuit:
return
except:
sys.stdout = self.save_stdout
exc_info = sys.exc_info()
exc_msg = traceback.format_exception_only(
exc_info[0], exc_info[1])[-1]
self.save_stdout.write(self.runner.DIVIDER+'\n')
self.save_stdout.write('Unexpected exception:\n' +
_indent(exc_msg))
raise
#self.post_mortem(debugger, exc_info[2])
finally:
sys.stdout = self.save_stdout
finally:
sys.set_trace = save_set_trace
os.remove(srcfilename)
def post_mortem(self, debugger, t):
debugger.reset()
while t.tb_next is not None:
t = t.tb_next
debugger.interaction(t.tb_frame, t)
###########################################################################
# Helper functions
###########################################################################
# Name can be:
# - The filename of a text file
# - The filename of a python file
# - The dotted name of a python module
# Return a list of test!
def find(name):
    """Return a list of DocTests for *name*.

    *name* may be the filename of a text file, the filename of a python
    file, or the dotted name of a python module; an optional ':testname'
    suffix restricts the result to matching tests.
    """
    # Split off an optional test-name qualifier.
    testname = None
    if ':' in name:
        (name, testname) = name.split(':')
    if not os.path.exists(name):
        # Not an existing file: treat it as a dotted module name.
        module = import_from_name(name)
    else:
        filename = os.path.normpath(os.path.abspath(name))
        ext = os.path.splitext(filename)[-1]
        if ext[-3:] != '.py' and ext[-4:-1] != '.py':
            # A text file: parse the whole file as a single doctest.
            if testname is not None:
                raise ValueError("test names can't be specified "
                                 "for text files")
            text = open(filename).read()
            return [DocTestParser().get_doctest(text, {}, name, filename, 0)]
        # A python file; import it.  Make sure to set the path correctly.
        basedir, modname = find_module_from_filename(filename)
        orig_path = sys.path[:]
        try:
            sys.path.insert(0, basedir)
            module = import_from_name(modname)
        finally:
            sys.path[:] = orig_path
    # Find tests.
    tests = DocTestFinder().find(module)
    if testname is not None:
        testname = '%s.%s' % (module.__name__, testname)
        tests = [t for t in tests if t.name.startswith(testname)]
        if len(tests) == 0:
            raise ValueError("test not found")
    return tests
def import_from_name(name):
    """Import the module with the given dotted `name` and return it.

    Any failure during the import is re-raised as a C{ValueError}, so
    callers (e.g. L{find}) can report all lookup errors uniformly.
    """
    try:
        # fromlist=['*'] makes __import__ return the leaf module
        # rather than the top-level package for dotted names.
        return __import__(name, globals(), locals(), ['*'])
    except Exception, e:
        raise ValueError, str(e)
    except:
        # Catches exceptions that don't derive from Exception (string
        # or old-style-class exceptions are possible in Python 2).
        raise ValueError, 'Error importing %r' % name
def find_module_from_filename(filename):
    """
    Given a filename, return a tuple `(basedir, module)`, where
    `module` is the module's dotted name, and `basedir` is the
    directory it should be loaded from (this directory should be
    added to the path to import it).  Packages are handled correctly.
    """
    # NOTE: locals renamed from `file`/`dir` to avoid shadowing builtins.
    (basedir, fname) = os.path.split(filename)
    (module_name, ext) = os.path.splitext(fname)
    # If it's a package, then import with the directory name (don't
    # use __init__ as the module name).
    if module_name == '__init__':
        (basedir, module_name) = os.path.split(basedir)
    # If it's contained inside a package, then find the base dir by
    # walking up while __init__ files are present.
    if (os.path.exists(os.path.join(basedir, '__init__.py')) or
        os.path.exists(os.path.join(basedir, '__init__.pyc')) or
        os.path.exists(os.path.join(basedir, '__init__.pyw'))):
        package = []
        while os.path.exists(os.path.join(basedir, '__init__.py')):
            (basedir, dirname) = os.path.split(basedir)
            if dirname == '': break  # reached the filesystem root
            package.append(dirname)
        package.reverse()
        module_name = '.'.join(package + [module_name])
    return (basedir, module_name)
###########################################################################
# Basic Actions
###########################################################################
def run(names, optionflags, verbosity):
suite = unittest.TestSuite()
for name in names:
try:
for test in find(name):
suite.addTest(DocTestCase(test, optionflags))
except ValueError, e:
print >>sys.stderr, ('%s: Error processing %s -- %s' %
(sys.argv[0], name, e))
unittest.TextTestRunner(verbosity=verbosity).run(suite)
def debug(names, optionflags, verbosity, pm=True):
debugger = Debugger()
for name in names:
try:
for test in find(name):
debugger.debug(test, pm)
except ValueError, e:
raise
print >>sys.stderr, ('%s: Error processing %s -- %s' %
(sys.argv[0], name, e))
def update(names, optionflags, verbosity):
parser = DocTestParser()
runner = UpdateRunner(verbose=True)
for name in names:
try:
# Make sure we're running on a text file.
tests = find(name)
if len(tests) != 1 or tests[0].lineno != 0:
raise ValueError('update can only be used with text files')
test = tests[0]
# Run the updater!
(failures, tries) = runner.run(test)
# Confirm the changes.
if failures == 0:
print 'No updates needed!'
else:
print '*'*70
print '%d examples updated.' % failures
print '-'*70
sys.stdout.write('Accept updates? [y/N] ')
sys.stdout.flush()
if sys.stdin.readline().lower().strip() in ('y', 'yes'):
# Make a backup of the original contents.
backup = test.filename+'.bak'
print 'Renaming %s -> %s' % (name, backup)
os.rename(test.filename, backup)
# Write the new contents.
print 'Writing updated version to %s' % test.filename
out = open(test.filename, 'w')
out.write(test.docstring)
out.close()
else:
print 'Updates rejected!'
except ValueError, e:
raise
print >>sys.stderr, ('%s: Error processing %s -- %s' %
(sys.argv[0], name, e))
###########################################################################
# Main script
###########################################################################
# Action options
CHECK_OPT = Option("--check",
action="store_const", dest="action", const="check",
default="check",
help="Verify the output of the doctest examples in the "
"given files.")
UPDATE_OPT = Option("--update", "-u",
action="store_const", dest="action", const="update",
help="Update the expected output for new or out-of-date "
"doctest examples in the given files. In "
"particular, find every example whose actual output "
"does not match its expected output; and replace its "
"expected output with its actual output. You will "
"be asked to verify the changes before they are "
"written back to the file; be sure to check them over "
"carefully, to ensure that you don't accidentally "
"create broken test cases.")
DEBUG_OPT = Option("--debug",
action="store_const", dest="action", const="debug",
help="Verify the output of the doctest examples in the "
"given files. If any example fails, then enter the "
"python debugger.")
# Reporting options
VERBOSE_OPT = Option("-v", "--verbose",
action="count", dest="verbosity", default=1,
help="Increase verbosity.")
QUIET_OPT = Option("-q", "--quiet",
action="store_const", dest="verbosity", const=0,
help="Decrease verbosity.")
UDIFF_OPT = Option("--udiff", '-d',
action="store_const", dest="udiff", const=1, default=0,
help="Display test failures using unified diffs.")
CDIFF_OPT = Option("--cdiff",
action="store_const", dest="cdiff", const=1, default=0,
help="Display test failures using context diffs.")
NDIFF_OPT = Option("--ndiff",
action="store_const", dest="ndiff", const=1, default=0,
help="Display test failures using ndiffs.")
# Output Comparison options
ELLIPSIS_OPT = Option("--ellipsis",
action="store_const", dest="ellipsis", const=1, default=0,
help="Allow \"...\" to be used for ellipsis in the "
"expected output.")
NORMWS_OPT = Option("--normalize_whitespace",
action="store_const", dest="normws", const=1, default=0,
help="Ignore whitespace differences between "
"the expected output and the actual output.")
def main():
    """Parse command-line options and dispatch to the requested
    action (check, update, or debug)."""
    # Create the option parser.
    optparser = OptionParser(usage='%prog [options] NAME ...',
                             version="Edloper's Doctest Driver, "
                                     "version %s" % __version__)
    action_group = OptionGroup(optparser, 'Actions (default=check)')
    action_group.add_options([CHECK_OPT, UPDATE_OPT, DEBUG_OPT])
    optparser.add_option_group(action_group)
    reporting_group = OptionGroup(optparser, 'Reporting')
    reporting_group.add_options([VERBOSE_OPT, QUIET_OPT,
                                 UDIFF_OPT, CDIFF_OPT, NDIFF_OPT])
    optparser.add_option_group(reporting_group)
    compare_group = OptionGroup(optparser, 'Output Comparison')
    compare_group.add_options([ELLIPSIS_OPT, NORMWS_OPT])
    optparser.add_option_group(compare_group)
    # Extract optionflags and the list of file names.
    optionvals, names = optparser.parse_args()
    if len(names) == 0:
        optparser.error("No files specified")
    # BUGFIX: --ndiff was parsed but never folded into optionflags,
    # so the option silently had no effect.
    optionflags = (optionvals.udiff * REPORT_UDIFF |
                   optionvals.cdiff * REPORT_CDIFF |
                   optionvals.ndiff * REPORT_NDIFF |
                   optionvals.ellipsis * ELLIPSIS |
                   optionvals.normws * NORMALIZE_WHITESPACE)
    # Perform the requested action.
    if optionvals.action == 'check':
        run(names, optionflags, optionvals.verbosity)
    elif optionvals.action == 'update':
        update(names, optionflags, optionvals.verbosity)
    elif optionvals.action == 'debug':
        debug(names, optionflags, optionvals.verbosity)
    else:
        optparser.error('INTERNAL ERROR: Bad action %s' % optionvals.action)
if __name__ == '__main__': main()
| Python |
# Natural Language Toolkit: Unit Tests
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
#
# $Id: __init__.py 3289 2006-06-02 13:32:13Z stevenbird $
"""
Unit tests for the NLTK modules.  These tests are intended to ensure
that changes that we make to NLTK's code don't accidentally introduce
bugs.
Each module in this package tests a specific aspect of NLTK. Modules
are typically named for the module or class that they test (e.g.,
L{nltk_lite.test.tree} performs tests on the L{nltk_lite.parse.tree}
module).
Use doctest_driver.py to run the tests:
doctest_driver.py --help
NB. Popular options for NLTK documentation are:
--ellipsis --normalize_whitespace
"""
| Python |
# Natural Language Toolkit: A Chart Parser
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@csse.unimelb.edu.au>
# Jean Mark Gawron <gawron@mail.sdsu.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
#
# $Id: chart.py 3460 2006-10-06 10:39:03Z stevenbird $
"""
Data classes and parser implementations for \"chart parsers\", which
use dynamic programming to efficiently parse a text. A X{chart
parser} derives parse trees for a text by iteratively adding \"edges\"
to a \"chart.\" Each X{edge} represents a hypothesis about the tree
structure for a subsequence of the text. The X{chart} is a
\"blackboard\" for composing and combining these hypotheses.
When a chart parser begins parsing a text, it creates a new (empty)
chart, spanning the text. It then incrementally adds new edges to the
chart. A set of X{chart rules} specifies the conditions under which
new edges should be added to the chart. Once the chart reaches a
stage where none of the chart rules adds any new edges, parsing is
complete.
Charts are encoded with the L{Chart} class, and edges are encoded with
the L{TreeEdge} and L{LeafEdge} classes. The chart parser module
defines three chart parsers:
- C{ChartParse} is a simple and flexible chart parser. Given a
set of chart rules, it will apply those rules to the chart until
no more edges are added.
- C{SteppingChartParse} is a subclass of C{ChartParse} that can
be used to step through the parsing process.
- C{EarleyChartParse} is an implementation of the Earley chart parsing
algorithm. It makes a single left-to-right pass through the
chart, and applies one of three rules (predictor, scanner, and
completer) to each edge it encounters.
"""
import re
from en.parser.nltk_lite.parse import ParseI, AbstractParse
from en.parser.nltk_lite.parse.tree import Tree
from en.parser.nltk_lite.parse import cfg
########################################################################
## Edges
########################################################################
class EdgeI(object):
    """
    A hypothesis about the structure of part of a sentence.
    Each edge records the fact that a structure is (partially)
    consistent with the sentence.  An edge contains:
      - A X{span}, indicating what part of the sentence is
        consistent with the hypothesized structure.
      - A X{left-hand side}, specifying what kind of structure is
        hypothesized.
      - A X{right-hand side}, specifying the contents of the
        hypothesized structure.
      - A X{dot position}, indicating how much of the hypothesized
        structure is consistent with the sentence.
    Every edge is either X{complete} or X{incomplete}:
      - An edge is X{complete} if its structure is fully consistent
        with the sentence.
      - An edge is X{incomplete} if its structure is partially
        consistent with the sentence.  For every incomplete edge, the
        span specifies a possible prefix for the edge's structure.
    There are two kinds of edge:
      - C{TreeEdges<TreeEdge>} record which trees have been found to
        be (partially) consistent with the text.
      - C{LeafEdges<LeafEdge>} record the tokens that occur in the text.
    The C{EdgeI} interface provides a common interface to both types
    of edge, allowing chart parsers to treat them in a uniform manner.
    """
    def __init__(self):
        if self.__class__ == EdgeI:
            raise TypeError('Edge is an abstract interface')

    #////////////////////////////////////////////////////////////
    # Span
    #////////////////////////////////////////////////////////////

    def span(self):
        """
        @return: A tuple C{(s,e)}, where C{subtokens[s:e]} is the
            portion of the sentence that is consistent with this
            edge's structure.
        @rtype: C{(int, int)}
        """
        raise AssertionError('EdgeI is an abstract interface')

    def start(self):
        """
        @return: The start index of this edge's span.
        @rtype: C{int}
        """
        raise AssertionError('EdgeI is an abstract interface')

    def end(self):
        """
        @return: The end index of this edge's span.
        @rtype: C{int}
        """
        raise AssertionError('EdgeI is an abstract interface')

    def length(self):
        """
        @return: The length of this edge's span.
        @rtype: C{int}
        """
        raise AssertionError('EdgeI is an abstract interface')

    #////////////////////////////////////////////////////////////
    # Left Hand Side
    #////////////////////////////////////////////////////////////

    def lhs(self):
        """
        @return: This edge's left-hand side, which specifies what kind
            of structure is hypothesized by this edge.
        @see: L{TreeEdge} and L{LeafEdge} for a description of
            the left-hand side values for each edge type.
        """
        raise AssertionError('EdgeI is an abstract interface')

    #////////////////////////////////////////////////////////////
    # Right Hand Side
    #////////////////////////////////////////////////////////////

    def rhs(self):
        """
        @return: This edge's right-hand side, which specifies
            the content of the structure hypothesized by this
            edge.
        @see: L{TreeEdge} and L{LeafEdge} for a description of
            the right-hand side values for each edge type.
        """
        raise AssertionError('EdgeI is an abstract interface')

    def dot(self):
        """
        @return: This edge's dot position, which indicates how much of
            the hypothesized structure is consistent with the
            sentence.  In particular, C{self.rhs[:dot]} is consistent
            with C{subtoks[self.start():self.end()]}.
        @rtype: C{int}
        """
        raise AssertionError('EdgeI is an abstract interface')

    def next(self):
        """
        @return: The element of this edge's right-hand side that
            immediately follows its dot.
        @rtype: C{Nonterminal} or X{terminal} or C{None}
        """
        raise AssertionError('EdgeI is an abstract interface')

    def is_complete(self):
        """
        @return: True if this edge's structure is fully consistent
            with the text.
        @rtype: C{boolean}
        """
        raise AssertionError('EdgeI is an abstract interface')

    def is_incomplete(self):
        """
        @return: True if this edge's structure is partially consistent
            with the text.
        @rtype: C{boolean}
        """
        raise AssertionError('EdgeI is an abstract interface')

    #////////////////////////////////////////////////////////////
    # Comparisons
    #////////////////////////////////////////////////////////////

    def __cmp__(self, other):
        raise AssertionError('EdgeI is an abstract interface')

    def __hash__(self):
        # BUGFIX: this previously took a spurious `other` argument,
        # which made hash(edge) raise TypeError instead of the
        # intended AssertionError.
        raise AssertionError('EdgeI is an abstract interface')
class TreeEdge(EdgeI):
    """
    An edge that records the fact that a tree is (partially)
    consistent with the sentence.  A tree edge consists of:
      - A X{span}, indicating what part of the sentence is
        consistent with the hypothesized tree.
      - A X{left-hand side}, specifying the hypothesized tree's node
        value.
      - A X{right-hand side}, specifying the hypothesized tree's
        children.  Each element of the right-hand side is either a
        terminal, specifying a token with that terminal as its leaf
        value; or a nonterminal, specifying a subtree with that
        nonterminal's symbol as its node value.
      - A X{dot position}, indicating which children are consistent
        with part of the sentence.  In particular, if C{dot} is the
        dot position, C{rhs} is the right-hand side, C{(start,end)}
        is the span, and C{sentence} is the list of subtokens in the
        sentence, then C{subtokens[start:end]} can be spanned by the
        children specified by C{rhs[:dot]}.
    For more information about edges, see the L{EdgeI} interface.
    """
    def __init__(self, span, lhs, rhs, dot=0):
        """
        Construct a new C{TreeEdge}.
        @type span: C{(int, int)}
        @param span: A tuple C{(s,e)}, where C{subtokens[s:e]} is the
            portion of the sentence that is consistent with the new
            edge's structure.
        @type lhs: L{Nonterminal}
        @param lhs: The new edge's left-hand side, specifying the
            hypothesized tree's node value.
        @type rhs: C{list} of (L{Nonterminal} and C{string})
        @param rhs: The new edge's right-hand side, specifying the
            hypothesized tree's children.
        @type dot: C{int}
        @param dot: The position of the new edge's dot.  This position
            specifies what prefix of the production's right hand side
            is consistent with the text.  In particular, if
            C{sentence} is the list of subtokens in the sentence, then
            C{subtokens[span[0]:span[1]]} can be spanned by the
            children specified by C{rhs[:dot]}.
        """
        self._lhs = lhs
        self._rhs = tuple(rhs)
        self._span = span
        self._dot = dot

    # [staticmethod]
    def from_production(production, index):
        """
        @return: A new C{TreeEdge} formed from the given production.
            The new edge's left-hand side and right-hand side will
            be taken from C{production}; its span will be C{(index,
            index)}; and its dot position will be C{0}.
        @rtype: L{TreeEdge}
        """
        return TreeEdge(span=(index, index), lhs=production.lhs(),
                        rhs=production.rhs(), dot=0)
    from_production = staticmethod(from_production)

    # Accessors
    def lhs(self): return self._lhs
    def span(self): return self._span
    def start(self): return self._span[0]
    def end(self): return self._span[1]
    def length(self): return self._span[1] - self._span[0]
    def rhs(self): return self._rhs
    def dot(self): return self._dot
    def is_complete(self): return self._dot == len(self._rhs)
    def is_incomplete(self): return self._dot != len(self._rhs)
    def next(self):
        if self._dot >= len(self._rhs): return None
        else: return self._rhs[self._dot]

    # Comparisons & hashing
    def __cmp__(self, other):
        if self.__class__ != other.__class__: return -1
        return cmp((self._span, self.lhs(), self.rhs(), self._dot),
                   (other._span, other.lhs(), other.rhs(), other._dot))
    def __hash__(self):
        return hash((self.lhs(), self.rhs(), self._span, self._dot))

    # String representation
    def __str__(self):
        # BUGFIX: local renamed from `str` to `s` to avoid shadowing
        # the builtin.
        s = '[%s:%s] ' % (self._span[0], self._span[1])
        s += '%-2s ->' % (self._lhs.symbol(),)
        for i in range(len(self._rhs)):
            if i == self._dot: s += ' *'
            if isinstance(self._rhs[i], cfg.Nonterminal):
                s += ' %s' % (self._rhs[i].symbol(),)
            else:
                s += ' %r' % (self._rhs[i],)
        if len(self._rhs) == self._dot: s += ' *'
        return s

    def __repr__(self):
        return '[Edge: %s]' % self
class LeafEdge(EdgeI):
    """
    An edge recording the fact that a leaf value is consistent with a
    word in the sentence.  A leaf edge consists of:
      - An X{index}, indicating the position of the word.
      - A X{leaf}, specifying the word's content.
    A leaf edge's left-hand side is its leaf value, and its right hand
    side is C{()}.  Its span is C{[index, index+1]}, and its dot
    position is C{0}.
    """
    def __init__(self, leaf, index):
        """
        Construct a new C{LeafEdge}.
        @param leaf: The new edge's leaf value, specifying the word
            that is recorded by this edge.
        @param index: The new edge's index, specifying the position of
            the word that is recorded by this edge.
        """
        self._leaf = leaf
        self._index = index

    # Accessors
    def lhs(self):
        return self._leaf

    def span(self):
        start = self._index
        return (start, start + 1)

    def start(self):
        return self._index

    def end(self):
        return self._index + 1

    def length(self):
        # A leaf edge always covers exactly one word.
        return 1

    def rhs(self):
        return ()

    def dot(self):
        return 0

    def is_complete(self):
        return True

    def is_incomplete(self):
        return False

    def next(self):
        return None

    # Comparisons & hashing
    def __cmp__(self, other):
        if not isinstance(other, LeafEdge):
            return -1
        return cmp((self._index, self._leaf), (other._index, other._leaf))

    def __hash__(self):
        return hash((self._index, self._leaf))

    # String representations
    def __str__(self):
        return '[%s:%s] %r' % (self._index, self._index + 1, self._leaf)

    def __repr__(self):
        return '[Edge: %s]' % (self)
########################################################################
## Chart
########################################################################
class Chart(object):
"""
A blackboard for hypotheses about the syntactic constituents of a
sentence. A chart contains a set of edges, and each edge encodes
a single hypothesis about the structure of some portion of the
sentence.
The L{select} method can be used to select a specific collection
of edges. For example C{chart.select(is_complete=True, start=0)}
yields all complete edges whose start indices are 0. To ensure
the efficiency of these selection operations, C{Chart} dynamically
creates and maintains an index for each set of attributes that
have been selected on.
In order to reconstruct the trees that are represented by an edge,
the chart associates each edge with a set of child pointer lists.
A X{child pointer list} is a list of the edges that license an
edge's right-hand side.
@ivar _tokens: The sentence that the chart covers.
@ivar _num_leaves: The number of tokens.
@ivar _edges: A list of the edges in the chart
@ivar _edge_to_cpls: A dictionary mapping each edge to a set
of child pointer lists that are associated with that edge.
@ivar _indexes: A dictionary mapping tuples of edge attributes
to indices, where each index maps the corresponding edge
attribute values to lists of edges.
"""
def __init__(self, tokens):
"""
Construct a new empty chart.
@type tokens: L{list}
@param tokens: The sentence that this chart will be used to parse.
"""
# Record the sentence token and the sentence length.
self._tokens = list(tokens)
self._num_leaves = len(self._tokens)
# A list of edges contained in this chart.
self._edges = []
# The set of child pointer lists associated with each edge.
self._edge_to_cpls = {}
# Indexes mapping attribute values to lists of edges (used by
# select()).
self._indexes = {}
#////////////////////////////////////////////////////////////
# Sentence Access
#////////////////////////////////////////////////////////////
def num_leaves(self):
"""
@return: The number of words in this chart's sentence.
@rtype: C{int}
"""
return self._num_leaves
def leaf(self, index):
"""
@return: The leaf value of the word at the given index.
@rtype: C{string}
"""
return self._tokens[index]
def leaves(self):
"""
@return: A list of the leaf values of each word in the
chart's sentence.
@rtype: C{list} of C{string}
"""
return self._tokens
#////////////////////////////////////////////////////////////
# Edge access
#////////////////////////////////////////////////////////////
def edges(self):
"""
@return: A list of all edges in this chart. New edges
that are added to the chart after the call to edges()
will I{not} be contained in this list.
@rtype: C{list} of L{EdgeI}
@see: L{iteredges}, L{select}
"""
return self._edges[:]
def iteredges(self):
"""
@return: An iterator over the edges in this chart. Any
new edges that are added to the chart before the iterator
is exahusted will also be generated.
@rtype: C{iter} of L{EdgeI}
@see: L{edges}, L{select}
"""
return iter(self._edges)
# Iterating over the chart yields its edges.
__iter__ = iteredges
def num_edges(self):
"""
@return: The number of edges contained in this chart.
@rtype: C{int}
"""
return len(self._edge_to_cpls)
def select(self, **restrictions):
"""
@return: An iterator over the edges in this chart. Any
new edges that are added to the chart before the iterator
is exahusted will also be generated. C{restrictions}
can be used to restrict the set of edges that will be
generated.
@rtype: C{iter} of L{EdgeI}
@kwarg span: Only generate edges C{e} where C{e.span()==span}
@kwarg start: Only generate edges C{e} where C{e.start()==start}
@kwarg end: Only generate edges C{e} where C{e.end()==end}
@kwarg length: Only generate edges C{e} where C{e.length()==length}
@kwarg lhs: Only generate edges C{e} where C{e.lhs()==lhs}
@kwarg rhs: Only generate edges C{e} where C{e.rhs()==rhs}
@kwarg next: Only generate edges C{e} where C{e.next()==next}
@kwarg dot: Only generate edges C{e} where C{e.dot()==dot}
@kwarg is_complete: Only generate edges C{e} where
C{e.is_complete()==is_complete}
@kwarg is_incomplete: Only generate edges C{e} where
C{e.is_incomplete()==is_incomplete}
"""
# If there are no restrictions, then return all edges.
if restrictions=={}: return iter(self._edges)
# Find the index corresponding to the given restrictions.
restr_keys = restrictions.keys()
restr_keys.sort()
restr_keys = tuple(restr_keys)
# If it doesn't exist, then create it.
if not self._indexes.has_key(restr_keys):
self._add_index(restr_keys)
vals = [restrictions[k] for k in restr_keys]
return iter(self._indexes[restr_keys].get(tuple(vals), []))
def _add_index(self, restr_keys):
"""
A helper function for L{select}, which creates a new index for
a given set of attributes (aka restriction keys).
"""
# Make sure it's a valid index.
for k in restr_keys:
if not hasattr(EdgeI, k):
raise ValueError, 'Bad restriction: %s' % k
# Create the index.
self._indexes[restr_keys] = {}
# Add all existing edges to the index.
for edge in self._edges:
vals = [getattr(edge, k)() for k in restr_keys]
index = self._indexes[restr_keys]
index.setdefault(tuple(vals),[]).append(edge)
#////////////////////////////////////////////////////////////
# Edge Insertion
#////////////////////////////////////////////////////////////
def insert(self, edge, child_pointer_list):
"""
Add a new edge to the chart.
@type edge: L{Edge}
@param edge: The new edge
@type child_pointer_list: C{tuple} of L{Edge}
@param child_pointer_list: A list of the edges that were used to
form this edge. This list is used to reconstruct the trees
(or partial trees) that are associated with C{edge}.
@rtype: C{bool}
@return: True if this operation modified the chart. In
particular, return true iff the chart did not already
contain C{edge}, or if it did not already associate
C{child_pointer_list} with C{edge}.
"""
# Is it a new edge?
if not self._edge_to_cpls.has_key(edge):
# Add it to the list of edges.
self._edges.append(edge)
# Register with indexes
for (restr_keys, index) in self._indexes.items():
vals = [getattr(edge, k)() for k in restr_keys]
index = self._indexes[restr_keys]
index.setdefault(tuple(vals),[]).append(edge)
# Get the set of child pointer lists for this edge.
cpls = self._edge_to_cpls.setdefault(edge,{})
child_pointer_list = tuple(child_pointer_list)
if cpls.has_key(child_pointer_list):
# We've already got this CPL; return false.
return False
else:
# It's a new CPL; register it, and return true.
cpls[child_pointer_list] = True
return True
#////////////////////////////////////////////////////////////
# Tree extraction & child pointer lists
#////////////////////////////////////////////////////////////
def parses(self, root, tree_class=Tree):
"""
@return: A list of the complete tree structures that span
the entire chart, and whose root node is C{root}.
"""
trees = []
for edge in self.select(span=(0,self._num_leaves), lhs=root):
trees += self.trees(edge, tree_class=tree_class, complete=True)
return trees
def trees(self, edge, tree_class=Tree, complete=False):
"""
@return: A list of the tree structures that are associated
with C{edge}.
If C{edge} is incomplete, then the unexpanded children will be
encoded as childless subtrees, whose node value is the
corresponding terminal or nonterminal.
@rtype: C{list} of L{Tree}
@note: If two trees share a common subtree, then the same
C{Tree} may be used to encode that subtree in
both trees. If you need to eliminate this subtree
sharing, then create a deep copy of each tree.
"""
return self._trees(edge, complete, memo={}, tree_class=tree_class)
def _trees(self, edge, complete, memo, tree_class):
"""
A helper function for L{trees}.
@param memo: A dictionary used to record the trees that we've
generated for each edge, so that when we see an edge more
than once, we can reuse the same trees.
"""
# If we've seen this edge before, then reuse our old answer.
if memo.has_key(edge): return memo[edge]
trees = []
# when we're reading trees off the chart, don't use incomplete edges
if complete and edge.is_incomplete():
return trees
# Until we're done computing the trees for edge, set
# memo[edge] to be empty. This has the effect of filtering
# out any cyclic trees (i.e., trees that contain themselves as
# descendants), because if we reach this edge via a cycle,
# then it will appear that the edge doesn't generate any
# trees.
memo[edge] = []
# Leaf edges.
if isinstance(edge, LeafEdge):
leaf = self._tokens[edge.start()]
memo[edge] = leaf
return [leaf]
# Each child pointer list can be used to form trees.
for cpl in self.child_pointer_lists(edge):
# Get the set of child choices for each child pointer.
# child_choices[i] is the set of choices for the tree's
# ith child.
child_choices = [self._trees(cp, complete, memo, tree_class)
for cp in cpl]
# Kludge to ensure child_choices is a doubly-nested list
if len(child_choices) > 0 and type(child_choices[0]) == type(""):
child_choices = [child_choices]
# For each combination of children, add a tree.
for children in self._choose_children(child_choices):
lhs = edge.lhs().symbol()
trees.append(tree_class(lhs, children))
# If the edge is incomplete, then extend it with "partial trees":
if edge.is_incomplete():
unexpanded = [tree_class(elt,[])
for elt in edge.rhs()[edge.dot():]]
for tree in trees:
tree.extend(unexpanded)
# Update the memoization dictionary.
memo[edge] = trees
# Return the list of trees.
return trees
def _choose_children(self, child_choices):
"""
A helper function for L{_trees} that finds the possible sets
of subtrees for a new tree.
@param child_choices: A list that specifies the options for
each child. In particular, C{child_choices[i]} is a list of
tokens and subtrees that can be used as the C{i}th child.
"""
children_lists = [[]]
for child_choice in child_choices:
children_lists = [child_list+[child]
for child in child_choice
for child_list in children_lists]
return children_lists
def child_pointer_lists(self, edge):
"""
@rtype: C{list} of C{list} of C{Edge}
@return: The set of child pointer lists for the given edge.
Each child pointer list is a list of edges that have
been used to form this edge.
"""
# Make a copy, in case they modify it.
return self._edge_to_cpls.get(edge, {}).keys()
#////////////////////////////////////////////////////////////
# Display
#////////////////////////////////////////////////////////////
def pp_edge(self, edge, width=None):
"""
@return: A pretty-printed string representation of a given edge
in this chart.
@rtype: C{string}
@param width: The number of characters allotted to each
index in the sentence.
"""
if width is None: width = 50/(self.num_leaves()+1)
(start, end) = (edge.start(), edge.end())
str = '|' + ('.'+' '*(width-1))*start
# Zero-width edges are "#" if complete, ">" if incomplete
if start == end:
if edge.is_complete(): str += '#'
else: str += '>'
# Spanning complete edges are "[===]"; Other edges are
# "[---]" if complete, "[--->" if incomplete
elif edge.is_complete() and edge.span() == (0,self._num_leaves):
str += '['+('='*width)*(end-start-1) + '='*(width-1)+']'
elif edge.is_complete():
str += '['+('-'*width)*(end-start-1) + '-'*(width-1)+']'
else:
str += '['+('-'*width)*(end-start-1) + '-'*(width-1)+'>'
str += (' '*(width-1)+'.')*(self._num_leaves-end)
return str + '| %s ' % edge
def pp_leaves(self, width=None):
"""
@return: A pretty-printed string representation of this
chart's leaves. This string can be used as a header
for calls to L{pp_edge}.
"""
if width is None: width = 50/(self.num_leaves()+1)
if self._tokens is not None and width>1:
header = '|.'
for tok in self._tokens:
header += tok[:width-1].center(width-1)+'.'
header += '|'
else:
header = ''
return header
def pp(self, width=None):
"""
@return: A pretty-printed string representation of this chart.
@rtype: C{string}
@param width: The number of characters allotted to each
index in the sentence.
"""
if width is None: width = 50/(self.num_leaves()+1)
# sort edges: primary key=length, secondary key=start index.
# (and filter out the token edges)
edges = [(e.length(), e.start(), e) for e in self]
edges.sort()
edges = [e for (_,_,e) in edges]
return (self.pp_leaves(width) + '\n' +
'\n'.join([self.pp_edge(edge, width) for edge in edges]))
#////////////////////////////////////////////////////////////
# Display: Dot (AT&T Graphviz)
#////////////////////////////////////////////////////////////
def dot_digraph(self):
# Header
s = 'digraph nltk_chart {\n'
#s += ' size="5,5";\n'
s += ' rankdir=LR;\n'
s += ' node [height=0.1,width=0.1];\n'
s += ' node [style=filled, color="lightgray"];\n'
# Set up the nodes
for y in range(self.num_edges(), -1, -1):
if y == 0:
s += ' node [style=filled, color="black"];\n'
for x in range(self.num_leaves()+1):
if y == 0 or (x <= self._edges[y-1].start() or
x >= self._edges[y-1].end()):
s += ' %04d.%04d [label=""];\n' % (x,y)
# Add a spacer
s += ' x [style=invis]; x->0000.0000 [style=invis];\n'
# Declare ranks.
for x in range(self.num_leaves()+1):
s += ' {rank=same;'
for y in range(self.num_edges()+1):
if y == 0 or (x <= self._edges[y-1].start() or
x >= self._edges[y-1].end()):
s += ' %04d.%04d' % (x,y)
s += '}\n'
# Add the leaves
s += ' edge [style=invis, weight=100];\n'
s += ' node [shape=plaintext]\n'
s += ' 0000.0000'
for x in range(self.num_leaves()):
s += '->%s->%04d.0000' % (self.leaf(x), x+1)
s += ';\n\n'
# Add the edges
s += ' edge [style=solid, weight=1];\n'
for y, edge in enumerate(self):
for x in range(edge.start()):
s += (' %04d.%04d -> %04d.%04d [style="invis"];\n' %
(x, y+1, x+1, y+1))
s += (' %04d.%04d -> %04d.%04d [label="%s"];\n' %
(edge.start(), y+1, edge.end(), y+1, edge))
for x in range(edge.end(), self.num_leaves()):
s += (' %04d.%04d -> %04d.%04d [style="invis"];\n' %
(x, y+1, x+1, y+1))
s += '}\n'
return s
########################################################################
## Chart Rules
########################################################################
class ChartRuleI(object):
    """
    A rule that specifies what new edges are licensed by any given set
    of existing edges.  Each chart rule expects a fixed number of
    edges, as indicated by the class variable L{NUM_EDGES}.  In
    particular:
      - A chart rule with C{NUM_EDGES=0} specifies what new edges are
        licensed, regardless of existing edges.
      - A chart rule with C{NUM_EDGES=1} specifies what new edges are
        licensed by a single existing edge.
      - A chart rule with C{NUM_EDGES=2} specifies what new edges are
        licensed by a pair of existing edges.
    @type NUM_EDGES: C{int}
    @cvar NUM_EDGES: The number of existing edges that this rule uses
        to license new edges.  Typically, this number ranges from zero
        to two.
    """
    # Note: the raises below use call syntax rather than the
    # Python-2-only "raise ExcClass, msg" statement form, so this
    # module remains forward-portable; behavior is identical.
    def apply(self, chart, grammar, *edges):
        """
        Add the edges licensed by this rule and the given edges to the
        chart.
        @type edges: C{list} of L{EdgeI}
        @param edges: A set of existing edges.  The number of edges
            that should be passed to C{apply} is specified by the
            L{NUM_EDGES} class variable.
        @rtype: C{list} of L{EdgeI}
        @return: A list of the edges that were added.
        """
        raise AssertionError('ChartRuleI is an abstract interface')
    def apply_iter(self, chart, grammar, *edges):
        """
        @return: A generator that will add edges licensed by this rule
            and the given edges to the chart, one at a time.  Each
            time the generator is resumed, it will either add a new
            edge and yield that edge; or return.
        @rtype: C{iter} of L{EdgeI}
        @type edges: C{list} of L{EdgeI}
        @param edges: A set of existing edges.  The number of edges
            that should be passed to C{apply_iter} is specified by the
            L{NUM_EDGES} class variable.
        """
        raise AssertionError('ChartRuleI is an abstract interface')
    def apply_everywhere(self, chart, grammar):
        """
        Add all the edges licensed by this rule and the edges in the
        chart to the chart.
        @rtype: C{list} of L{EdgeI}
        @return: A list of the edges that were added.
        """
        raise AssertionError('ChartRuleI is an abstract interface')
    def apply_everywhere_iter(self, chart, grammar):
        """
        @return: A generator that will add all edges licensed by
            this rule, given the edges that are currently in the
            chart, one at a time.  Each time the generator is resumed,
            it will either add a new edge and yield that edge; or
            return.
        @rtype: C{iter} of L{EdgeI}
        """
        raise AssertionError('ChartRuleI is an abstract interface')
class AbstractChartRule(object):
    """
    An abstract base class for chart rules.  C{AbstractChartRule}
    provides:
      - A default implementation for C{apply}, based on C{apply_iter}.
      - A default implementation for C{apply_everywhere_iter},
        based on C{apply_iter}.
      - A default implementation for C{apply_everywhere}, based on
        C{apply_everywhere_iter}.  Currently, this implementation
        assumes that C{NUM_EDGES}<=3.
      - A default implementation for C{__str__}, which returns a
        name based on the rule's class name.
    """
    def __init__(self):
        # Sanity check: NUM_EDGES must be consistent with the
        # signatures of apply() and apply_iter().  Both take
        # (self, chart, grammar, *edges), so the expected positional
        # argument count is NUM_EDGES+3 unless the method declares
        # a *varargs parameter.
        for method in self.apply, self.apply_iter:
            # __func__/__code__ are valid on both Python 2.6+ and
            # Python 3 (im_func/func_code are Python-2-only aliases).
            code = method.__func__.__code__
            num_args = code.co_argcount
            has_vararg = code.co_flags & 4    # CO_VARARGS flag
            if num_args != self.NUM_EDGES+3 and not has_vararg:
                # Bug fix: the original referenced an undefined name
                # ('func') here, so a bad NUM_EDGES raised NameError
                # instead of the intended AssertionError.
                raise AssertionError('NUM_EDGES is incorrect for %s.%s' %
                                     (self.__class__, method.__name__))
    # Subclasses must define apply_iter.
    def apply_iter(self, chart, grammar, *edges):
        raise AssertionError('AbstractChartRule is an abstract class')
    # Default: loop through the given number of edges, and call
    # self.apply_iter() for each tuple of edges.
    def apply_everywhere_iter(self, chart, grammar):
        if self.NUM_EDGES == 0:
            for new_edge in self.apply_iter(chart, grammar):
                yield new_edge
        elif self.NUM_EDGES == 1:
            for e1 in chart:
                for new_edge in self.apply_iter(chart, grammar, e1):
                    yield new_edge
        elif self.NUM_EDGES == 2:
            for e1 in chart:
                for e2 in chart:
                    for new_edge in self.apply_iter(chart, grammar, e1, e2):
                        yield new_edge
        elif self.NUM_EDGES == 3:
            for e1 in chart:
                for e2 in chart:
                    for e3 in chart:
                        for new_edge in self.apply_iter(chart,grammar,e1,e2,e3):
                            yield new_edge
        else:
            raise AssertionError('NUM_EDGES>3 is not currently supported')
    # Default: delegate to apply_iter.
    def apply(self, chart, grammar, *edges):
        return list(self.apply_iter(chart, grammar, *edges))
    # Default: delegate to apply_everywhere_iter.
    def apply_everywhere(self, chart, grammar):
        return list(self.apply_everywhere_iter(chart, grammar))
    # Default: return a name based on the class name.
    def __str__(self):
        # Add spaces between InitialCapsWords.
        return re.sub('([a-z])([A-Z])', r'\1 \2', self.__class__.__name__)
#////////////////////////////////////////////////////////////
# Fundamental Rule
#////////////////////////////////////////////////////////////
class FundamentalRule(AbstractChartRule):
    """
    A rule that joins two adjacent edges to form a single combined
    edge.  In particular, this rule specifies that any pair of edges:
        - [AS{->}S{alpha}*BS{beta}][i:j]
        - [BS{->}S{gamma}*][j:k]
    licenses the edge:
        - [AS{->}S{alpha}B*S{beta}][i:k]
    (i.e., the left edge's dot is advanced over C{B}, and the new
    edge spans both input edges).
    """
    NUM_EDGES = 2
    def apply_iter(self, chart, grammar, left_edge, right_edge):
        # Make sure the rule is applicable: the edges must be
        # adjacent, the left edge's expected symbol must match the
        # right edge's left-hand side, and only the left edge may be
        # incomplete.
        if not (left_edge.end() == right_edge.start() and
                left_edge.next() == right_edge.lhs() and
                left_edge.is_incomplete() and right_edge.is_complete()):
            return
        # Construct the new edge: same production as the left edge,
        # with the dot advanced by one and the span extended to the
        # right edge's end.
        new_edge = TreeEdge(span=(left_edge.start(), right_edge.end()),
                            lhs=left_edge.lhs(), rhs=left_edge.rhs(),
                            dot=left_edge.dot()+1)
        # Add it to the chart, with appropriate child pointers: each
        # existing child-pointer list for the left edge is extended
        # with the right edge.
        changed_chart = False
        for cpl1 in chart.child_pointer_lists(left_edge):
            if chart.insert(new_edge, cpl1+(right_edge,)):
                changed_chart = True
        # If we changed the chart, then generate the edge.
        if changed_chart: yield new_edge
class SingleEdgeFundamentalRule(AbstractChartRule):
    """
    A rule that joins a given edge with adjacent edges in the chart,
    to form combined edges.  In particular, this rule specifies that
    either of the edges:
        - [AS{->}S{alpha}*BS{beta}][i:j]
        - [BS{->}S{gamma}*][j:k]
    licenses the edge:
        - [AS{->}S{alpha}B*S{beta}][i:k]
    if the other edge is already in the chart.
    @note: This is basically L{FundamentalRule}, with one edge left
        unspecified.
    """
    NUM_EDGES = 1
    _fundamental_rule = FundamentalRule()
    def apply_iter(self, chart, grammar, edge1):
        fr = self._fundamental_rule
        if edge1.is_incomplete():
            # edge1 = left_edge; edge2 = right_edge: find complete
            # edges that start where edge1 ends and produce the symbol
            # edge1 expects next.
            for edge2 in chart.select(start=edge1.end(), is_complete=True,
                                     lhs=edge1.next()):
                for new_edge in fr.apply_iter(chart, grammar, edge1, edge2):
                    yield new_edge
        else:
            # edge2 = left_edge; edge1 = right_edge: find incomplete
            # edges that end where edge1 starts and expect edge1's
            # left-hand side next.
            for edge2 in chart.select(end=edge1.start(), is_complete=False,
                                      next=edge1.lhs()):
                for new_edge in fr.apply_iter(chart, grammar, edge2, edge1):
                    yield new_edge
    # Deliberately reports the same name as FundamentalRule, since it
    # licenses the same combinations (with one edge from the chart).
    def __str__(self): return 'Fundamental Rule'
#////////////////////////////////////////////////////////////
# Top-Down Parsing
#////////////////////////////////////////////////////////////
class TopDownInitRule(AbstractChartRule):
    """
    A rule licensing edges corresponding to the grammar productions
    for the grammar's start symbol.  In particular, this rule
    specifies that:
        - [SS{->}*S{alpha}][0:0]
    is licensed for each grammar production C{SS{->}S{alpha}}, where
    C{S} is the grammar's start symbol.
    """
    NUM_EDGES = 0
    def apply_iter(self, chart, grammar):
        # License one zero-width edge at position 0 for every
        # production whose left-hand side is the start symbol.
        start_symbol = grammar.start()
        for production in grammar.productions(lhs=start_symbol):
            edge = TreeEdge.from_production(production, 0)
            if not chart.insert(edge, ()):
                continue
            yield edge
class TopDownExpandRule(AbstractChartRule):
    """
    A rule licensing edges corresponding to the grammar productions
    for the nonterminal following an incomplete edge's dot.  Given:
        - [AS{->}S{alpha}*BS{beta}][i:j]
    it licenses:
        - [BS{->}*S{gamma}][j:j]
    for each grammar production C{BS{->}S{gamma}}.
    """
    NUM_EDGES = 1
    def apply_iter(self, chart, grammar, edge):
        # Only incomplete edges have a nonterminal to expand.
        if edge.is_complete(): return
        nonterminal = edge.next()
        position = edge.end()
        for production in grammar.productions(lhs=nonterminal):
            expansion = TreeEdge.from_production(production, position)
            if chart.insert(expansion, ()):
                yield expansion
class TopDownMatchRule(AbstractChartRule):
    """
    A rule licensing an edge corresponding to a terminal following an
    incomplete edge's dot.  Given:
        - [AS{->}S{alpha}*wS{beta}][i:j]
    it licenses the leaf edge:
        - [wS{->}*][j:j+1]
    if the C{j}th word in the text is C{w}.
    """
    NUM_EDGES = 1
    def apply_iter(self, chart, grammar, edge):
        # A match needs an incomplete edge whose dot is not already at
        # the end of the input.
        if edge.is_complete(): return
        position = edge.end()
        if position >= chart.num_leaves(): return
        word = chart.leaf(position)
        # The expected symbol must be exactly the next word.
        if edge.next() != word: return
        leaf_edge = LeafEdge(word, position)
        if chart.insert(leaf_edge, ()):
            yield leaf_edge
# Add a cache, to prevent recalculating.
class CachedTopDownInitRule(TopDownInitRule):
    """
    A caching version of L{TopDownInitRule}: once the rule has been
    applied to a given chart and grammar, re-applying it generates no
    further edges until the chart or grammar object changes.
    """
    def __init__(self):
        AbstractChartRule.__init__(self)
        self._done = (None, None)
    def apply_iter(self, chart, grammar):
        # Skip all work if this exact (chart, grammar) pair was
        # already processed (identity comparison, not equality).
        cached_chart, cached_grammar = self._done
        if cached_chart is chart and cached_grammar is grammar:
            return
        # Delegate to the uncached rule.
        for edge in TopDownInitRule.apply_iter(self, chart, grammar):
            yield edge
        # Remember the pair so future calls are no-ops.
        self._done = (chart, grammar)
    def __str__(self): return 'Top Down Init Rule'
class CachedTopDownExpandRule(TopDownExpandRule):
    """
    A caching version of L{TopDownExpandRule}: once the rule has been
    applied to some edge, it generates no more edges for any edge
    sharing that edge's C{end} and C{next} values, until the chart or
    grammar object changes.
    """
    def __init__(self):
        AbstractChartRule.__init__(self)
        self._done = {}
    def apply_iter(self, chart, grammar, edge):
        # Edges are interchangeable for this rule when they agree on
        # next() and end(); use that pair as the cache key.
        key = (edge.next(), edge.end())
        cached_chart, cached_grammar = self._done.get(key, (None, None))
        if cached_chart is chart and cached_grammar is grammar:
            return
        # Delegate to the uncached rule.
        for new_edge in TopDownExpandRule.apply_iter(self, chart, grammar, edge):
            yield new_edge
        # Record that this key was handled for this chart/grammar.
        self._done[key] = (chart, grammar)
    def __str__(self): return 'Top Down Expand Rule'
#////////////////////////////////////////////////////////////
# Bottom-Up Parsing
#////////////////////////////////////////////////////////////
class BottomUpInitRule(AbstractChartRule):
    """
    A rule licensing the leaf edge:
        - [wS{->}*][i:i+1]
    for each word C{w} in the text, where C{i} is C{w}'s index.
    """
    NUM_EDGES = 0
    def apply_iter(self, chart, grammar):
        # One leaf edge per input position.
        for position in range(chart.num_leaves()):
            leaf_edge = LeafEdge(chart.leaf(position), position)
            if not chart.insert(leaf_edge, ()):
                continue
            yield leaf_edge
class BottomUpPredictRule(AbstractChartRule):
    """
    A rule licensing any edge corresponding to a production whose
    right-hand side begins with a complete edge's left-hand side.
    Given:
        - [AS{->}S{alpha}*]
    it licenses:
        - [BS{->}*AS{beta}]
    for each grammar production C{BS{->}AS{beta}}.
    """
    NUM_EDGES = 1
    def apply_iter(self, chart, grammar, edge):
        # Prediction is driven only by complete edges.
        if edge.is_incomplete(): return
        for production in grammar.productions(rhs=edge.lhs()):
            predicted = TreeEdge.from_production(production, edge.start())
            if chart.insert(predicted, ()):
                yield predicted
#////////////////////////////////////////////////////////////
# Earley Parsing
#////////////////////////////////////////////////////////////
class CompleterRule(AbstractChartRule):
    """
    A rule that joins a given complete edge with adjacent incomplete
    edges already in the chart.  Given the complete edge:
        - [BS{->}S{gamma}*][j:k]
    and a chart containing:
        - [AS{->}S{alpha}*BS{beta}][i:j]
    it licenses:
        - [AS{->}S{alpha}B*S{beta}][i:k]
    @note: This is basically L{FundamentalRule}, with the left edge
        left unspecified.
    """
    NUM_EDGES = 1
    _fundamental_rule = FundamentalRule()
    def apply_iter(self, chart, grammar, right_edge):
        # Only complete edges can act as the right edge.
        if right_edge.is_incomplete(): return
        rule = self._fundamental_rule
        # Candidate left edges: incomplete, ending where right_edge
        # starts, and expecting right_edge's left-hand side next.
        candidates = chart.select(end=right_edge.start(),
                                  is_complete=False,
                                  next=right_edge.lhs())
        for left_edge in candidates:
            for combined in rule.apply_iter(chart, grammar,
                                            left_edge, right_edge):
                yield combined
    def __str__(self): return 'Completer Rule'
class ScannerRule(AbstractChartRule):
    """
    A rule licensing a leaf edge corresponding to a part-of-speech
    terminal following an incomplete edge's dot.  In particular, this
    rule specifies that:
        - [AS{->}S{alpha}*PS{beta}][i:j]
    licenses the edges:
        - [PS{->}w*][j:j+1]
        - [wS{->}*][j:j+1]
    if the C{j}th word in the text is C{w}; and C{P} is a valid part
    of speech for C{w}.
    """
    NUM_EDGES = 1
    def __init__(self, word_to_pos_lexicon):
        """
        @param word_to_pos_lexicon: A dictionary mapping each word to
            the list of parts of speech that word can have.
        """
        self._word_to_pos = word_to_pos_lexicon
    def apply_iter(self, chart, grammar, edge):
        # Bug fix: the parameter was misspelled 'gramar'.  It is
        # unused in this rule, but the name is kept for the
        # ChartRuleI interface.
        if edge.is_complete() or edge.end() >= chart.num_leaves(): return
        index = edge.end()
        leaf = chart.leaf(index)
        # Only scan if the dot is in front of a valid POS for the
        # next word in the text.
        if edge.next() in self._word_to_pos.get(leaf, []):
            new_leaf_edge = LeafEdge(leaf, index)
            if chart.insert(new_leaf_edge, ()):
                yield new_leaf_edge
            # POS edge [P -> w *][j:j+1], with the leaf edge as child.
            new_pos_edge = TreeEdge((index,index+1), edge.next(),
                                    [leaf], 1)
            if chart.insert(new_pos_edge, (new_leaf_edge,)):
                yield new_pos_edge
# This is just another name for TopDownExpandRule:
class PredictorRule(TopDownExpandRule):
    """L{TopDownExpandRule}, under the name used by Earley parsing."""
########################################################################
## Simple Earley Chart Parser
########################################################################
class EarleyChartParse(AbstractParse):
    """
    A chart parser implementing the Earley parsing algorithm:
      - For each index I{end} in [0, 1, ..., N]:
        - For each I{edge} s.t. I{edge}.end = I{end}:
          - If I{edge} is incomplete, and I{edge}.next is not a part
            of speech:
            - Apply PredictorRule to I{edge}
          - If I{edge} is incomplete, and I{edge}.next is a part of
            speech:
            - Apply ScannerRule to I{edge}
          - If I{edge} is complete:
            - Apply CompleterRule to I{edge}
      - Return any complete parses in the chart
    C{EarleyChartParse} uses a X{lexicon} to decide whether a leaf
    has a given part of speech.  This lexicon is encoded as a
    dictionary that maps each word to a list of parts of speech that
    word can have.
    """
    def __init__(self, grammar, lexicon, trace=0):
        """
        Create a new Earley chart parser, that uses C{grammar} to
        parse texts.
        @type grammar: C{cfg.Grammar}
        @param grammar: The grammar used to parse texts.
        @type lexicon: C{dict} from C{string} to (C{list} of C{string})
        @param lexicon: A lexicon of words that records the parts of
            speech that each word can have.  Each key is a word, and
            the corresponding value is a list of parts of speech.
        @type trace: C{int}
        @param trace: The level of tracing that should be used when
            parsing a text.  C{0} will generate no tracing output;
            and higher numbers will produce more verbose tracing
            output.
        """
        self._grammar = grammar
        self._lexicon = lexicon
        self._trace = trace
        AbstractParse.__init__(self)
    def get_parse_list(self, tokens, tree_class=Tree):
        """
        Parse C{tokens} with the Earley algorithm, and return the
        list of complete parse trees (as C{tree_class} instances).
        """
        chart = Chart(tokens)
        grammar = self._grammar
        # Width, for printing trace edges.  NOTE: Python 2 integer
        # division, so w is an int column width.
        w = 50/(chart.num_leaves()+1)
        if self._trace > 0: print ' ', chart.pp_leaves(w)
        # Initialize the chart with a special "starter" edge whose
        # expected symbol is the grammar's start symbol.
        root = cfg.Nonterminal('[INIT]')
        edge = TreeEdge((0,0), root, (grammar.start(),))
        chart.insert(edge, ())
        # Create the 3 rules:
        predictor = PredictorRule()
        completer = CompleterRule()
        scanner = ScannerRule(self._lexicon)
        # Process one end-position "queue" at a time, left to right.
        for end in range(chart.num_leaves()+1):
            if self._trace > 1: print 'Processing queue %d' % end
            for edge in chart.select(end=end):
                # Predictor and Scanner both fire on incomplete edges;
                # the lexicon decides whether the Scanner finds a match.
                if edge.is_incomplete():
                    for e in predictor.apply(chart, grammar, edge):
                        if self._trace > 0:
                            print 'Predictor', chart.pp_edge(e,w)
                if edge.is_incomplete():
                    for e in scanner.apply(chart, grammar, edge):
                        if self._trace > 0:
                            print 'Scanner ', chart.pp_edge(e,w)
                if edge.is_complete():
                    for e in completer.apply(chart, grammar, edge):
                        if self._trace > 0:
                            print 'Completer', chart.pp_edge(e,w)
        # Output a list of complete parses.
        return chart.parses(grammar.start(), tree_class=tree_class)
########################################################################
## Generic Chart Parser
########################################################################
# Pre-built rule strategies for ChartParse:
# TD_STRATEGY parses top-down (with cached init/expand rules);
# BU_STRATEGY parses bottom-up.
TD_STRATEGY = [CachedTopDownInitRule(), CachedTopDownExpandRule(),
               TopDownMatchRule(), SingleEdgeFundamentalRule()]
BU_STRATEGY = [BottomUpInitRule(), BottomUpPredictRule(),
               SingleEdgeFundamentalRule()]
class ChartParse(AbstractParse):
    """
    A generic chart parser.  A X{strategy}, or list of
    L{ChartRules<ChartRuleI>}, is used to decide what edges to add to
    the chart.  In particular, C{ChartParse} uses the following
    algorithm to parse texts:
      - Until no new edges are added:
        - For each I{rule} in I{strategy}:
          - Apply I{rule} to any applicable edges in the chart.
      - Return any complete parses in the chart
    """
    def __init__(self, grammar, strategy, trace=0):
        """
        Create a new chart parser, that uses C{grammar} to parse
        texts.
        @type grammar: L{cfg.Grammar}
        @param grammar: The grammar used to parse texts.
        @type strategy: C{list} of L{ChartRuleI}
        @param strategy: A list of rules that should be used to decide
            what edges to add to the chart.
        @type trace: C{int}
        @param trace: The level of tracing that should be used when
            parsing a text.  C{0} will generate no tracing output;
            and higher numbers will produce more verbose tracing
            output.
        """
        self._grammar = grammar
        self._strategy = strategy
        self._trace = trace
        AbstractParse.__init__(self)
    def get_parse_list(self, tokens, tree_class=Tree):
        """
        Parse C{tokens} by applying the strategy's rules to a fixed
        point, and return the list of complete parse trees.
        """
        chart = Chart(tokens)
        grammar = self._grammar
        # Width, for printing trace edges (Python 2 integer division).
        w = 50/(chart.num_leaves()+1)
        if self._trace > 0: print chart.pp_leaves(w)
        # Apply every rule repeatedly until a complete pass over the
        # strategy adds no new edges (fixed point).
        edges_added = 1
        while edges_added > 0:
            edges_added = 0
            for rule in self._strategy:
                edges_added_by_rule = 0
                for e in rule.apply_everywhere(chart, grammar):
                    # Print the rule's name before its first edge.
                    if self._trace > 0 and edges_added_by_rule == 0:
                        print '%s:' % rule
                    edges_added_by_rule += 1
                    if self._trace > 1: print chart.pp_edge(e,w)
                if self._trace == 1 and edges_added_by_rule > 0:
                    print ' - Added %d edges' % edges_added_by_rule
                edges_added += edges_added_by_rule
        # Return a list of complete parses.
        return chart.parses(grammar.start(), tree_class=tree_class)
########################################################################
## Stepping Chart Parser
########################################################################
class SteppingChartParse(ChartParse):
    """
    A C{ChartParse} that allows you to step through the parsing
    process, adding a single edge at a time.  It also allows you to
    change the parser's strategy or grammar midway through parsing a
    text.
    The C{initialize} method is used to start parsing a text.  C{step}
    adds a single edge to the chart.  C{set_strategy} changes the
    strategy used by the chart parser.  C{parses} returns the set of
    parses that has been found by the chart parser.
    @ivar _restart: Records whether the parser's strategy, grammar,
        or chart has been changed.  If so, then L{step} must restart
        the parsing algorithm.
    """
    def __init__(self, grammar, strategy=None, trace=0):
        # The chart being filled in; None until initialize() is called.
        self._chart = None
        # The rule that generated the most recent edge (exposed via
        # current_chartrule(), and printed when tracing).
        self._current_chartrule = None
        self._restart = False
        ChartParse.__init__(self, grammar, strategy, trace)
    #////////////////////////////////////////////////////////////
    # Initialization
    #////////////////////////////////////////////////////////////
    def initialize(self, tokens):
        "Begin parsing the given tokens."
        self._chart = Chart(tokens)
        self._restart = True
    #////////////////////////////////////////////////////////////
    # Stepping
    #////////////////////////////////////////////////////////////
    def step(self):
        """
        @return: A generator that adds edges to the chart, one at a
        time.  Each time the generator is resumed, it adds a single
        edge and yields that edge.  If no more edges can be added,
        then it yields C{None}.
        If the parser's strategy, grammar, or chart is changed, then
        the generator will continue adding edges using the new
        strategy, grammar, or chart.
        Note that this generator never terminates, since the grammar
        or strategy might be changed to values that would add new
        edges.  Instead, it yields C{None} when no more edges can be
        added with the current strategy and grammar.
        """
        if self._chart is None:
            raise ValueError, 'Parser must be initialized first'
        while 1:
            self._restart = False
            # Trace-edge width (Python 2 integer division).
            w = 50/(self._chart.num_leaves()+1)
            for e in self._parse():
                if self._trace > 1: print self._current_chartrule
                if self._trace > 0: print self._chart.pp_edge(e,w)
                yield e
                # A strategy/grammar/chart change happened while we
                # were suspended: abandon _parse() and start over.
                if self._restart: break
            else:
                # for/else: _parse() was exhausted without a restart,
                # so no more edges can currently be added.
                yield None # No more edges.
    def _parse(self):
        """
        A generator that implements the actual parsing algorithm.
        L{step} iterates through this generator, and restarts it
        whenever the parser's strategy, grammar, or chart is modified.
        """
        chart = self._chart
        grammar = self._grammar
        # Apply every rule repeatedly until a complete pass over the
        # strategy adds no new edges (fixed point).
        edges_added = 1
        while edges_added > 0:
            edges_added = 0
            for rule in self._strategy:
                self._current_chartrule = rule
                for e in rule.apply_everywhere_iter(chart, grammar):
                    edges_added += 1
                    yield e
    #////////////////////////////////////////////////////////////
    # Accessors
    #////////////////////////////////////////////////////////////
    def strategy(self):
        "@return: The strategy used by this parser."
        return self._strategy
    def grammar(self):
        "@return: The grammar used by this parser."
        return self._grammar
    def chart(self):
        "@return: The chart that is used by this parser."
        return self._chart
    def current_chartrule(self):
        "@return: The chart rule used to generate the most recent edge."
        return self._current_chartrule
    def parses(self, tree_class=Tree):
        "@return: The parse trees currently contained in the chart."
        return self._chart.parses(self._grammar.start(), tree_class)
    #////////////////////////////////////////////////////////////
    # Parser modification
    #////////////////////////////////////////////////////////////
    def set_strategy(self, strategy):
        """
        Change the strategy that the parser uses to decide which edges
        to add to the chart.
        @type strategy: C{list} of L{ChartRuleI}
        @param strategy: A list of rules that should be used to decide
            what edges to add to the chart.
        """
        if strategy == self._strategy: return
        self._strategy = strategy[:] # Make a copy.
        self._restart = True
    def set_grammar(self, grammar):
        "Change the grammar used by the parser."
        if grammar is self._grammar: return
        self._grammar = grammar
        self._restart = True
    def set_chart(self, chart):
        "Load a given chart into the chart parser."
        if chart is self._chart: return
        self._chart = chart
        self._restart = True
    #////////////////////////////////////////////////////////////
    # Standard parser methods
    #////////////////////////////////////////////////////////////
    def get_parse_list(self, token, tree_class=Tree):
        # Initialize ourselves.
        self.initialize(token)
        # Step until no more edges are generated.
        for e in self.step():
            if e is None: break
        # Return a list of complete parses.
        return self.parses(tree_class=tree_class)
########################################################################
## Demo Code
########################################################################
def demo():
    """
    A demonstration of the chart parsers.  Builds a small toy grammar,
    asks the user which parser(s) to run, parses a sample sentence,
    and reports the parse trees and the time taken by each parser.
    """
    import sys, time
    # Define some nonterminals
    S, VP, NP, PP = cfg.nonterminals('S, VP, NP, PP')
    V, N, P, Name, Det = cfg.nonterminals('V, N, P, Name, Det')
    # Define some grammatical productions.
    grammatical_productions = [
        cfg.Production(S, [NP, VP]), cfg.Production(PP, [P, NP]),
        cfg.Production(NP, [Det, N]), cfg.Production(NP, [NP, PP]),
        cfg.Production(VP, [VP, PP]), cfg.Production(VP, [V, NP]),
        cfg.Production(VP, [V]),]
    # Define some lexical productions.
    lexical_productions = [
        cfg.Production(NP, ['John']), cfg.Production(NP, ['I']),
        cfg.Production(Det, ['the']), cfg.Production(Det, ['my']),
        cfg.Production(Det, ['a']),
        cfg.Production(N, ['dog']), cfg.Production(N, ['cookie']),
        cfg.Production(V, ['ate']), cfg.Production(V, ['saw']),
        cfg.Production(P, ['with']), cfg.Production(P, ['under']),
        ]
    # Convert the grammar productions to an earley-style lexicon:
    # each word maps to the list of POS nonterminals that produce it.
    earley_lexicon = {}
    for prod in lexical_productions:
        earley_lexicon.setdefault(prod.rhs()[0], []).append(prod.lhs())
    # The grammar for ChartParse and SteppingChartParse:
    grammar = cfg.Grammar(S, grammatical_productions+lexical_productions)
    # The grammar for EarleyChartParse:
    earley_grammar = cfg.Grammar(S, grammatical_productions)
    # Tokenize a sample sentence.
    sent = 'I saw John with a dog with my cookie'
    print "Sentence:\n", sent
    from en.parser.nltk_lite import tokenize
    tokens = list(tokenize.whitespace(sent))
    print tokens
    # Ask the user which parser to test
    print ' 1: Top-down chart parser'
    print ' 2: Bottom-up chart parser'
    print ' 3: Earley parser'
    print ' 4: Stepping chart parser (alternating top-down & bottom-up)'
    print ' 5: All parsers'
    print '\nWhich parser (1-5)? ',
    choice = sys.stdin.readline().strip()
    print
    if choice not in '12345':
        print 'Bad parser number'
        return
    # Keep track of how long each parser takes.
    times = {}
    # Run the top-down parser, if requested.
    if choice in ('1', '5'):
        cp = ChartParse(grammar, TD_STRATEGY, trace=2)
        t = time.time()
        parses = cp.get_parse_list(tokens)
        times['top down'] = time.time()-t
        # The sample sentence is 5-way ambiguous (PP attachments).
        assert len(parses)==5, 'Not all parses found'
        for tree in parses: print tree
    # Run the bottom-up parser, if requested.
    if choice in ('2', '5'):
        cp = ChartParse(grammar, BU_STRATEGY, trace=2)
        t = time.time()
        parses = cp.get_parse_list(tokens)
        times['bottom up'] = time.time()-t
        assert len(parses)==5, 'Not all parses found'
        for tree in parses: print tree
    # Run the earley, if requested.
    if choice in ('3', '5'):
        cp = EarleyChartParse(earley_grammar, earley_lexicon, trace=1)
        t = time.time()
        parses = cp.get_parse_list(tokens)
        times['Earley parser'] = time.time()-t
        assert len(parses)==5, 'Not all parses found'
        for tree in parses: print tree
    # Run the stepping parser, if requested.
    if choice in ('4', '5'):
        t = time.time()
        cp = SteppingChartParse(grammar, trace=1)
        cp.initialize(tokens)
        # Alternate between the two strategies, up to 21 edges each.
        for i in range(5):
            print '*** SWITCH TO TOP DOWN'
            cp.set_strategy(TD_STRATEGY)
            for j, e in enumerate(cp.step()):
                if j>20 or e is None: break
            print '*** SWITCH TO BOTTOM UP'
            cp.set_strategy(BU_STRATEGY)
            for j, e in enumerate(cp.step()):
                if j>20 or e is None: break
        times['stepping'] = time.time()-t
        assert len(cp.parses())==5, 'Not all parses found'
        for parse in cp.parses(): print parse
    # Print the times of all parsers:
    maxlen = max([len(key) for key in times.keys()])
    # NOTE: backquotes are Python-2-only repr() syntax, and 'format'
    # shadows the builtin of the same name (local scope only).
    format = '%' + `maxlen` + 's parser: %6.3fsec'
    times_items = times.items()
    times_items.sort(lambda a,b:cmp(a[1], b[1]))
    for (parser, t) in times_items:
        print format % (parser, t)
if __name__ == '__main__': demo()
| Python |
# Natural Language Toolkit: Recursive Descent Parser
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@csse.unimelb.edu.au>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
from en.parser.nltk_lite.parse import cfg
from tree import *
from en.parser.nltk_lite import tokenize
from en.parser.nltk_lite.parse import AbstractParse
from types import *
##//////////////////////////////////////////////////////
## Recursive Descent Parser
##//////////////////////////////////////////////////////
class RecursiveDescent(AbstractParse):
"""
A simple top-down CFG parser that parses texts by recursively
expanding the fringe of a C{Tree}, and matching it against a
text.
C{RecursiveDescent} uses a list of tree locations called a
X{frontier} to remember which subtrees have not yet been expanded
and which leaves have not yet been matched against the text. Each
tree location consists of a list of child indices specifying the
path from the root of the tree to a subtree or a leaf; see the
reference documentation for C{Tree} for more information
about tree locations.
When the parser begins parsing a text, it constructs a tree
containing only the start symbol, and a frontier containing the
location of the tree's root node. It then extends the tree to
cover the text, using the following recursive procedure:
- If the frontier is empty, and the text is covered by the tree,
then return the tree as a possible parse.
- If the frontier is empty, and the text is not covered by the
tree, then return no parses.
- If the first element of the frontier is a subtree, then
use CFG productions to X{expand} it. For each applicable
production, add the expanded subtree's children to the
frontier, and recursively find all parses that can be
generated by the new tree and frontier.
- If the first element of the frontier is a token, then X{match}
it against the next token from the text. Remove the token
from the frontier, and recursively find all parses that can be
generated by the new tree and frontier.
@see: C{nltk.cfg}
"""
def __init__(self, grammar, trace=0):
"""
Create a new C{RecursiveDescent}, that uses C{grammar}
to parse texts.
@type grammar: C{Grammar}
@param grammar: The grammar used to parse texts.
@type trace: C{int}
@param trace: The level of tracing that should be used when
parsing a text. C{0} will generate no tracing output;
and higher numbers will produce more verbose tracing
output.
"""
self._grammar = grammar
self._trace = trace
AbstractParse.__init__(self)
def get_parse_list(self, tokens):
# Inherit docs from ParseI
# Start a recursive descent parse, with an initial tree
# containing just the start symbol.
start = self._grammar.start().symbol()
initial_tree = Tree(start, [])
frontier = [()]
if self._trace:
self._trace_start(initial_tree, frontier, tokens)
parses = self._parse(tokens, initial_tree, frontier)
# Return the parses.
return parses
def _parse(self, remaining_text, tree, frontier):
"""
Recursively expand and match each elements of C{tree}
specified by C{frontier}, to cover C{remaining_text}. Return
a list of all parses found.
@return: A list of all parses that can be generated by
matching and expanding the elements of C{tree}
specified by C{frontier}.
@rtype: C{list} of C{Tree}
@type tree: C{Tree}
@param tree: A partial structure for the text that is
currently being parsed. The elements of C{tree}
that are specified by C{frontier} have not yet been
expanded or matched.
@type remaining_text: C{list} of C{String}s
@param remaining_text: The portion of the text that is not yet
covered by C{tree}.
@type frontier: C{list} of C{tuple} of C{int}
@param frontier: A list of the locations within C{tree} of
all subtrees that have not yet been expanded, and all
leaves that have not yet been matched. This list sorted
in left-to-right order of location within the tree.
"""
# If the tree covers the text, and there's nothing left to
# expand, then we've found a complete parse; return it.
if len(remaining_text) == 0 and len(frontier) == 0:
if self._trace:
self._trace_succeed(tree, frontier)
return [tree]
# If there's still text, but nothing left to expand, we failed.
elif len(frontier) == 0:
if self._trace:
self._trace_backtrack(tree, frontier)
return []
# If the next element on the frontier is a tree, expand it.
elif isinstance(tree[frontier[0]], Tree):
return self._expand(remaining_text, tree, frontier)
# If the next element on the frontier is a token, match it.
else:
return self._match(remaining_text, tree, frontier)
def _match(self, rtext, tree, frontier):
"""
@rtype: C{list} of C{Tree}
@return: a list of all parses that can be generated by
matching the first element of C{frontier} against the
first token in C{rtext}. In particular, if the first
element of C{frontier} has the same type as the first
token in C{rtext}, then substitute the token into
C{tree}; and return all parses that can be generated by
matching and expanding the remaining elements of
C{frontier}. If the first element of C{frontier} does not
have the same type as the first token in C{rtext}, then
return empty list.
@type tree: C{Tree}
@param tree: A partial structure for the text that is
currently being parsed. The elements of C{tree}
that are specified by C{frontier} have not yet been
expanded or matched.
@type rtext: C{list} of C{String}s
@param rtext: The portion of the text that is not yet
covered by C{tree}.
@type frontier: C{list} of C{tuple} of C{int}
@param frontier: A list of the locations within C{tree} of
all subtrees that have not yet been expanded, and all
leaves that have not yet been matched.
"""
tree_leaf = tree[frontier[0]]
if (len(rtext) > 0 and tree_leaf == rtext[0]):
# If it's a terminal that matches rtext[0], then substitute
# in the token, and continue parsing.
newtree = tree.copy(deep=True)
newtree[frontier[0]] = rtext[0]
if self._trace:
self._trace_match(newtree, frontier[1:], rtext[0])
return self._parse(rtext[1:], newtree, frontier[1:])
else:
# If it's a non-matching terminal, fail.
if self._trace:
self._trace_backtrack(tree, frontier, rtext[:1])
return []
def _expand(self, remaining_text, tree, frontier, production=None):
"""
@rtype: C{list} of C{Tree}
@return: A list of all parses that can be generated by
expanding the first element of C{frontier} with
C{production}. In particular, if the first element of
C{frontier} is a subtree whose node type is equal to
C{production}'s left hand side, then add a child to that
subtree for each element of C{production}'s right hand
side; and return all parses that can be generated by
matching and expanding the remaining elements of
C{frontier}. If the first element of C{frontier} is not a
subtree whose node type is equal to C{production}'s left
hand side, then return an empty list. If C{production} is
not specified, then return a list of all parses that can
be generated by expanding the first element of C{frontier}
with I{any} CFG production.
@type tree: C{Tree}
@param tree: A partial structure for the text that is
currently being parsed. The elements of C{tree}
that are specified by C{frontier} have not yet been
expanded or matched.
@type remaining_text: C{list} of C{String}s
@param remaining_text: The portion of the text that is not yet
covered by C{tree}.
@type frontier: C{list} of C{tuple} of C{int}
@param frontier: A list of the locations within C{tree} of
all subtrees that have not yet been expanded, and all
leaves that have not yet been matched.
"""
if production is None: productions = self._grammar.productions()
else: productions = [production]
parses = []
for production in productions:
lhs = production.lhs().symbol()
if lhs == tree[frontier[0]].node:
subtree = self._production_to_tree(production)
if frontier[0] == ():
newtree = subtree
else:
newtree = tree.copy(deep=True)
newtree[frontier[0]] = subtree
new_frontier = [frontier[0]+(i,) for i in
range(len(production.rhs()))]
if self._trace:
self._trace_expand(newtree, new_frontier, production)
parses += self._parse(remaining_text, newtree,
new_frontier + frontier[1:])
return parses
def _production_to_tree(self, production):
"""
@rtype: C{Tree}
@return: The C{Tree} that is licensed by C{production}.
In particular, given the production::
C{[M{lhs} -> M{elt[1]} ... M{elt[n]}]}
Return a tree token that has a node C{M{lhs}.symbol}, and
C{M{n}} children. For each nonterminal element
C{M{elt[i]}} in the production, the tree token has a
childless subtree with node value C{M{elt[i]}.symbol}; and
for each terminal element C{M{elt[j]}}, the tree token has
a leaf token with type C{M{elt[j]}}.
@param production: The CFG production that licenses the tree
token that should be returned.
@type production: C{Production}
"""
children = []
for elt in production.rhs():
if isinstance(elt, cfg.Nonterminal):
children.append(Tree(elt.symbol(), []))
else:
# This will be matched.
children.append(elt)
return Tree(production.lhs().symbol(), children)
def trace(self, trace=2):
"""
Set the level of tracing output that should be generated when
parsing a text.
@type trace: C{int}
@param trace: The trace level. A trace level of C{0} will
generate no tracing output; and higher trace levels will
produce more verbose tracing output.
@rtype: C{None}
"""
self._trace = trace
    def _trace_fringe(self, tree, treeloc=None):
        """
        Print trace output displaying the fringe of C{tree}. The
        fringe of C{tree} consists of all of its leaves and all of
        its childless subtrees.
        @param treeloc: The tree location of the current frontier
            element, if any; the element at this location is marked
            with a leading '*'.
        @rtype: C{None}
        """
        # Mark the frontier element with '*' once the location bottoms out.
        if treeloc == (): print "*",
        if isinstance(tree, Tree):
            # A childless subtree is part of the fringe; show its node.
            if len(tree) == 0: print `cfg.Nonterminal(tree.node)`,
            # Recurse into children, threading the remainder of treeloc
            # down the branch that leads to the frontier element.
            for i in range(len(tree)):
                if treeloc is not None and i == treeloc[0]:
                    self._trace_fringe(tree[i], treeloc[1:])
                else:
                    self._trace_fringe(tree[i])
        else:
            # A leaf token is part of the fringe.
            print `tree`,
    def _trace_tree(self, tree, frontier, operation):
        """
        Print trace output displaying the parser's current state.
        @param operation: A character identifying the operation that
            generated the current state.
        @rtype: C{None}
        """
        # At trace level 2, prefix the state with the operation character.
        if self._trace == 2: print ' %c [' % operation,
        else: print ' [',
        # Highlight the first frontier element within the fringe, if any.
        if len(frontier) > 0: self._trace_fringe(tree, frontier[0])
        else: self._trace_fringe(tree)
        print ']'
    def _trace_start(self, tree, frontier, text):
        # Announce the text being parsed and, at higher trace levels,
        # display the initial parser state.
        print 'Parsing %r' % ' '.join(text)
        if self._trace > 2: print 'Start:'
        if self._trace > 1: self._trace_tree(tree, frontier, ' ')
    def _trace_expand(self, tree, frontier, production):
        # Trace a single expand operation ('E' in the state display).
        if self._trace > 2: print 'Expand: %s' % production
        if self._trace > 1: self._trace_tree(tree, frontier, 'E')
    def _trace_match(self, tree, frontier, tok):
        # Trace a single match operation ('M' in the state display).
        if self._trace > 2: print 'Match: %r' % tok
        if self._trace > 1: self._trace_tree(tree, frontier, 'M')
    def _trace_succeed(self, tree, frontier):
        # Trace a completed parse ('+' in the state display).
        if self._trace > 2: print 'GOOD PARSE:'
        if self._trace == 1: print 'Found a parse:\n%s' % tree
        if self._trace > 1: self._trace_tree(tree, frontier, '+')
    def _trace_backtrack(self, tree, frontier, toks=None):
        # Trace a backtrack; `toks`, if given, is the text that
        # failed to match.
        if self._trace > 2:
            if toks: print 'Backtrack: %r match failed' % toks[0]
            else: print 'Backtrack'
##//////////////////////////////////////////////////////
## Stepping Recursive Descent Parser
##//////////////////////////////////////////////////////
class SteppingRecursiveDescent(RecursiveDescent):
    """
    A C{RecursiveDescent} that allows you to step through the
    parsing process, performing a single operation at a time.

    The C{initialize} method is used to start parsing a text.
    C{expand} expands the first element on the frontier using a single
    CFG production, and C{match} matches the first element on the
    frontier against the next text token. C{backtrack} undoes the most
    recent expand or match operation. C{step} performs a single
    expand, match, or backtrack operation. C{parses} returns the set
    of parses that have been found by the parser.

    @ivar _history: A list of C{(rtext, tree, frontier)} triples,
        containing the previous states of the parser. This history is
        used to implement the C{backtrack} operation.
    @ivar _tried_e: A record of all productions that have been tried
        for a given tree. This record is used by C{expand} to perform
        the next untried production.
    @ivar _tried_m: A record of what tokens have been matched for a
        given tree. This record is used by C{step} to decide whether
        or not to match a token.
    @see: C{nltk.cfg}
    """
    def __init__(self, grammar, trace=0):
        """
        Create a new stepping parser.

        @param grammar: The grammar used to parse texts.
        @type grammar: C{CFG}
        @param trace: The initial trace level (0 = no tracing).
        @type trace: C{int}
        """
        self._grammar = grammar
        self._trace = trace
        self._rtext = None
        self._tree = None
        self._frontier = [()]
        self._tried_e = {}
        self._tried_m = {}
        self._history = []
        self._parses = []
        AbstractParse.__init__(self)

    # [XX] TEMPORARY HACK WARNING! This should be replaced with
    # something nicer when we get the chance.
    def _freeze(self, tree):
        # Produce an immutable (hashable) copy of the tree, usable as
        # a key in the _tried_e / _tried_m records.
        c = tree.copy()
        return ImmutableTree.convert(c)

    def get_parse_list(self, tokens):
        # Run the parser to completion by stepping until no further
        # operation is possible, then return all recorded parses.
        self.initialize(tokens)
        while self.step() is not None: pass
        return self.parses()

    def initialize(self, tokens):
        """
        Start parsing a given text. This sets the parser's tree to
        the start symbol, its frontier to the root node, and its
        remaining text to C{token['SUBTOKENS']}.
        """
        self._rtext = tokens
        start = self._grammar.start().symbol()
        self._tree = Tree(start, [])
        self._frontier = [()]
        self._tried_e = {}
        self._tried_m = {}
        self._history = []
        self._parses = []
        if self._trace:
            self._trace_start(self._tree, self._frontier, self._rtext)

    def remaining_text(self):
        """
        @return: The portion of the text that is not yet covered by the
            tree.
        @rtype: C{list} of C{String}
        """
        return self._rtext

    def frontier(self):
        """
        @return: A list of the tree locations of all subtrees that
            have not yet been expanded, and all leaves that have not
            yet been matched.
        @rtype: C{list} of C{tuple} of C{int}
        """
        return self._frontier

    def tree(self):
        """
        @return: A partial structure for the text that is
            currently being parsed. The elements specified by the
            frontier have not yet been expanded or matched.
        @rtype: C{Tree}
        """
        return self._tree

    def step(self):
        """
        Perform a single parsing operation. If an untried match is
        possible, then perform the match, and return the matched
        token. If an untried expansion is possible, then perform the
        expansion, and return the production that it is based on. If
        backtracking is possible, then backtrack, and return 1.
        Otherwise, return C{None}.

        @return: C{None} if no operation was performed; a token if a
            match was performed; a production if an expansion was
            performed; and 1 if a backtrack operation was performed.
        @rtype: C{Production} or C{String} or C{boolean} or C{None}
        """
        # Try matching (if we haven't already)
        if self.untried_match():
            token = self.match()
            if token is not None: return token
        # Try expanding.
        production = self.expand()
        if production is not None: return production
        # Try backtracking
        if self.backtrack():
            self._trace_backtrack(self._tree, self._frontier)
            return 1
        # Nothing left to do.
        return None

    def expand(self, production=None):
        """
        Expand the first element of the frontier. In particular, if
        the first element of the frontier is a subtree whose node type
        is equal to C{production}'s left hand side, then add a child
        to that subtree for each element of C{production}'s right hand
        side. If C{production} is not specified, then use the first
        untried expandable production. If all expandable productions
        have been tried, do nothing.

        @return: The production used to expand the frontier, if an
            expansion was performed. If no expansion was performed,
            return C{None}.
        @rtype: C{Production} or C{None}
        """
        # Make sure we *can* expand.
        if len(self._frontier) == 0:
            return None
        if not isinstance(self._tree[self._frontier[0]], Tree):
            return None
        # If they didn't specify a production, check all untried ones.
        if production is None:
            productions = self.untried_expandable_productions()
        else: productions = [production]
        for prod in productions:
            # Record that we've tried this production now.
            self._tried_e.setdefault(self._freeze(self._tree), []).append(prod)
            # Try expanding; _expand returns a non-empty list on success.
            if self._expand(self._rtext, self._tree, self._frontier, prod):
                return prod
        # We didn't expand anything.
        return None

    def match(self):
        """
        Match the first element of the frontier. In particular, if
        the first element of the frontier has the same type as the
        next text token, then substitute the text token into the tree.

        @return: The token matched, if a match operation was
            performed. If no match was performed, return C{None}
        @rtype: C{String} or C{None}
        """
        # Can't match if there is no remaining text. (Guard added so a
        # direct call with exhausted text returns None instead of
        # raising IndexError; step() never reaches here in that case.)
        if len(self._rtext) == 0:
            return None
        # Record that we've tried matching this token.
        tok = self._rtext[0]
        self._tried_m.setdefault(self._freeze(self._tree), []).append(tok)
        # Make sure we *can* match.
        if len(self._frontier) == 0:
            return None
        if isinstance(self._tree[self._frontier[0]], Tree):
            return None
        if self._match(self._rtext, self._tree, self._frontier):
            # Return the token we just matched.
            return self._history[-1][0][0]
        else:
            return None

    def backtrack(self):
        """
        Return the parser to its state before the most recent
        match or expand operation. Calling C{undo} repeatedly return
        the parser to successively earlier states. If no match or
        expand operations have been performed, C{undo} will make no
        changes.

        @return: true if an operation was successfully undone.
        @rtype: C{boolean}
        """
        if len(self._history) == 0: return 0
        (self._rtext, self._tree, self._frontier) = self._history.pop()
        return 1

    def expandable_productions(self):
        """
        @return: A list of all the productions for which expansions
            are available for the current parser state.
        @rtype: C{list} of C{Production}
        """
        # Make sure we *can* expand.
        if len(self._frontier) == 0: return []
        frontier_child = self._tree[self._frontier[0]]
        if not isinstance(frontier_child, Tree):
            return []
        return [p for p in self._grammar.productions()
                if p.lhs().symbol() == frontier_child.node]

    def untried_expandable_productions(self):
        """
        @return: A list of all the untried productions for which
            expansions are available for the current parser state.
        @rtype: C{list} of C{Production}
        """
        tried_expansions = self._tried_e.get(self._freeze(self._tree), [])
        return [p for p in self.expandable_productions()
                if p not in tried_expansions]

    def untried_match(self):
        """
        @return: Whether the first element of the frontier is a token
            that has not yet been matched.
        @rtype: C{boolean}
        """
        if len(self._rtext) == 0: return 0
        tried_matches = self._tried_m.get(self._freeze(self._tree), [])
        return (self._rtext[0] not in tried_matches)

    def currently_complete(self):
        """
        @return: Whether the parser's current state represents a
            complete parse.
        @rtype: C{boolean}
        """
        return (len(self._frontier) == 0 and len(self._rtext) == 0)

    def _parse(self, remaining_text, tree, frontier):
        """
        A stub version of C{_parse} that sets the parsers current
        state to the given arguments. In C{RecursiveDescent},
        the C{_parse} method is used to recursively continue parsing a
        text. C{SteppingRecursiveDescent} overrides it to
        capture these recursive calls. It records the parser's old
        state in the history (to allow for backtracking), and updates
        the parser's new state using the given arguments. Finally, it
        returns C{[1]}, which is used by C{match} and C{expand} to
        detect whether their operations were successful.

        @return: C{[1]}
        @rtype: C{list} of C{int}
        """
        self._history.append( (self._rtext, self._tree, self._frontier) )
        self._rtext = remaining_text
        self._tree = tree
        self._frontier = frontier
        # Is it a good parse? If so, record it.
        if (len(frontier) == 0 and len(remaining_text) == 0):
            self._parses.append(tree)
            self._trace_succeed(self._tree, self._frontier)
        return [1]

    def parses(self):
        """
        @return: A list of the parses that have been found by this
            parser so far.
        @rtype: C{list} of C{Tree}
        """
        return self._parses

    # copied from nltk.parser
    def set_grammar(self, grammar):
        """
        Change the grammar used to parse texts.

        @param grammar: The new grammar.
        @type grammar: C{CFG}
        """
        self._grammar = grammar
##//////////////////////////////////////////////////////
## Demonstration Code
##//////////////////////////////////////////////////////
def demo():
    """
    A demonstration of the recursive descent parser.
    """
    from en.parser.nltk_lite.parse import cfg
    # Define some nonterminals
    S, VP, NP, PP = cfg.nonterminals('S, VP, NP, PP')
    V, N, P, Name, Det = cfg.nonterminals('V, N, P, Name, Det')
    # Define a grammar.
    productions = (
        # Syntactic Productions
        cfg.Production(S, [NP, 'saw', NP]),
        cfg.Production(S, [NP, VP]),
        cfg.Production(NP, [Det, N]),
        cfg.Production(VP, [V, NP, PP]),
        cfg.Production(NP, [Det, N, PP]),
        cfg.Production(PP, [P, NP]),
        # Lexical Productions
        cfg.Production(NP, ['I']), cfg.Production(Det, ['the']),
        cfg.Production(Det, ['a']), cfg.Production(N, ['man']),
        cfg.Production(V, ['saw']), cfg.Production(P, ['in']),
        cfg.Production(P, ['with']), cfg.Production(N, ['park']),
        cfg.Production(N, ['dog']), cfg.Production(N, ['telescope'])
    )
    grammar = cfg.Grammar(S, productions)
    # Tokenize a sample sentence.
    # NOTE(review): `tokenize` is not imported in this module chunk --
    # presumably en.parser.nltk_lite.tokenize; confirm before running.
    sent = list(tokenize.whitespace('I saw a man in the park'))
    # Define a list of parsers.
    parser = RecursiveDescent(grammar)
    parser.trace()
    # Print every parse found for the sample sentence.
    for p in parser.get_parse_list(sent):
        print p
# Run the demonstration when executed as a script.
if __name__ == '__main__': demo()
| Python |
# Natural Language Toolkit: Probabilistic Context Free Grammars
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Steven Bird <sb@csse.unimelb.edu.au>
# Edward Loper <edloper@ldc.upenn.edu> (minor additions)
# Nathan Bodenstab <bodenstab@cslu.ogi.edu> (induction)
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
import re
from en.parser.nltk_lite.parse import cfg
from en.parser.nltk_lite.probability import ImmutableProbabilisticMixIn
class Production(cfg.Production, ImmutableProbabilisticMixIn):
    """
    A context-free grammar production annotated with a probability.

    PCFG C{Production}s are essentially just C{cfg.Production}s that
    carry a probability recording how likely the production is to be
    used: the likelihood that its right-hand side is the correct
    instantiation for any given occurrence of its left-hand side.
    @see: L{cfg.Production}
    """
    def __init__(self, lhs, rhs, **prob_kwarg):
        """
        Construct a new probabilistic C{Production}.
        @param prob: The probability of the new C{Production}.
        @param lhs: The left-hand side of the new C{Production}.
        @type lhs: L{Nonterminal}
        @param rhs: The right-hand side of the new C{Production}.
        @type rhs: sequence of (C{Nonterminal} and (terminal))
        """
        ImmutableProbabilisticMixIn.__init__(self, **prob_kwarg)
        cfg.Production.__init__(self, lhs, rhs)

    def __str__(self):
        # Append the probability to the plain CFG rendering.
        base = cfg.Production.__str__(self)
        return base + ' (p=%s)' % self.prob()

    def __eq__(self, other):
        # Equal iff same class, same rule, and same probability.
        if not isinstance(other, self.__class__):
            return False
        return (self._lhs == other._lhs and
                self._rhs == other._rhs and
                self.prob() == other.prob())

    def __hash__(self):
        # Hash must be consistent with __eq__, so include the probability.
        return hash((self._lhs, self._rhs, self.prob()))
class Grammar(cfg.Grammar):
    """
    A probabilistic context-free grammar. A PCFG Grammar consists of a
    start state and a set of productions; the sets of terminals and
    nonterminals are implicitly specified by the productions.

    PCFG productions should be C{Production}s. C{PCFG} Grammars impose
    the constraint that the set of productions with any given
    left-hand-side must have probabilities that sum to 1.

    If you need efficient key-based access to productions, you can use
    a subclass to implement it.

    @type EPSILON: C{float}
    @cvar EPSILON: The acceptable margin of error for checking that
        productions with a given left-hand side have probabilities
        that sum to 1.
    """
    EPSILON = 0.01

    def __init__(self, start, productions):
        """
        Create a new context-free grammar, from the given start state
        and set of C{cfg.Production}s.

        @param start: The start symbol
        @type start: L{Nonterminal}
        @param productions: The list of productions that defines the grammar
        @type productions: C{list} of C{Production}
        @raise ValueError: if the set of productions with any left-hand-side
            do not have probabilities that sum to a value within
            EPSILON of 1.
        """
        cfg.Grammar.__init__(self, start, productions)
        # Accumulate the probability mass attached to each left-hand
        # side, then verify every total lies within EPSILON of 1.
        totals = {}
        for prod in productions:
            lhs = prod.lhs()
            totals[lhs] = totals.get(lhs, 0) + prod.prob()
        for (lhs, total) in totals.items():
            if not ((1-Grammar.EPSILON) < total < (1+Grammar.EPSILON)):
                raise ValueError("cfg.Productions for %r do not sum to 1" % lhs)
def induce(start, productions):
    """
    Induce a PCFG grammar from a list of productions.

    The probability of a production A -> B C in a PCFG is::

                           count(A -> B C)
        P(B, C | A) =      ---------------       where * is any right hand side
                           count(A -> *)

    @param start: The start symbol
    @type start: L{Nonterminal}
    @param productions: The list of productions that defines the grammar
    @type productions: C{list} of L{Production}
    """
    production_counts = {}  # occurrences of each distinct production
    lhs_counts = {}         # occurrences of each left-hand-side symbol
    for production in productions:
        lhs = production.lhs()
        lhs_counts[lhs] = lhs_counts.get(lhs, 0) + 1
        production_counts[production] = production_counts.get(production, 0) + 1
    # Relative-frequency estimate: count(A -> rhs) / count(A -> *).
    weighted = []
    for production, count in production_counts.items():
        estimate = float(count) / lhs_counts[production.lhs()]
        weighted.append(Production(production.lhs(), production.rhs(),
                                   prob=estimate))
    return Grammar(start, weighted)
#################################################################
# Toy PCFGs
#################################################################
# Nonterminal symbols shared by the toy grammars below; the leading
# underscore keeps them out of the module's public namespace.
_S, _VP, _NP, _PP = cfg.nonterminals('S, VP, NP, PP')
_V, _N, _P, _Name, _Det = cfg.nonterminals('V, N, P, Name, Det')
# A tiny example PCFG: for each nonterminal, the probabilities of its
# productions sum to 1 (enforced by the Grammar constructor).
toy1 = Grammar(_S, [
    Production(_NP, [_Det, _N], prob=0.5),
    Production(_NP, [_NP, _PP], prob=0.25),
    Production(_NP, ['John'], prob=0.1),
    Production(_NP, ['I'], prob=0.15),
    Production(_Det, ['the'], prob=0.8),
    Production(_Det, ['my'], prob=0.2),
    Production(_N, ['dog'], prob=0.5),
    Production(_N, ['cookie'], prob=0.5),
    Production(_VP, [_VP, _PP], prob=0.1),
    Production(_VP, [_V, _NP], prob=0.7),
    Production(_VP, [_V], prob=0.2),
    Production(_V, ['ate'], prob=0.35),
    Production(_V, ['saw'], prob=0.65),
    Production(_S, [_NP, _VP], prob=1.0),
    Production(_PP, [_P, _NP], prob=1.0),
    Production(_P, ['with'], prob=0.61),
    Production(_P, ['under'], prob=0.39)])
# A second, slightly larger toy PCFG with proper names.
toy2 = Grammar(_S, [
    Production(_V, ['saw'], prob=0.21),
    Production(_V, ['ate'], prob=0.51),
    Production(_V, ['ran'], prob=0.28),
    Production(_N, ['boy'], prob=0.11),
    Production(_N, ['cookie'], prob=0.12),
    Production(_N, ['table'], prob=0.13),
    Production(_N, ['telescope'], prob=0.14),
    Production(_N, ['hill'], prob=0.50),
    Production(_Name, ['Jack'], prob=0.52),
    Production(_Name, ['Bob'], prob=0.48),
    Production(_P, ['with'], prob=0.61),
    Production(_P, ['under'], prob=0.39),
    Production(_Det, ['the'], prob=0.41),
    Production(_Det, ['a'], prob=0.31),
    Production(_Det, ['my'], prob=0.28),
    Production(_S, [_NP, _VP], prob=1.00),
    Production(_VP, [_V, _NP], prob=0.59),
    Production(_VP, [_V], prob=0.40),
    Production(_VP, [_VP, _PP], prob=0.01),
    Production(_NP, [_Det, _N], prob=0.41),
    Production(_NP, [_Name], prob=0.28),
    Production(_NP, [_NP, _PP], prob=0.31),
    Production(_PP, [_P, _NP], prob=1.00)])
#################################################################
# Demonstration
#################################################################
def demo():
    """
    A demonstration showing how PCFG C{Grammar}s can be created and used.
    """
    from en.parser.nltk_lite.corpora import treebank, extract
    from en.parser.nltk_lite.parse import cfg, pcfg, pchart, treetransforms
    from itertools import islice
    # Create some probabilistic CFG Productions
    S, A, B, C = cfg.nonterminals('S A B C')
    pcfg_prods = [pcfg.Production(A, [B, B], prob=0.3),
                  pcfg.Production(A, [C, B, C], prob=0.7),
                  pcfg.Production(B, [B, 'b'], prob=0.5),
                  pcfg.Production(B, [C], prob=0.5),
                  pcfg.Production(C, ['a'], prob=0.1),
                  pcfg.Production(C, ['b'], prob=0.9)]
    pcfg_prod = pcfg_prods[2]
    # Show the parts of a single production.
    print 'A PCFG production:', `pcfg_prod`
    print ' pcfg_prod.lhs() =>', `pcfg_prod.lhs()`
    print ' pcfg_prod.rhs() =>', `pcfg_prod.rhs()`
    print ' pcfg_prod.prob() =>', `pcfg_prod.prob()`
    print
    # Create and print a PCFG
    grammar = pcfg.Grammar(S, pcfg_prods)
    print 'A PCFG grammar:', `grammar`
    print ' grammar.start() =>', `grammar.start()`
    print ' grammar.productions() =>',
    # Use string.replace(...) to line-wrap the output.
    print `grammar.productions()`.replace(',', ',\n'+' '*26)
    print
    # extract productions from three trees and induce the PCFG
    print "Induce PCFG grammar from treebank data:"
    productions = []
    for tree in islice(treebank.parsed(),3):
        # perform optional in-place tree transformations, e.g.:
        # treetransforms.collapseUnary(tree, collapsePOS = False)
        # treetransforms.chomskyNormalForm(tree, horzMarkov = 2)
        productions += tree.productions()
    grammar = pcfg.induce(S, productions)
    print grammar
    print
    # Parse the first treebank sentence with the induced grammar.
    print "Parse sentence using induced grammar:"
    parser = pchart.InsideParse(grammar)
    parser.trace(3)
    sent = extract(0, treebank.raw())
    print sent
    for parse in parser.get_parse_list(sent):
        print parse
# Run the demonstration when executed as a script.
if __name__ == '__main__': demo()
| Python |
# Natural Language Toolkit: Tree Transformations
#
# Copyright (C) 2005-2006 Oregon Graduate Institute
# Author: Nathan Bodenstab <bodenstab@cslu.ogi.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
A collection of methods for tree (grammar) transformations used
in parsing natural language.
Although many of these methods are technically grammar transformations
(i.e. Chomsky Normal Form), when working with treebanks it is much more
natural to visualize these modifications in a tree structure. Hence,
we will do all transformation directly to the tree itself.
Transforming the tree directly also allows us to do parent annotation.
A grammar can then be simply induced from the modified tree.
The following is a short tutorial on the available transformations.
1) Chomsky Normal Form (binarization)
It is well known that any grammar has a Chomsky Normal Form (CNF)
equivalent grammar where CNF is defined by every production having
either two non-terminals or one terminal on its right hand side.
When we have hierarchically structured data (ie. a treebank), it is
natural to view this in terms of productions where the root of every
subtree is the head (left hand side) of the production and all of
its children are the right hand side constituents. In order to
convert a tree into CNF, we simply need to ensure that every subtree
has either two subtrees as children (binarization), or one leaf node
(non-terminal). In order to binarize a subtree with more than two
children, we must introduce artificial nodes.
There are two popular methods to convert a tree into CNF: left
factoring and right factoring. The following example demonstrates
the difference between them.
Original Right-Factored Left-Factored
Example: A A A
/ | \ / \ / \
B C D ==> B A|<C-D> OR A|<B-C> D
/ \ / \
C D B C
2) Parent Annotation
In addition to binarizing the tree, there are two standard
modifications to node labels we can do in the same traversal: parent
annotation and Markov order-N smoothing (or sibling smoothing).
The purpose of parent annotation is to refine the probabilities of
productions by adding a small amount of context. With this simple
addition, a CYK (inside-outside, dynamic programming chart parse)
can improve from 74% to 79% accuracy. A natural generalization from
parent annotation is to grandparent annotation and beyond. The
tradeoff becomes accuracy gain vs. computational complexity. We
must also keep in mind data sparsity issues.
Original Parent Annotation
Example: A A^<?>
/ | \ / \
B C D ==> B^<A> A|<C-D>^<?> where ? is the parent of A
/ \
C^<A> D^<A>
3) Markov order-N smoothing
Markov smoothing combats data sparsity issues as well as decreasing
computational requirements by limiting the number of children
included in artificial nodes. In practice, most people use an order
2 grammar.
Original No Smoothing Markov order 1 Markov order 2 etc...
Example: A A A A
/ / | \ \ / \ / \ / \
B C D E F ==> B A|<C-D-E-F> ==> B A|<C> ==> B A|<C-D>
/ \ / \ / \
C ... C ... C ...
Annotation decisions can be thought about in the vertical direction
(parent, grandparent, etc) and the horizontal direction (number of
siblings to keep). Parameters to the following functions specify
these values. For more information see:
Dan Klein and Chris Manning (2003) "Accurate Unlexicalized Parsing", ACL-03.
http://www.aclweb.org/anthology/P03-1054
4) Unary Collapsing
Collapse unary productions (ie. subtrees with a single child) into a
new non-terminal (Tree node). This is useful when working with
algorithms that do not allow unary productions, yet you do not wish
to lose the parent information.
A
|
Example: B ==> A+B
/ \ / \
C D C D
"""
from en.parser.nltk_lite.parse.tree import Tree
def chomskyNormalForm(tree, factor = "right", horzMarkov = None, vertMarkov = 0, childChar = "|", parentChar = "^"):
    """
    This method can modify a tree in three ways:
    1. Convert a tree into its Chomsky Normal Form (CNF) equivalent -- Every subtree
       has either two non-terminals or one terminal as its children. This process
       requires the creation of more "artificial" non-terminal nodes.
    2. Markov (horizontal) smoothing of children in new artificial nodes
    3. Vertical (parent) annotation of nodes
    see documentation in code for more information
    @param tree: The Tree to be modified
    @type tree: C{Tree}
    @param factor: Right or left factoring method (default = "right")
    @type factor: C{string} = [left|right]
    @param horzMarkov: Markov order for sibling smoothing in artificial nodes (None (default) = include all siblings)
    @type horzMarkov: C{int} | None
    @param vertMarkov: Markov order for parent smoothing (0 (default) = no vertical annotation)
    @type vertMarkov: C{int} | None
    @param childChar: A string used in construction of the artificial nodes, separating the head of the
        original subtree from the child nodes that have yet to be expanded (default = "|")
    @type childChar: C{string}
    @param parentChar: A string used to separate the node representation from its vertical annotation
    @type parentChar: C{string}
    """
    # assume all subtrees have homogeneous children
    # assume all terminals have no siblings
    # A semi-hack to have elegant looking code below. As a result,
    # any subtree with a branching factor greater than 999 will be incorrectly truncated.
    if horzMarkov == None: horzMarkov = 999
    # Traverse the tree depth-first keeping a list of ancestor nodes to the root.
    # I chose not to use the tree.treepositions() method since it requires
    # two traversals of the tree (one to get the positions, one to iterate
    # over them) and node access time is proportional to the height of the node.
    # This method is 7x faster which helps when parsing 40,000 sentences.
    nodeList = [(tree, [tree.node])]
    while nodeList != []:
        node, parent = nodeList.pop()
        if isinstance(node,Tree):
            # parent annotation: append the ancestor labels to this
            # node's label, then push this node onto the ancestor list
            # that its children will see (truncated to vertMarkov).
            parentString = ""
            originalNode = node.node
            if vertMarkov != 0 and node != tree and isinstance(node[0],Tree):
                parentString = "%s<%s>" % (parentChar, "-".join(parent))
                node.node += parentString
                parent = [originalNode] + parent[:vertMarkov - 1]
            # add children to the agenda before we mess with them
            for child in node:
                nodeList.append((child, parent))
            # chomsky normal form factorization: binarize any node with
            # more than two children by chaining artificial nodes.
            if len(node) > 2:
                childNodes = [child.node for child in node]
                nodeCopy = node.copy()
                node[0:] = [] # delete the children
                curNode = node
                numChildren = len(nodeCopy)
                for i in range(1,numChildren - 1):
                    if factor == "right":
                        # peel one child off the left; the artificial
                        # node covers the remaining (smoothed) siblings
                        newHead = "%s%s<%s>%s" % (originalNode, childChar, "-".join(childNodes[i:min([i+horzMarkov,numChildren])]),parentString) # create new head
                        newNode = Tree(newHead, [])
                        curNode[0:] = [nodeCopy.pop(0), newNode]
                    else:
                        # left factoring: peel one child off the right
                        newHead = "%s%s<%s>%s" % (originalNode, childChar, "-".join(childNodes[max([numChildren-i-horzMarkov,0]):-i]),parentString)
                        newNode = Tree(newHead, [])
                        curNode[0:] = [newNode, nodeCopy.pop()]
                    curNode = newNode
                # the last artificial node receives the remaining children
                curNode[0:] = [child for child in nodeCopy]
def unChomskyNormalForm(tree, expandUnary = True, childChar = "|", parentChar = "^", unaryChar = "+"):
    """
    This method modifies the tree in three ways:
    1. Transforms a tree in Chomsky Normal Form back to its original structure (branching greater than two)
    2. Removes any parent annotation (if it exists)
    3. (optional) expands unary subtrees (if previously collapsed with collapseUnary(...) )
    @param tree: The Tree to be modified
    @type tree: C{Tree}
    @param expandUnary: Flag to expand unary or not (default = True)
    @type expandUnary: C{boolean}
    @param childChar: A string separating the head node from its children in an artificial node (default = "|")
    @type childChar: C{string}
    @param parentChar: A string separating the node label from its parent annotation (default = "^")
    @type parentChar: C{string}
    @param unaryChar: A string joining two non-terminals in a unary production (default = "+")
    @type unaryChar: C{string}
    """
    # Traverse the tree-depth first keeping a pointer to the parent for modification purposes.
    nodeList = [(tree,[])]
    while nodeList != []:
        node,parent = nodeList.pop()
        if isinstance(node,Tree):
            # if the node contains the 'childChar' character it means that
            # it is an artificial node and can be removed, although we still need
            # to move its children to its parent
            childIndex = node.node.find(childChar)
            if childIndex != -1:
                nodeIndex = parent.index(node)
                parent.remove(parent[nodeIndex])
                # Generated node was on the left if the nodeIndex is 0 which
                # means the grammar was left factored. We must insert the children
                # at the beginning of the parent's children
                if nodeIndex == 0:
                    parent.insert(0,node[0])
                    parent.insert(1,node[1])
                else:
                    parent.extend([node[0],node[1]])
                # parent is now the current node so the children of parent will be added to the agenda
                node = parent
            else:
                # real (non-artificial) node: strip annotations in place
                parentIndex = node.node.find(parentChar)
                if parentIndex != -1:
                    # strip the node name of the parent annotation
                    node.node = node.node[:parentIndex]
                # expand collapsed unary productions
                if expandUnary == True:
                    unaryIndex = node.node.find(unaryChar)
                    if unaryIndex != -1:
                        # re-insert the collapsed child node between this
                        # node and its children
                        newNode = Tree(node.node[unaryIndex + 1:], [i for i in node])
                        node.node = node.node[:unaryIndex]
                        node[0:] = [newNode]
            for child in node:
                nodeList.append((child,node))
def collapseUnary(tree, collapsePOS = False, collapseRoot = False, joinChar = "+"):
    """
    Collapse chains of unary productions (subtrees with exactly one
    Tree child) into single nodes whose labels are joined by 'joinChar'.
    The tree is modified in place; nothing is returned.

    @param tree: The Tree to be collapsed
    @type tree: C{Tree}
    @param collapsePOS: 'False' (default) leaves the parents of leaves
        (part-of-speech tags) alone, since those are always unary
    @type collapsePOS: C{boolean}
    @param collapseRoot: 'False' (default) leaves a unary root
        production (e.g. Penn treebank TOP ->) untouched
    @type collapseRoot: C{boolean}
    @param joinChar: A string used to connect collapsed node values (default = "+")
    @type joinChar: C{string}
    """
    # Start below the root when a unary root should be preserved.
    if collapseRoot == False and isinstance(tree, Tree) and len(tree) == 1:
        agenda = [tree[0]]
    else:
        agenda = [tree]
    # Iterative depth-first traversal; leaves are simply discarded.
    while agenda:
        current = agenda.pop()
        if not isinstance(current, Tree):
            continue
        isUnary = len(current) == 1 and isinstance(current[0], Tree)
        if isUnary and (collapsePOS == True or isinstance(current[0, 0], Tree)):
            # Fold the child's label into this node, adopt its children,
            # and revisit this node in case the unary chain continues.
            current.node += joinChar + current[0].node
            current[0:] = list(current[0])
            agenda.append(current)
        else:
            agenda.extend(current)
def toTreebank(tree):
    """
    Convert a tree into its treebank-style bracketed equivalent.
    """
    bracketed = _toTreebank(tree)
    return bracketed.strip()
def _toTreebank(tree):
    # Recursive helper for toTreebank.  Every component carries a
    # leading space; the public wrapper strips the outermost one.
    parts = [" (%s" % tree.node]
    for child in tree:
        if isinstance(child, Tree):
            parts.append(_toTreebank(child))
        else:
            parts.append(" " + child)
    parts.append(")")
    return "".join(parts)
#################################################################
# Demonstration
#################################################################
def demo():
    """
    A demonstration showing how each tree transform can be used.
    """
    from en.parser.nltk_lite.draw.tree import draw_trees
    from en.parser.nltk_lite.parse import bracket_parse
    from en.parser.nltk_lite.parse import treetransforms
    from copy import deepcopy
    # original tree from WSJ bracketed text
    sentence = "(TOP (S (S (VP (VBN Turned) (ADVP (RB loose)) (PP (IN in) (NP (NP (NNP Shane) (NNP Longman) (POS 's)) (NN trading) (NN room))))) (, ,) (NP (DT the) (NN yuppie) (NNS dealers)) (VP (AUX do) (NP (NP (RB little)) (ADJP (RB right)))) (. .)))"
    tree = bracket_parse(sentence)
    # collapse subtrees with only one child
    collapsedTree = deepcopy(tree)
    treetransforms.collapseUnary(collapsedTree)
    # convert the tree to CNF
    cnfTree = deepcopy(collapsedTree)
    treetransforms.chomskyNormalForm(cnfTree)
    # convert the tree to CNF with parent annotation (one level) and horizontal smoothing of order two
    parentTree = deepcopy(collapsedTree)
    treetransforms.chomskyNormalForm(parentTree, horzMarkov=2, vertMarkov=1)
    # convert the tree back to its original form (used to make CYK results comparable)
    original = deepcopy(parentTree)
    treetransforms.unChomskyNormalForm(original)
    # convert tree back to bracketed text
    sentence2 = treetransforms.toTreebank(original)
    # NOTE(review): the round-trip equality below holds only if toTreebank
    # reproduces the exact input formatting -- confirm before relying on it.
    print "Sentences the same? ", sentence == sentence2
    draw_trees(tree, collapsedTree, cnfTree, parentTree, original)
if __name__ == '__main__': demo()
| Python |
# Natural Language Toolkit: Chart Parser for Feature-Based Grammars
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Rob Speer <rspeer@mit.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
#
# $Id: featurechart.py 3587 2006-10-20 06:13:32Z ehk $
"""
Extension of chart parsing implementation to handle grammars with
feature structgures as nodes.
"""
from en.parser.nltk_lite.parse.chart import *
from en.parser.nltk_lite.parse.category import *
from en.parser.nltk_lite.parse import cfg
def apply(obj, vars):
    """Return *obj* with the variable bindings *vars* applied.

    A Category is resolved through its apply_bindings method; any other
    object is returned unchanged.  NOTE(review): this deliberately
    shadows the Python builtin ``apply`` -- callers in this module use
    the name, so it must stay.
    """
    if not isinstance(obj, Category):
        return obj
    return obj.apply_bindings(vars)
class FeatureTreeEdge(TreeEdge):
    """
    A modification of L{TreeEdge} to handle nonterminals with features
    (known as L{Categories<Category>}).

    In addition to the span, left-hand side, right-hand side, and dot
    position (described at L{TreeEdge}), a C{FeatureTreeEdge} includes
    X{vars}, a set of L{FeatureBindings} saying which
    L{FeatureVariable}s are set to which values.  These values are
    applied when examining the C{lhs} or C{rhs} of a C{FeatureTreeEdge}.

    For more information about edges, see the L{EdgeI} interface.
    """
    def __init__(self, span, lhs, rhs, dot=0, vars=None):
        """
        Construct a new C{FeatureTreeEdge}.

        @type span: C{(int, int)}
        @param span: A tuple C{(s,e)}, where C{subtokens[s:e]} is the
            portion of the sentence that is consistent with the new
            edge's structure.
        @type lhs: L{Category}
        @param lhs: The new edge's left-hand side, specifying the
            hypothesized tree's node value.
        @type rhs: C{list} of (L{Category} and C{string})
        @param rhs: The new edge's right-hand side, specifying the
            hypothesized tree's children.
        @type dot: C{int}
        @param dot: The position of the new edge's dot.  C{rhs[:dot]}
            is the prefix already found consistent with the text.
        @type vars: L{FeatureBindings}
        @param vars: The bindings specifying what values certain
            variables in this edge must have.  Defaults to an empty
            set of bindings.
        """
        TreeEdge.__init__(self, span, lhs, rhs, dot)
        if vars is None: vars = FeatureBindings()
        self._vars = vars

    def from_production(production, index, bindings=None):
        """
        @return: A new C{FeatureTreeEdge} formed from the given
            production: lhs/rhs are taken from C{production}, the span
            is C{(index, index)}, the dot position is C{0}, and the
            edge may carry pre-set variable bindings.
        @rtype: L{FeatureTreeEdge}
        """
        return FeatureTreeEdge(span=(index, index), lhs=production.lhs(),
                               rhs=production.rhs(), dot=0, vars=bindings)
    from_production = staticmethod(from_production)

    # Accessors
    def vars(self):
        """
        @return: a copy of the bindings mapping L{FeatureVariable}s to values.
        @rtype: L{VariableBindings}
        """
        return self._vars.copy()

    def lhs(self):
        """
        @return: the value of the left-hand side with variables set.
        @rtype: C{Category}
        """
        return TreeEdge.lhs(self).apply_bindings(self._vars)

    def orig_lhs(self):
        """
        @return: the value of the left-hand side with no variables set.
        @rtype: C{Category}
        """
        return TreeEdge.lhs(self)

    def rhs(self):
        """
        @return: the value of the right-hand side with variables set.
        @rtype: C{Category}
        """
        return tuple([apply(x, self._vars) for x in TreeEdge.rhs(self)])

    def orig_rhs(self):
        """
        @return: the value of the right-hand side with no variables set.
        @rtype: C{Category}
        """
        return TreeEdge.rhs(self)

    # String representation
    def __str__(self):
        # Fixed: the original used 'str' as the local name (shadowing the
        # builtin) and recomputed self.rhs() on every loop iteration.
        bound_rhs = self.rhs()
        s = '%r ->' % self.lhs()
        for i in range(len(self._rhs)):
            if i == self._dot: s += ' *'
            s += ' %r' % (bound_rhs[i],)
        if len(self._rhs) == self._dot: s += ' *'
        return s
class FeatureFundamentalRule(FundamentalRule):
    """
    The fundamental rule adapted to feature-based edges: the left
    edge's next symbol must unify (rather than merely equal) the
    complete right edge's left-hand side, extending the left edge's
    variable bindings in the process.
    """
    def apply_iter(self, chart, grammar, left_edge, right_edge):
        # Make sure the rule is applicable.
        if not (left_edge.end() == right_edge.start() and
                left_edge.is_incomplete() and right_edge.is_complete() and
                isinstance(left_edge, FeatureTreeEdge) and
                isinstance(right_edge, FeatureTreeEdge)
                ):
            return
        # Unification may add new bindings; they are carried by the new edge.
        bindings = left_edge.vars()
        unify = left_edge.next().unify(right_edge.lhs().remove_unbound_vars(), bindings)
        if unify is None: return
        # Construct the new edge.
        new_edge = FeatureTreeEdge(span=(left_edge.start(), right_edge.end()),
                                   lhs=left_edge.lhs(), rhs=left_edge.rhs(),
                                   dot=left_edge.dot()+1, vars=bindings)
        # Add it to the chart, with appropriate child pointers.
        changed_chart = False
        for cpl1 in chart.child_pointer_lists(left_edge):
            if chart.insert(new_edge, cpl1+(right_edge,)):
                changed_chart = True
        # If we changed the chart, then generate the edge.
        if changed_chart: yield new_edge
class SingleEdgeFeatureFundamentalRule(SingleEdgeFundamentalRule):
    """
    A single-edge driver for L{FeatureFundamentalRule}: given one edge,
    pair it with every compatible chart partner and yield the combined
    edges.
    """
    _fundamental_rule = FeatureFundamentalRule()
    def apply_iter(self, chart, grammar, edge1):
        fr = self._fundamental_rule
        if edge1.is_incomplete():
            # edge1 = left_edge; edge2 = right_edge
            for edge2 in chart.select(start=edge1.end(), is_complete=True):
                for new_edge in fr.apply_iter(chart, grammar, edge1, edge2):
                    yield new_edge
        else:
            # edge2 = left_edge; edge1 = right_edge
            for edge2 in chart.select(end=edge1.start(), is_complete=False):
                for new_edge in fr.apply_iter(chart, grammar, edge2, edge1):
                    yield new_edge
class FeatureTopDownExpandRule(TopDownExpandRule):
    """
    The @C{TopDownExpandRule} specialised for feature-based grammars:
    a production is predicted when its left-hand side unifies with the
    incomplete edge's next symbol.
    """
    def apply_iter(self, chart, grammar, edge):
        if edge.is_complete(): return
        for prod in grammar.productions():
            # A fresh set of bindings is used for each candidate production.
            bindings = FeatureBindings()
            unify = edge.next().unify(prod.lhs(), bindings)
            # Bindings are not preserved here. Should they be?
            if unify is not None:
                new_edge = FeatureTreeEdge.from_production(prod, edge.end())
                if chart.insert(new_edge, ()):
                    yield new_edge
class FeatureEarleyChartParse(EarleyChartParse):
    """
    A chart parser implementing the Earley parsing algorithm, allowing
    nonterminals that have features (known as L{Categories<Category>}).

      - For each index I{end} in [0, 1, ..., N]:
        - For each I{edge} s.t. I{edge}.end = I{end}:
          - If I{edge} is incomplete, and I{edge}.next is not a part
            of speech:
            - Apply PredictorRule to I{edge}
          - If I{edge} is incomplete, and I{edge}.next is a part of
            speech:
            - Apply ScannerRule to I{edge}
          - If I{edge} is complete:
            - Apply CompleterRule to I{edge}
      - Return any complete parses in the chart

    C{FeatureEarleyChartParse} uses a X{lexicon} to decide whether a leaf
    has a given part of speech.  This lexicon is encoded as a
    dictionary that maps each word to a list of parts of speech that
    word can have.  Unlike in the L{EarleyChartParse}, this lexicon is
    case-insensitive.
    """
    def __init__(self, grammar, lexicon, trace=0):
        # Build a case-insensitive lexicon (keys are upper-cased words).
        ci_lexicon = dict([(k.upper(), v) for k, v in lexicon.iteritems()])
        # Call the super constructor.
        EarleyChartParse.__init__(self, grammar, ci_lexicon, trace)

    def get_parse_list(self, tokens):
        """
        Parse *tokens* and return the list of complete parse trees.
        """
        chart = Chart(tokens)
        grammar = self._grammar
        # Width, for printing trace edges.
        #w = 40/(chart.num_leaves()+1)
        w = 2
        if self._trace > 0: print ' '*9, chart.pp_leaves(w)
        # Initialize the chart with a special "starter" edge.
        root = GrammarCategory(pos='[INIT]')
        edge = FeatureTreeEdge((0,0), root, (grammar.start(),), 0,
                               FeatureBindings())
        chart.insert(edge, ())
        # Create the 3 rules:
        predictor = FeatureTopDownExpandRule()
        completer = SingleEdgeFeatureFundamentalRule()
        #scanner = FeatureScannerRule(self._lexicon)
        for end in range(chart.num_leaves()+1):
            if self._trace > 1: print 'Processing queue %d' % end
            # Scanner rule substitute, i.e. this is being used in place
            # of a proper FeatureScannerRule at the moment.  For each POS
            # the lexicon lists for the previous leaf, insert a leaf edge
            # plus a complete POS edge spanning it.
            if end > 0 and end-1 < chart.num_leaves():
                leaf = chart.leaf(end-1)
                for pos in self._lexicon.get(leaf.upper(), []):
                    new_leaf_edge = LeafEdge(leaf, end-1)
                    chart.insert(new_leaf_edge, ())
                    new_pos_edge = FeatureTreeEdge((end-1, end), pos, [leaf], 1,
                                                   FeatureBindings())
                    chart.insert(new_pos_edge, (new_leaf_edge,))
                    if self._trace > 0:
                        print 'Scanner  ', chart.pp_edge(new_leaf_edge,w)
            for edge in chart.select(end=end):
                if edge.is_incomplete():
                    for e in predictor.apply(chart, grammar, edge):
                        if self._trace > 0:
                            print 'Predictor', chart.pp_edge(e,w)
                #if edge.is_incomplete():
                #    for e in scanner.apply(chart, grammar, edge):
                #        if self._trace > 0:
                #            print 'Scanner  ', chart.pp_edge(e,w)
                if edge.is_complete():
                    for e in completer.apply(chart, grammar, edge):
                        if self._trace > 0:
                            print 'Completer', chart.pp_edge(e,w)
        # Output a list of complete parses.
        return chart.parses(root)
def demo():
    """
    Demonstrate the feature-based Earley parser on a small grammar
    with singular/plural agreement features.
    """
    import sys, time

    S = GrammarCategory.parse('S')
    VP = GrammarCategory.parse('VP')
    NP = GrammarCategory.parse('NP')
    PP = GrammarCategory.parse('PP')
    V = GrammarCategory.parse('V')
    N = GrammarCategory.parse('N')
    P = GrammarCategory.parse('P')
    Name = GrammarCategory.parse('Name')
    Det = GrammarCategory.parse('Det')
    DetSg = GrammarCategory.parse('Det[-pl]')
    DetPl = GrammarCategory.parse('Det[+pl]')
    NSg = GrammarCategory.parse('N[-pl]')
    NPl = GrammarCategory.parse('N[+pl]')

    # Define some grammatical productions.
    grammatical_productions = [
        cfg.Production(S, (NP, VP)), cfg.Production(PP, (P, NP)),
        cfg.Production(NP, (NP, PP)),
        cfg.Production(VP, (VP, PP)), cfg.Production(VP, (V, NP)),
        cfg.Production(VP, (V,)), cfg.Production(NP, (DetPl, NPl)),
        cfg.Production(NP, (DetSg, NSg))]

    # Define some lexical productions.
    lexical_productions = [
        cfg.Production(NP, ('John',)), cfg.Production(NP, ('I',)),
        cfg.Production(Det, ('the',)), cfg.Production(Det, ('my',)),
        cfg.Production(Det, ('a',)),
        cfg.Production(NSg, ('dog',)), cfg.Production(NSg, ('cookie',)),
        cfg.Production(V, ('ate',)), cfg.Production(V, ('saw',)),
        cfg.Production(P, ('with',)), cfg.Production(P, ('under',)),
        ]

    # The lexicon maps upper-cased words to their possible categories.
    earley_grammar = cfg.Grammar(S, grammatical_productions)
    earley_lexicon = {}
    for prod in lexical_productions:
        earley_lexicon.setdefault(prod.rhs()[0].upper(), []).append(prod.lhs())

    sent = 'I saw John with a dog with my cookie'
    print "Sentence:\n", sent
    from en.parser.nltk_lite import tokenize
    tokens = list(tokenize.whitespace(sent))
    t = time.time()
    cp = FeatureEarleyChartParse(earley_grammar, earley_lexicon, trace=1)
    trees = cp.get_parse_list(tokens)
    print "Time: %s" % (time.time() - t)
    for tree in trees: print tree
def run_profile():
    """Profile ten runs of demo() and print the top 60 hot spots,
    sorted both by internal time and by cumulative time.

    Uses cProfile, the C-accelerated drop-in replacement for the much
    slower pure-Python 'profile' module; pstats reads either's output.
    """
    import cProfile
    cProfile.run('for i in range(10): demo()', '/tmp/profile.out')
    import pstats
    p = pstats.Stats('/tmp/profile.out')
    p.strip_dirs().sort_stats('time', 'cum').print_stats(60)
    p.strip_dirs().sort_stats('cum', 'time').print_stats(60)
if __name__ == '__main__':
demo()
| Python |
# Natural Language Toolkit: Probabilistic Chart Parsers
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@csse.unimelb.edu.au>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Classes and interfaces for associating probabilities with tree
structures that represent the internal organization of a text. The
probabilistic parser module defines C{BottomUpChartParse}.
C{BottomUpChartParse} is an abstract class that implements a
bottom-up chart parser for C{PCFG}s. It maintains a queue of edges,
and adds them to the chart one at a time. The ordering of this queue
is based on the probabilities associated with the edges, allowing the
parser to expand more likely edges before less likely ones. Each
subclass implements a different queue ordering, producing different
search strategies. Currently the following subclasses are defined:
- C{InsideParse} searches edges in decreasing order of
their trees' inside probabilities.
- C{RandomParse} searches edges in random order.
- C{LongestParse} searches edges in decreasing order of their
location's length.
- C{BeamParse} limits the number of edges in the queue, and
searches edges in decreasing order of their trees' inside
probabilities.
"""
##//////////////////////////////////////////////////////
## Bottom-Up PCFG Chart Parser
##//////////////////////////////////////////////////////
# [XX] This might not be implemented quite right -- it would be better
# to associate probabilities with child pointer lists.
from en.parser.nltk_lite.parse.chart import *
from en.parser.nltk_lite.parse.tree import ProbabilisticTree
from en.parser.nltk_lite.parse.cfg import Nonterminal
# Probabilistic edges
class ProbabilisticLeafEdge(LeafEdge):
    # A leaf edge always has probability 1.0: the token is present in
    # the text.
    def prob(self): return 1.0
class ProbabilisticTreeEdge(TreeEdge):
    """A L{TreeEdge} annotated with the probability of its tree."""
    def __init__(self, prob, *args, **kwargs):
        self._prob = prob
        TreeEdge.__init__(self, *args, **kwargs)
    def prob(self): return self._prob
    def __cmp__(self, other):
        # Order primarily by probability, then by the base edge fields.
        # (Fixed: the original returned -1 for ANY probability
        # difference, so both a < b and b < a could hold at once --
        # not a consistent ordering.  Equality semantics -- nonzero when
        # probabilities differ -- are preserved.)
        if self._prob != other.prob(): return cmp(self._prob, other.prob())
        return TreeEdge.__cmp__(self, other)
    def from_production(production, index, p):
        """Create a zero-dot edge at position *index* for *production*
        with probability *p*."""
        return ProbabilisticTreeEdge(p, (index, index), production.lhs(),
                                     production.rhs(), 0)
    from_production = staticmethod(from_production)
# Rules using probabilistic edges
class BottomUpInitRule(AbstractChartRule):
    """
    Seed the chart with a probabilistic leaf edge for every token.
    """
    NUM_EDGES=0
    def apply_iter(self, chart, grammar):
        for index in range(chart.num_leaves()):
            new_edge = ProbabilisticLeafEdge(chart.leaf(index), index)
            if chart.insert(new_edge, ()):
                yield new_edge
class BottomUpPredictRule(AbstractChartRule):
    """
    For each complete edge, predict every production whose right-hand
    side begins with that edge's left-hand side, with the production's
    own probability.
    """
    NUM_EDGES=1
    def apply_iter(self, chart, grammar, edge):
        if edge.is_incomplete(): return
        for prod in grammar.productions():
            if edge.lhs() == prod.rhs()[0]:
                new_edge = ProbabilisticTreeEdge.from_production(prod, edge.start(), prod.prob())
                if chart.insert(new_edge, ()):
                    yield new_edge
class FundamentalRule(AbstractChartRule):
    """
    The fundamental rule for probabilistic edges: combine an incomplete
    left edge with an adjacent complete right edge, multiplying their
    probabilities.
    """
    NUM_EDGES=2
    def apply_iter(self, chart, grammar, left_edge, right_edge):
        # Make sure the rule is applicable.
        if not (left_edge.end() == right_edge.start() and
                left_edge.next() == right_edge.lhs() and
                left_edge.is_incomplete() and right_edge.is_complete()):
            return
        # Construct the new edge; its probability is the product of the
        # probabilities of its parts.
        p = left_edge.prob() * right_edge.prob()
        new_edge = ProbabilisticTreeEdge(p,
                                         span=(left_edge.start(), right_edge.end()),
                                         lhs=left_edge.lhs(), rhs=left_edge.rhs(),
                                         dot=left_edge.dot()+1)
        # Add it to the chart, with appropriate child pointers.
        changed_chart = False
        for cpl1 in chart.child_pointer_lists(left_edge):
            if chart.insert(new_edge, cpl1+(right_edge,)):
                changed_chart = True
        # If we changed the chart, then generate the edge.
        if changed_chart: yield new_edge
class SingleEdgeFundamentalRule(AbstractChartRule):
    """
    A single-edge driver for L{FundamentalRule}: given one edge, select
    every compatible chart partner and yield the combined edges.
    """
    NUM_EDGES=1
    _fundamental_rule = FundamentalRule()
    def apply_iter(self, chart, grammar, edge1):
        fr = self._fundamental_rule
        if edge1.is_incomplete():
            # edge1 = left_edge; edge2 = right_edge
            for edge2 in chart.select(start=edge1.end(), is_complete=True,
                                      lhs=edge1.next()):
                for new_edge in fr.apply_iter(chart, grammar, edge1, edge2):
                    yield new_edge
        else:
            # edge2 = left_edge; edge1 = right_edge
            for edge2 in chart.select(end=edge1.start(), is_complete=False,
                                      next=edge1.lhs()):
                for new_edge in fr.apply_iter(chart, grammar, edge2, edge1):
                    yield new_edge
    def __str__(self): return 'Fundamental Rule'
class BottomUpChartParse(AbstractParse):
    """
    An abstract bottom-up parser for C{PCFG}s that uses a C{Chart} to
    record partial results.  C{BottomUpChartParse} maintains a
    queue of edges that can be added to the chart.  This queue is
    initialized with edges for each token in the text that is being
    parsed.  C{BottomUpChartParse} inserts these edges into the
    chart one at a time, starting with the most likely edges, and
    proceeding to less likely edges.  For each edge that is added to
    the chart, it may become possible to insert additional edges into
    the chart; these are added to the queue.  This process continues
    until enough complete parses have been generated, or until the
    queue is empty.

    The sorting order for the queue is not specified by
    C{BottomUpChartParse}.  Different sorting orders will result
    in different search strategies.  The sorting order for the queue
    is defined by the method C{sort_queue}; subclasses are required
    to provide a definition for this method.

    @type _grammar: C{PCFG}
    @ivar _grammar: The grammar used to parse sentences.
    @type _trace: C{int}
    @ivar _trace: The level of tracing output that should be generated
        when parsing a text.
    """
    def __init__(self, grammar, trace=0):
        """
        Create a new C{BottomUpChartParse}, that uses C{grammar}
        to parse texts.

        @type grammar: C{PCFG}
        @param grammar: The grammar used to parse texts.
        @type trace: C{int}
        @param trace: The level of tracing that should be used when
            parsing a text.  C{0} will generate no tracing output;
            and higher numbers will produce more verbose tracing
            output.
        """
        self._grammar = grammar
        self._trace = trace
        AbstractParse.__init__(self)

    def trace(self, trace=2):
        """
        Set the level of tracing output that should be generated when
        parsing a text.

        @type trace: C{int}
        @param trace: The trace level.  A trace level of C{0} will
            generate no tracing output; and higher trace levels will
            produce more verbose tracing output.
        @rtype: C{None}
        """
        self._trace = trace

    def get_parse_list(self, tokens):
        """
        Parse *tokens* and return all complete parses found, sorted by
        decreasing probability.
        """
        chart = Chart(tokens)
        grammar = self._grammar

        # Chart parser rules.
        bu_init = BottomUpInitRule()
        bu = BottomUpPredictRule()
        fr = SingleEdgeFundamentalRule()

        # Our queue!
        queue = []

        # Initialize the chart.
        for e in bu_init.apply_iter(chart, grammar):
            if self._trace>1: chart.pp_edge(e,width=2)
            queue.append(e)

        while len(queue) > 0:
            # Re-sort the queue: the subclass-defined order puts the edge
            # to try next at the END of the queue (pop() takes the last).
            self.sort_queue(queue, chart)

            # Get the best edge.
            edge = queue.pop()
            if self._trace>0:
                print '  %-50s prob=%s' % (chart.pp_edge(edge,width=2),
                                           edge.prob())

            # Apply BU & FR to it.
            queue.extend(bu.apply(chart, grammar, edge))
            queue.extend(fr.apply(chart, grammar, edge))

        # Get a list of complete parses.
        parses = chart.parses(grammar.start(), ProbabilisticTree)

        # Assign probabilities to the trees.
        prod_probs = {}
        for prod in grammar.productions():
            prod_probs[prod.lhs(), prod.rhs()] = prod.prob()
        for parse in parses:
            self._setprob(parse, prod_probs)

        # Sort by probability
        parses.sort(lambda a,b: cmp(b.prob(), a.prob()))

        return parses

    def _setprob(self, tree, prod_probs):
        # Recursively assign each subtree the product of its production's
        # probability and its children's probabilities.
        if tree.prob() is not None: return

        # Get the prob of the CFG production.
        lhs = Nonterminal(tree.node)
        rhs = []
        for child in tree:
            if isinstance(child, Tree):
                rhs.append(Nonterminal(child.node))
            else:
                rhs.append(child)
        prob = prod_probs[lhs, tuple(rhs)]

        # Get the probs of children.
        for child in tree:
            if isinstance(child, Tree):
                self._setprob(child, prod_probs)
                prob *= child.prob()

        tree.set_prob(prob)

    def sort_queue(self, queue, chart):
        """
        Sort the given queue of C{Edge}s, placing the edge that should
        be tried first at the beginning of the queue.  This method
        will be called after each C{Edge} is added to the queue.

        @param queue: The queue of C{Edge}s to sort.  Each edge in
            this queue is an edge that could be added to the chart by
            the fundamental rule; but that has not yet been added.
        @type queue: C{list} of C{Edge}
        @param chart: The chart being used to parse the text.  This
            chart can be used to provide extra information for sorting
            the queue.
        @type chart: C{Chart}
        @rtype: C{None}
        """
        raise AssertionError, "BottomUpChartParse is an abstract class"
class InsideParse(BottomUpChartParse):
    """
    A bottom-up parser for C{PCFG}s that tries edges in descending
    order of the inside probabilities of their trees.  The X{inside
    probability} of a tree is simply the
    probability of the entire tree, ignoring its context.  In
    particular, the inside probability of a tree generated by
    production M{p} with children M{c[1]}, M{c[2]}, ..., M{c[n]} is
    P(M{p})*P(M{c[1]})*P(M{c[2]})*M{...}*P(M{c[n]}); and the inside
    probability of a token is 1 if it is present in the text, and 0 if
    it is absent.

    This sorting order results in a type of lowest-cost-first search
    strategy.
    """
    # Inherit constructor.
    def sort_queue(self, queue, chart):
        """
        Sort the given queue of edges, in descending order of the
        inside probabilities of the edges' trees.

        @param queue: The queue of C{Edge}s to sort.  Each edge in
            this queue is an edge that could be added to the chart by
            the fundamental rule; but that has not yet been added.
        @type queue: C{list} of C{Edge}
        @param chart: The chart being used to parse the text.  This
            chart can be used to provide extra information for sorting
            the queue.
        @type chart: C{Chart}
        @rtype: C{None}
        """
        # Ascending sort: the highest-probability edge ends up last,
        # where the caller's pop() retrieves it first.
        queue.sort(lambda e1,e2:cmp(e1.prob(), e2.prob()))
# Eventually, this will become some sort of inside-outside parser:
# class InsideOutsideParse(BottomUpChartParse):
# def __init__(self, grammar, trace=0):
# # Inherit docs.
# BottomUpChartParse.__init__(self, grammar, trace)
#
# # Find the best path from S to each nonterminal
# bestp = {}
# for production in grammar.productions(): bestp[production.lhs()]=0
# bestp[grammar.start()] = 1.0
#
# for i in range(len(grammar.productions())):
# for production in grammar.productions():
# lhs = production.lhs()
# for elt in production.rhs():
# bestp[elt] = max(bestp[lhs]*production.prob(),
# bestp.get(elt,0))
#
# self._bestp = bestp
# for (k,v) in self._bestp.items(): print k,v
#
# def _cmp(self, e1, e2):
# return cmp(e1.structure()[PROB]*self._bestp[e1.lhs()],
# e2.structure()[PROB]*self._bestp[e2.lhs()])
#
# def sort_queue(self, queue, chart):
# queue.sort(self._cmp)
import random
class RandomParse(BottomUpChartParse):
    """
    A bottom-up parser for C{PCFG}s that tries edges in random order.
    This sorting order results in a random search strategy.
    """
    # Inherit constructor
    def sort_queue(self, queue, chart):
        # Swap a randomly chosen edge into the last slot, which the
        # caller pops next.  Precondition: queue is non-empty (the
        # caller's while-loop guarantees this; randint(0, -1) would
        # raise on an empty queue).
        i = random.randint(0, len(queue)-1)
        (queue[-1], queue[i]) = (queue[i], queue[-1])
class UnsortedParse(BottomUpChartParse):
    """
    A bottom-up parser for C{PCFG}s that tries edges in whatever order.
    """
    # Inherit constructor
    def sort_queue(self, queue, chart):
        # No-op: edges are popped in whatever order they were queued.
        pass
class LongestParse(BottomUpChartParse):
    """
    A bottom-up parser for C{PCFG}s that tries longer edges before
    shorter ones.  This sorting order results in a type of best-first
    search strategy.
    """
    # Inherit constructor
    def sort_queue(self, queue, chart):
        # Ascending sort by length: the longest edge ends up last, where
        # the caller's pop() retrieves it first.
        queue.sort(lambda e1,e2: cmp(e1.length(), e2.length()))
class BeamParse(BottomUpChartParse):
    """
    A bottom-up parser for C{PCFG}s that limits the number of edges in
    its edge queue.
    """
    def __init__(self, beam_size, grammar, trace=0):
        """
        Create a new C{BottomUpChartParse}, that uses C{grammar}
        to parse texts.

        @type beam_size: C{int}
        @param beam_size: The maximum length for the parser's edge queue.
        @type grammar: C{pcfg.Grammar}
        @param grammar: The grammar used to parse texts.
        @type trace: C{int}
        @param trace: The level of tracing that should be used when
            parsing a text.  C{0} will generate no tracing output;
            and higher numbers will produce more verbose tracing
            output.
        """
        BottomUpChartParse.__init__(self, grammar, trace)
        self._beam_size = beam_size

    def sort_queue(self, queue, chart):
        # Sort ascending by probability, then discard the lowest-
        # probability edges beyond the beam width.
        queue.sort(lambda e1,e2:cmp(e1.prob(), e2.prob()))
        if len(queue) > self._beam_size:
            split = len(queue)-self._beam_size
            if self._trace > 2:
                for edge in queue[:split]:
                    print '  %-50s [DISCARDED]' % chart.pp_edge(edge,2)
            queue[:] = queue[split:]
##//////////////////////////////////////////////////////
## Test Code
##//////////////////////////////////////////////////////
def demo():
    """
    A demonstration of the probabilistic parsers.  The user is
    prompted to select which demo to run, and how many parses should
    be found; and then each parser is run on the same demo, and a
    summary of the results are displayed.
    """
    import sys, time
    from en.parser.nltk_lite import tokenize
    from en.parser.nltk_lite.parse import cfg, pcfg, pchart

    # Define two demos.  Each demo has a sentence and a grammar.
    demos = [('I saw John with my cookie', pcfg.toy1),
             ('the boy saw Jack with Bob under the table with a telescope',
              pcfg.toy2)]

    # Ask the user which demo they want to use.
    print
    for i in range(len(demos)):
        print '%3s: %s' % (i+1, demos[i][0])
        print '     %r' % demos[i][1]
        print
    print 'Which demo (%d-%d)? ' % (1, len(demos)),
    try:
        snum = int(sys.stdin.readline().strip())-1
        sent, grammar = demos[snum]
    except:
        # Any bad input (non-numeric, out of range, EOF) aborts the demo.
        print 'Bad sentence number'
        return

    # Tokenize the sentence.
    tokens = list(tokenize.whitespace(sent))

    # Define a list of parsers.  We'll use all parsers.
    parsers = [
        pchart.InsideParse(grammar),
        pchart.RandomParse(grammar),
        pchart.UnsortedParse(grammar),
        pchart.LongestParse(grammar),
        pchart.BeamParse(len(tokens)+1, grammar)
        ]

    # Run the parsers on the tokenized sentence.
    times = []
    average_p = []
    num_parses = []
    all_parses = {}
    for parser in parsers:
        print '\ns: %s\nparser: %s\ngrammar: %s' % (sent,parser,pcfg)
        parser.trace(3)
        t = time.time()
        parses = parser.get_parse_list(tokens)
        times.append(time.time()-t)
        # Average parse probability (0 when no parse was found).
        if parses: p = reduce(lambda a,b:a+b.prob(), parses, 0)/len(parses)
        else: p = 0
        average_p.append(p)
        num_parses.append(len(parses))
        # Frozen trees are hashable, so duplicates across parsers collapse.
        for p in parses: all_parses[p.freeze()] = 1

    # Print some summary statistics
    print
    print '       Parser      | Time (secs)   # Parses   Average P(parse)'
    print '-------------------+------------------------------------------'
    for i in range(len(parsers)):
        print '%18s |%11.4f%11d%19.14f' % (parsers[i].__class__.__name__,
                                           times[i],num_parses[i],average_p[i])
    parses = all_parses.keys()
    if parses: p = reduce(lambda a,b:a+b.prob(), parses, 0)/len(parses)
    else: p = 0
    print '-------------------+------------------------------------------'
    print '%18s |%11s%11d%19.14f' % ('(All Parses)', 'n/a', len(parses), p)

    # Ask the user if we should draw the parses.
    print
    print 'Draw parses (y/n)? ',
    if sys.stdin.readline().strip().lower().startswith('y'):
        from en.parser.nltk_lite.draw.tree import draw_trees
        print '  please wait...'
        draw_trees(*parses)

    # Ask the user if we should print the parses.
    print
    print 'Print parses (y/n)? ',
    if sys.stdin.readline().strip().lower().startswith('y'):
        for parse in parses:
            print parse
if __name__ == '__main__':
demo()
| Python |
# Natural Language Toolkit: Reader for Grammar Files
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Rob Speer <rspeer@mit.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
#
# $Id: grammarfile.py 3588 2006-10-20 06:13:57Z ehk $
"""
A module to read a grammar from a *.cfg file.
"""
from en.parser.nltk_lite.parse.category import *
from en.parser.nltk_lite.parse import cfg
from en.parser.nltk_lite.parse.featurechart import *
class GrammarFile(object):
    """A grammar read from a *.cfg file, with its productions split
    into grammatical and lexical rules.

    Lines starting with '#' are comments; lines starting with '%' are
    directives ('%start <cat>' and '%include "<file>"'); any other
    non-blank line is parsed as one or more category rules.
    """
    def __init__(self):
        self.grammatical_productions = []
        self.lexical_productions = []
        self.start = GrammarCategory(pos='Start').freeze()

    def grammar(self):
        """Return a single Grammar containing all productions."""
        return cfg.Grammar(self.start, self.grammatical_productions +\
            self.lexical_productions)

    def earley_grammar(self):
        """Return a Grammar of only the grammatical productions."""
        return cfg.Grammar(self.start, self.grammatical_productions)

    def earley_lexicon(self):
        """Return a dict mapping each word to its list of categories."""
        lexicon = {}
        for prod in self.lexical_productions:
            lexicon.setdefault(prod.rhs()[0], []).append(prod.lhs())
        return lexicon

    def earley_parser(self, trace=1):
        """Return a FeatureEarleyChartParse over this grammar and lexicon."""
        return FeatureEarleyChartParse(self.earley_grammar(),
                                       self.earley_lexicon(), trace=trace)

    def apply_lines(self, lines):
        """Parse an iterable of grammar-file lines into this object."""
        for line in lines:
            line = line.strip()
            if not len(line): continue
            if line[0] == '#': continue
            if line[0] == '%':
                parts = line[1:].split()
                directive = parts[0]
                args = " ".join(parts[1:])
                if directive == 'start':
                    self.start = GrammarCategory.parse(args).freeze()
                elif directive == 'include':
                    filename = args.strip('"')
                    self.apply_file(filename)
            else:
                rules = GrammarCategory.parse_rules(line)
                for rule in rules:
                    # A one-symbol string RHS is a lexical entry.
                    if len(rule.rhs()) == 1 and isinstance(rule.rhs()[0], str):
                        self.lexical_productions.append(rule)
                    else:
                        self.grammatical_productions.append(rule)

    def apply_file(self, filename):
        """Read *filename* and apply its lines to this grammar."""
        f = open(filename)
        try:
            # Close the handle even if parsing raises (the original
            # leaked it on error).
            self.apply_lines(f.readlines())
        finally:
            f.close()

    def read_file(filename):
        """Build and return a GrammarFile parsed from *filename*."""
        result = GrammarFile()
        result.apply_file(filename)
        return result
    read_file = staticmethod(read_file)
def demo():
    # Requires a "test.cfg" grammar file in the current working directory.
    g = GrammarFile.read_file("test.cfg")
    print g.grammar()
if __name__ == '__main__':
demo()
| Python |
from en.parser.nltk_lite.parse import cfg
def generate(grammar, start=None):
    """
    Generate text from *grammar*, expanding from *start* (the
    grammar's own start symbol when none is given).  Returns the first
    fragment produced by the exhaustive expansion.
    """
    symbol = start or grammar.start()
    return _generate_all(grammar, [symbol])[0]
def _generate_all(grammar, items):
    """
    Recursively expand the sequence *items* of grammar symbols into a
    list of fragments.  A nonterminal expands through each of its
    productions; a terminal stands for itself; a longer sequence is the
    cross-product (via _multiply) of the head's and tail's expansions.
    """
    frags = []
    if len(items) == 1:
        if isinstance(items[0], cfg.Nonterminal):
            for prod in grammar.productions(lhs=items[0]):
                # NOTE(review): the whole sub-list is appended (not
                # extended), so fragments nest one level per expansion --
                # verify this matches _multiply's length-one unwrapping.
                frags.append(_generate_all(grammar, prod.rhs()))
        else:
            # A terminal: the fragment is the symbol itself.
            frags.append(items[0])
    else:
        # Combine every expansion of the head with every expansion of
        # the rest of the sequence.
        for frag1 in _generate_all(grammar, [items[0]]):
            for frag2 in _generate_all(grammar, items[1:]):
                for frag in _multiply(frag1, frag2):
                    frags.append(frag)
    return frags
def _multiply(frag1, frag2):
frags = []
if len(frag1) == 1:
frag1 = [frag1]
if len(frag2) == 1:
frag2 = [frag2]
for f1 in frag1:
for f2 in frag2:
frags.append(f1+f2)
return frags
# Toy grammar for the generation demo.  (The P productions are declared
# but no right-hand side below uses P, so they are unreachable from S.)
grammar = cfg.parse_grammar("""
S -> NP VP
NP -> Det N
VP -> V NP
Det -> 'the'
Det -> 'a'
N -> 'man' | 'park' | 'dog' | 'telescope'
V -> 'saw' | 'walked'
P -> 'in' | 'with'
""")

# NOTE(review): generate() returns only the FIRST fragment of the
# exhaustive expansion, so this loop iterates over that fragment's
# elements -- confirm the demo prints what was intended.
for sent in generate(grammar):
    print sent
| Python |
# Natural Language Toolkit: Viterbi Probabilistic Parser
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@csse.unimelb.edu.au>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
from en.parser.nltk_lite.parse import ParseI, AbstractParse
from en.parser.nltk_lite.parse import cfg, pcfg
from en.parser.nltk_lite.parse.tree import Tree, ProbabilisticTree
from en.parser.nltk_lite.parse.chart import Chart, LeafEdge, TreeEdge, AbstractChartRule
import types
##//////////////////////////////////////////////////////
## Viterbi PCFG Parser
##//////////////////////////////////////////////////////
class ViterbiParse(AbstractParse):
    """
    A bottom-up C{PCFG} parser that uses dynamic programming to find
    the single most likely parse for a text.  The C{ViterbiParse} parser
    parses texts by filling in a X{most likely constituent table}.
    This table records the most probable tree representation for any
    given span and node value.  In particular, it has an entry for
    every start index, end index, and node value, recording the most
    likely subtree that spans from the start index to the end index,
    and has the given node value.

    The C{ViterbiParse} parser fills in this table incrementally.  It starts
    by filling in all entries for constituents that span one element
    of text (i.e., entries where the end index is one greater than the
    start index).  After it has filled in all table entries for
    constituents that span one element of text, it fills in the
    entries for constituents that span two elements of text.  It
    continues filling in the entries for constituents spanning larger
    and larger portions of the text, until the entire table has been
    filled.  Finally, it returns the table entry for a constituent
    spanning the entire text, whose node value is the grammar's start
    symbol.

    In order to find the most likely constituent with a given span and
    node value, the C{ViterbiParse} parser considers all productions that
    could produce that node value.  For each production, it finds all
    children that collectively cover the span and have the node values
    specified by the production's right hand side.  If the probability
    of the tree formed by applying the production to the children is
    greater than the probability of the current entry in the table,
    then the table is updated with this new tree.

    A pseudo-code description of the algorithm used by
    C{ViterbiParse} is:

      - Create an empty most likely constituent table, M{MLC}.
      - For M{width} in 1...len(M{text}):
        - For M{start} in 1...len(M{text})-M{width}:
          - For M{prod} in grammar.productions:
            - For each sequence of subtrees [M{t[1]}, M{t[2]}, ...,
              M{t[n]}] in M{MLC}, where M{t[i]}.node==M{prod}.rhs[i],
              and the sequence covers [M{start}:M{start}+M{width}]:
                - M{old_p} = M{MLC}[M{start}, M{start+width}, M{prod}.lhs]
                - M{new_p} = P(M{t[1]})*P(M{t[2]})*...*P(M{t[n]})*P(M{prod})
                - if M{new_p} > M{old_p}:
                  - M{new_tree} = Tree(M{prod}.lhs, M{t[1]}, M{t[2]},
                    ..., M{t[n]})
                  - M{MLC}[M{start}, M{start+width}, M{prod}.lhs]
                    = M{new_tree}
      - Return M{MLC}[0, len(M{text}), M{start_symbol}]

    @type _grammar: C{pcfg.Grammar}
    @ivar _grammar: The grammar used to parse sentences.
    @type _trace: C{int}
    @ivar _trace: The level of tracing output that should be generated
        when parsing a text.
    """
    def __init__(self, grammar, trace=0):
        """
        Create a new C{ViterbiParse} parser, that uses C{grammar} to
        parse texts.

        @type grammar: C{pcfg.Grammar}
        @param grammar: The grammar used to parse texts.
        @type trace: C{int}
        @param trace: The level of tracing that should be used when
            parsing a text.  C{0} will generate no tracing output;
            and higher numbers will produce more verbose tracing
            output.
        """
        self._grammar = grammar
        self._trace = trace
        AbstractParse.__init__(self)

    def trace(self, trace=2):
        """
        Set the level of tracing output that should be generated when
        parsing a text.

        @type trace: C{int}
        @param trace: The trace level.  A trace level of C{0} will
            generate no tracing output; and higher trace levels will
            produce more verbose tracing output.
        @rtype: C{None}
        """
        self._trace = trace

    def get_parse_list(self, tokens):
        # Inherit docs from ParseI

        # The most likely constituent table.  This table specifies the
        # most likely constituent for a given span and type.
        # Constituents can be either Trees or Tokens.  For Trees,
        # the "type" is the Nonterminal for the tree's root node
        # value.  For Tokens, the "type" is the token's type.
        # The table is stored as a dictionary, since it is sparse.
        constituents = {}

        # Initialize the constituents dictionary with the words from
        # the text.
        if self._trace: print ('Inserting tokens into the most likely'+
                               ' constituents table...')
        for index in range(len(tokens)):
            token = tokens[index]
            # Width-1 entries: each token is its own best constituent.
            constituents[index,index+1,token] = token
            if self._trace > 1:
                self._trace_lexical_insertion(token, index, len(tokens))

        # Consider each span of length 1, 2, ..., n; and add any trees
        # that might cover that span to the constituents dictionary.
        for length in range(1, len(tokens)+1):
            if self._trace:
                print ('Finding the most likely constituents'+
                       ' spanning %d text elements...' % length)
            #print constituents
            for start in range(len(tokens)-length+1):
                span = (start, start+length)
                self._add_constituents_spanning(span, constituents,
                                                tokens)

        # Find all trees that span the entire text & have the right cat
        # NOTE(review): when no full parse exists the .get() default makes
        # this [[]] (a one-element list holding an empty list), not [].
        trees = [constituents.get((0, len(tokens),
                                   self._grammar.start()), [])]

        # Sort the trees, and return the requested number of them.
        # (The list has exactly one element, so the sort is a no-op and
        # the lambda is never invoked.)
        trees.sort(lambda t1,t2: cmp(t2.prob(), t1.prob()))
        return trees

    def _add_constituents_spanning(self, span, constituents, tokens):
        """
        Find any constituents that might cover C{span}, and add them
        to the most likely constituents table.

        @rtype: C{None}
        @type span: C{(int, int)}
        @param span: The section of the text for which we are
            trying to find possible constituents.  The span is
            specified as a pair of integers, where the first integer
            is the index of the first token that should be included in
            the constituent; and the second integer is the index of
            the first token that should not be included in the
            constituent.  I.e., the constituent should cover
            C{M{text}[span[0]:span[1]]}, where C{M{text}} is the text
            that we are parsing.

        @type constituents: C{dictionary} from
            C{(int,int,Nonterminal)} to (C{ProbabilisticToken} or
            C{ProbabilisticTree}).
        @param constituents: The most likely constituents table.  This
            table records the most probable tree representation for
            any given span and node value.  In particular,
            C{constituents(M{s},M{e},M{nv})} is the most likely
            C{ProbabilisticTree} that covers C{M{text}[M{s}:M{e}]}
            and has a node value C{M{nv}.symbol()}, where C{M{text}}
            is the text that we are parsing.  When
            C{_add_constituents_spanning} is called, C{constituents}
            should contain all possible constituents that are shorter
            than C{span}.

        @type tokens: C{list} of tokens
        @param tokens: The text we are parsing.  This is only used for
            trace output.
        """
        # Since some of the grammar productions may be unary, we need to
        # repeatedly try all of the productions until none of them add any
        # new constituents.
        changed = 1
        while changed:
            changed = 0

            # Find all instantiations of the grammar productions that
            # cover the span.
            instantiations = self._find_instantiations(span, constituents)

            # For each production instantiation, add a new
            # ProbabilisticTree whose probability is the product
            # of the childrens' probabilities and the production's
            # probability.
            for (production, children) in instantiations:
                subtrees = [c for c in children if isinstance(c, Tree)]
                p = reduce(lambda pr,t:pr*t.prob(),
                           subtrees, production.prob())
                node = production.lhs().symbol()
                tree = ProbabilisticTree(node, children, prob=p)

                # If it's a new constituent (or more probable than the
                # current entry), then add it to the constituents
                # dictionary.
                c = constituents.get((span[0], span[1], production.lhs()),
                                     None)
                if self._trace > 1:
                    if c is None or c != tree:
                        if c is None or c.prob() < tree.prob():
                            print ' Insert:',
                        else:
                            print ' Discard:',
                        self._trace_production(production, p, span, len(tokens))
                if c is None or c.prob() < tree.prob():
                    constituents[span[0], span[1], production.lhs()] = tree
                    changed = 1

    def _find_instantiations(self, span, constituents):
        """
        @return: a list of the production instantiations that cover a
            given span of the text.  A X{production instantiation} is
            a tuple containing a production and a list of children,
            where the production's right hand side matches the list of
            children; and the children cover C{span}.
        @rtype: C{list} of C{pair} of C{Production}, (C{list} of
            (C{ProbabilisticTree} or token).

        @type span: C{(int, int)}
        @param span: The section of the text for which we are
            trying to find production instantiations.  The span is
            specified as a pair of integers, where the first integer
            is the index of the first token that should be covered by
            the production instantiation; and the second integer is
            the index of the first token that should not be covered by
            the production instantiation.
        @type constituents: C{dictionary} from
            C{(int,int,Nonterminal)} to (C{ProbabilisticToken} or
            C{ProbabilisticTree}).
        @param constituents: The most likely constituents table.  This
            table records the most probable tree representation for
            any given span and node value.  See the module
            documentation for more information.
        """
        rv = []
        for production in self._grammar.productions():
            childlists = self._match_rhs(production.rhs(), span, constituents)
            for childlist in childlists:
                rv.append( (production, childlist) )
        return rv

    def _match_rhs(self, rhs, span, constituents):
        """
        @return: a set of all the lists of children that cover C{span}
            and that match C{rhs}.
        @rtype: C{list} of (C{list} of C{ProbabilisticTree} or
            C{Token})

        @type rhs: C{list} of C{Nonterminal} or (any)
        @param rhs: The list specifying what kinds of children need to
            cover C{span}.  Each nonterminal in C{rhs} specifies
            that the corresponding child should be a tree whose node
            value is that nonterminal's symbol.  Each terminal in C{rhs}
            specifies that the corresponding child should be a token
            whose type is that terminal.
        @type span: C{(int, int)}
        @param span: The section of the text for which we are
            trying to find child lists.  The span is specified as a
            pair of integers, where the first integer is the index of
            the first token that should be covered by the child list;
            and the second integer is the index of the first token
            that should not be covered by the child list.
        @type constituents: C{dictionary} from
            C{(int,int,Nonterminal)} to (C{ProbabilisticToken} or
            C{ProbabilisticTree}).
        @param constituents: The most likely constituents table.  This
            table records the most probable tree representation for
            any given span and node value.  See the module
            documentation for more information.
        """
        (start, end) = span

        # Base case: an empty RHS matches an empty span and nothing else.
        if start >= end and rhs == (): return [[]]
        if start >= end or rhs == (): return []

        # Find everything that matches the 1st symbol of the RHS, then
        # recursively match the remaining symbols against the remainder
        # of the span.
        childlists = []
        for split in range(start, end+1):
            l=constituents.get((start,split,rhs[0]))
            if l is not None:
                rights = self._match_rhs(rhs[1:], (split,end), constituents)
                childlists += [[l]+r for r in rights]

        return childlists

    def _trace_production(self, production, p, span, width):
        """
        Print trace output indicating that a given production has been
        applied at a given location.

        @param production: The production that has been applied
        @type production: C{Production}
        @param p: The probability of the tree produced by the production.
        @type p: C{float}
        @param span: The span of the production
        @type span: C{tuple}
        @rtype: C{None}
        """
        # NOTE(review): the local name 'str' shadows the builtin within
        # this method (and in _trace_lexical_insertion below).
        str = '|' + '.' * span[0]
        str += '=' * (span[1] - span[0])
        str += '.' * (width - span[1]) + '| '
        str += '%s' % production
        if self._trace > 2: str = '%-40s %12.10f ' % (str, p)

        print str

    def _trace_lexical_insertion(self, token, index, width):
        # Draw a one-token chart row ('|..=..|') for the inserted token.
        str = ' Insert: |' + '.' * index + '=' + '.' * (width-index-1) + '| '
        str += '%s' % (token,)
        print str

    def __repr__(self):
        return '<ViterbiParser for %r>' % self._grammar
##//////////////////////////////////////////////////////
## Test Code
##//////////////////////////////////////////////////////
def demo():
"""
A demonstration of the probabilistic parsers. The user is
prompted to select which demo to run, and how many parses should
be found; and then each parser is run on the same demo, and a
summary of the results are displayed.
"""
import sys, time
from en.parser.nltk_lite import tokenize
from en.parser.nltk_lite.parse import cfg, pcfg, ViterbiParse
# Define two demos. Each demo has a sentence and a grammar.
demos = [('I saw John with my cookie', pcfg.toy1),
('the boy saw Jack with Bob under the table with a telescope',
pcfg.toy2)]
# Ask the user which demo they want to use.
print
for i in range(len(demos)):
print '%3s: %s' % (i+1, demos[i][0])
print ' %r' % demos[i][1]
print
print 'Which demo (%d-%d)? ' % (1, len(demos)),
try:
snum = int(sys.stdin.readline().strip())-1
sent, grammar = demos[snum]
except:
print 'Bad sentence number'
return
# Tokenize the sentence.
tokens = list(tokenize.whitespace(sent))
parser = ViterbiParse(grammar)
all_parses = {}
print '\nsent: %s\nparser: %s\ngrammar: %s' % (sent,parser,grammar)
parser.trace(3)
t = time.time()
parses = parser.get_parse_list(tokens)
time = time.time()-t
if parses:
average = reduce(lambda a,b:a+b.prob(), parses, 0)/len(parses)
else:
average = 0
num_parses = len(parses)
for p in parses:
all_parses[p.freeze()] = 1
# Print some summary statistics
print
print 'Time (secs) # Parses Average P(parse)'
print '-----------------------------------------'
print '%11.4f%11d%19.14f' % (time, num_parses, average)
parses = all_parses.keys()
if parses:
p = reduce(lambda a,b:a+b.prob(), parses, 0)/len(parses)
else: p = 0
print '------------------------------------------'
print '%11s%11d%19.14f' % ('n/a', len(parses), p)
# Ask the user if we should draw the parses.
print
print 'Draw parses (y/n)? ',
if sys.stdin.readline().strip().lower().startswith('y'):
from en.parser.nltk_lite.draw.tree import draw_trees
print ' please wait...'
draw_trees(*parses)
# Ask the user if we should print the parses.
print
print 'Print parses (y/n)? ',
if sys.stdin.readline().strip().lower().startswith('y'):
for parse in parses:
print parse
if __name__ == '__main__':
demo()
| Python |
# Natural Language Toolkit: Context Free Grammars
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Steven Bird <sb@csse.unimelb.edu.au>
# Edward Loper <edloper@ldc.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
#
"""
Basic data classes for representing context free grammars. A
X{grammar} specifies which trees can represent the structure of a
given text. Each of these trees is called a X{parse tree} for the
text (or simply a X{parse}). In a X{context free} grammar, the set of
parse trees for any piece of a text can depend only on that piece, and
not on the rest of the text (i.e., the piece's context). Context free
grammars are often used to find possible syntactic structures for
sentences. In this context, the leaves of a parse tree are word
tokens; and the node values are phrasal categories, such as C{NP}
and C{VP}.
The L{Grammar} class is used to encode context free grammars. Each C{Grammar}
consists of a start symbol and a set of productions. The X{start
symbol} specifies the root node value for parse trees. For example,
the start symbol for syntactic parsing is usually C{S}. Start
symbols are encoded using the C{Nonterminal} class, which is discussed
below.
A Grammar's X{productions} specify what parent-child relationships a parse
tree can contain. Each production specifies that a particular
node can be the parent of a particular set of children. For example,
the production C{<S> -> <NP> <VP>} specifies that an C{S} node can
be the parent of an C{NP} node and a C{VP} node.
Grammar productions are implemented by the C{Production} class.
Each C{Production} consists of a left hand side and a right hand
side. The X{left hand side} is a C{Nonterminal} that specifies the
node type for a potential parent; and the X{right hand side} is a list
that specifies allowable children for that parent. This list
consists of C{Nonterminals} and text types: each C{Nonterminal}
indicates that the corresponding child may be a C{TreeToken} with the
specified node type; and each text type indicates that the
corresponding child may be a C{Token} with that type.
The C{Nonterminal} class is used to distinguish node values from leaf
values. This prevents the grammar from accidentally using a leaf
value (such as the English word "A") as the node of a subtree. Within
a C{Grammar}, all node values are wrapped in the C{Nonterminal} class.
Note, however, that the trees that are specified by the grammar do
B{not} include these C{Nonterminal} wrappers.
Grammars can also be given a more procedural interpretation. According to
this interpretation, a Grammar specifies any tree structure M{tree} that
can be produced by the following procedure:
- Set M{tree} to the start symbol
- Repeat until M{tree} contains no more nonterminal leaves:
- Choose a production M{prod} with whose left hand side
M{lhs} is a nonterminal leaf of M{tree}.
- Replace the nonterminal leaf with a subtree, whose node
value is the value wrapped by the nonterminal M{lhs}, and
whose children are the right hand side of M{prod}.
The operation of replacing the left hand side (M{lhs}) of a production
with the right hand side (M{rhs}) in a tree (M{tree}) is known as
X{expanding} M{lhs} to M{rhs} in M{tree}.
"""
import re
#################################################################
# Nonterminal
#################################################################
class Nonterminal(object):
    """
    A non-terminal symbol for a context free grammar.
    C{Nonterminal} wraps a node value so that C{Production}s can tell
    node values apart from leaf values.

    The wrapped node value is called the nonterminal's X{symbol}.
    Symbols are usually strings naming phrasal categories (C{"NP"},
    C{"VP"}, ...), but any immutable, hashable object may be used.
    Two C{Nonterminal}s compare equal exactly when their symbols do.

    @see: L{Grammar}
    @see: L{Production}
    @type _symbol: (any)
    @ivar _symbol: The wrapped node value; immutable and hashable.
    """
    def __init__(self, symbol):
        """
        Wrap *symbol* in a new non-terminal.

        @type symbol: (any)
        @param symbol: The node value for this C{Nonterminal}; must be
            immutable and hashable.
        """
        self._symbol = symbol
        # Cache the hash so __hash__ is a constant-time lookup.
        self._hash = hash(symbol)

    def symbol(self):
        """
        @return: The node value wrapped by this C{Nonterminal}.
        @rtype: (any)
        """
        return self._symbol

    def __eq__(self, other):
        """
        @return: True iff C{other} is a C{Nonterminal} whose symbol
            equals this one's.
        @rtype: C{boolean}
        """
        try:
            symbols_match = (self._symbol == other._symbol)
        except AttributeError:
            # other has no _symbol, so it cannot be an equal Nonterminal.
            return False
        return symbols_match and isinstance(other, self.__class__)

    def __ne__(self, other):
        """
        @return: True iff this non-terminal is not equal to C{other}.
        @rtype: C{boolean}
        """
        return not self == other

    def __cmp__(self, other):
        # Python 2 ordering hook: 0 only for equal nonterminals.
        if self == other:
            return 0
        return -1

    def __hash__(self):
        return self._hash

    def __repr__(self):
        """
        @return: C{'<M{s}>'} for a nonterminal whose symbol is C{M{s}}.
        @rtype: C{string}
        """
        # NOTE: Token currently relies on this repr format.
        return '<%s>' % (self._symbol,)

    def __str__(self):
        """
        @return: C{'M{s}'} for a nonterminal whose symbol is C{M{s}}.
        @rtype: C{string}
        """
        return '%s' % (self._symbol,)

    def __div__(self, rhs):
        """
        Combine two nonterminals with a slash (used e.g. for
        categorial-style categories).

        @param rhs: The nonterminal forming the right hand side of the
            new symbol.
        @type rhs: L{Nonterminal}
        @return: A new nonterminal whose symbol is C{'M{A}/M{B}'}.
        @rtype: L{Nonterminal}
        """
        return Nonterminal('%s/%s' % (self._symbol, rhs._symbol))
def nonterminals(symbols):
    """
    Build C{Nonterminals} from a string of symbol names.

    @param symbols: The symbol name string, delimited by commas if any
        comma is present, otherwise by whitespace.
    @type symbols: C{string}
    @return: C{Nonterminals} for the names, in their original order.
    @rtype: C{list} of L{Nonterminal}
    """
    # str.split(None) splits on runs of whitespace.
    delimiter = ',' if ',' in symbols else None
    return [Nonterminal(name.strip()) for name in symbols.split(delimiter)]
#################################################################
# Production and Grammar
#################################################################
class Production(object):
    """
    A context-free grammar production.  Each production
    expands a single C{Nonterminal} (the X{left-hand side}) to a
    sequence of terminals and C{Nonterminals} (the X{right-hand
    side}).  X{terminals} can be any immutable hashable object that is
    not a C{Nonterminal}.  Typically, terminals are strings
    representing word types, such as C{"dog"} or C{"under"}.

    Abstractly, a Grammar production indicates that the right-hand side is
    a possible X{instantiation} of the left-hand side.  Grammar
    productions are X{context-free}, in the sense that this
    instantiation should not depend on the context of the left-hand
    side or of the right-hand side.

    @see: L{Grammar}
    @see: L{Nonterminal}
    @type _lhs: L{Nonterminal}
    @ivar _lhs: The left-hand side of the production.
    @type _rhs: C{tuple} of (C{Nonterminal} and (terminal))
    @ivar _rhs: The right-hand side of the production.
    """

    def __init__(self, lhs, rhs):
        """
        Construct a new C{Production}.

        @param lhs: The left-hand side of the new C{Production}.
        @type lhs: L{Nonterminal}
        @param rhs: The right-hand side of the new C{Production}.
        @type rhs: sequence of (C{Nonterminal} and (terminal))
        @raise TypeError: If C{rhs} is a string (a common mistake when
            a single terminal should have been wrapped in a list).
        """
        # type('') / type(u'') cover str and unicode on Python 2 and are
        # both str on Python 3, replacing the Py2-only name "unicode".
        if isinstance(rhs, (type(''), type(u''))):
            # Parenthesized raise form is valid on both Python 2 and 3
            # (the original "raise TypeError, msg" is Py2-only syntax).
            raise TypeError('production right hand side should be a list, not a string')
        self._lhs = lhs
        self._rhs = tuple(rhs)
        # Cache the hash of the (lhs, rhs) pair.
        self._hash = hash((self._lhs, self._rhs))

    def lhs(self):
        """
        @return: the left-hand side of this C{Production}.
        @rtype: L{Nonterminal}
        """
        return self._lhs

    def rhs(self):
        """
        @return: the right-hand side of this C{Production}.
        @rtype: sequence of (C{Nonterminal} and (terminal))
        """
        return self._rhs

    def __str__(self):
        """
        @return: A verbose string representation of the
            C{Production}, e.g. C{S -> NP VP}.
        @rtype: C{string}
        """
        str = '%s ->' % (self._lhs.symbol(),)
        for elt in self._rhs:
            if isinstance(elt, Nonterminal):
                str += ' %s' % (elt.symbol(),)
            else:
                str += ' %r' % (elt,)
        return str

    def __repr__(self):
        """
        @return: A concise string representation of the
            C{Production}.
        @rtype: C{string}
        """
        return '%s' % self

    def __eq__(self, other):
        """
        @return: true if this C{Production} has the same left- and
            right-hand sides as C{other}.
        @rtype: C{boolean}
        """
        return (isinstance(other, self.__class__) and
                self._lhs == other._lhs and
                self._rhs == other._rhs)

    def __ne__(self, other):
        return not (self == other)

    def __cmp__(self, other):
        # Python 2 ordering hook; orders productions by (lhs, rhs).
        if not isinstance(other, self.__class__): return -1
        return cmp((self._lhs, self._rhs), (other._lhs, other._rhs))

    def __hash__(self):
        """
        @return: A hash value for the C{Production}.
        @rtype: C{int}
        """
        return self._hash
class Grammar(object):
    """
    A context-free grammar: a start symbol together with a collection
    of productions.  The terminal and nonterminal vocabularies are
    implicit in the productions.

    Subclass this if you need efficient key-based access to
    productions beyond what the built-in indexes provide.
    """
    def __init__(self, start, productions):
        """
        Create a new context-free grammar.

        @param start: The start symbol
        @type start: L{Nonterminal}
        @param productions: The list of productions that defines the grammar
        @type productions: C{list} of L{Production}
        """
        self._start = start
        self._productions = tuple(productions)
        # Index productions by their LHS and by the first RHS element,
        # so productions() can answer keyed queries quickly.
        self._lhs_index = {}
        self._rhs_index = {}
        for prod in self._productions:
            self._lhs_index.setdefault(prod._lhs, []).append(prod)
            if prod._rhs:
                self._rhs_index.setdefault(prod._rhs[0], []).append(prod)

    def start(self):
        """Return the grammar's start symbol."""
        return self._start

    # buggy: needs to cope with both lhs and rhs specified
    # needs to raise an error if *hs is specified but not in index
    # check nothing breaks when this is fixed...
    def productions(self, lhs=None, rhs=None):
        """Return productions filtered by C{lhs}, else by first RHS
        element, else all productions (see caveat above)."""
        if lhs and lhs in self._lhs_index:
            return self._lhs_index[lhs]
        if rhs and rhs in self._rhs_index:
            return self._rhs_index[rhs]
        return self._productions

    def __repr__(self):
        return '<Grammar with %d productions>' % len(self._productions)

    def __str__(self):
        header = 'Grammar with %d productions' % len(self._productions)
        header += ' (start state = %s)' % self._start
        return header + ''.join('\n %s' % prod for prod in self._productions)
# Verbose regex used by parse_production() to validate the overall shape
# of a production string: an LHS word, an arrow, then a sequence of
# quoted terminals, nonterminal words, and '|' disjunction bars.
_PARSE_RE = re.compile(r'''^(\w+)\s* # lhs
(?:-+>|=+>)\s* # arrow
(?:( # rhs:
"[^"]+" # doubled-quoted terminal
|'[^']+' # single-quoted terminal
|\w+| # non-terminal
\| # disjunction
)
\s*) # trailing space
*$''',
re.VERBOSE)
# Tokenizer regex used by parse_production(); capturing group keeps the
# matched tokens (words, arrows, quoted terminals, bars) in the split.
_SPLIT_RE = re.compile(r'''(\w+|-+>|=+>|"[^"]+"|'[^']+'|\|)''')
def parse_production(s):
    """
    Parse a production string such as C{"S -> NP VP | 'word'"}.

    @param s: The production string; disjunctions on the right-hand
        side yield one production per alternative.
    @type s: C{string}
    @return: A list of C{Production}s, one per alternative.
    @raise ValueError: If C{s} is not a valid production string.
    """
    # Use _PARSE_RE to check that it's valid.
    if not _PARSE_RE.match(s):
        # Parenthesized raise form is valid on both Python 2 and 3
        # (the original "raise ValueError, msg" is Py2-only syntax).
        raise ValueError('Bad production string')
    # Use _SPLIT_RE to process it: odd-indexed pieces are the tokens
    # captured by the split group; even-indexed pieces are separators.
    pieces = _SPLIT_RE.split(s)
    pieces = [p for i,p in enumerate(pieces) if i%2==1]
    lhside = Nonterminal(pieces[0])
    rhsides = [[]]
    # pieces[1] is the arrow; everything after it belongs to the RHS.
    for piece in pieces[2:]:
        if piece == '|':
            rhsides.append([])                     # Vertical bar
        elif piece[0] in ('"', "'"):
            rhsides[-1].append(piece[1:-1])        # Terminal
        else:
            rhsides[-1].append(Nonterminal(piece)) # Nonterminal
    return [Production(lhside, rhside) for rhside in rhsides]
def parse_grammar(s):
    """
    Parse a multi-line grammar description into a C{Grammar}.

    Blank lines and lines starting with C{#} are ignored.  The start
    symbol is the left-hand side of the first production.

    @param s: The grammar description, one production per line.
    @type s: C{string}
    @return: The parsed grammar.
    @rtype: L{Grammar}
    @raise ValueError: If any line fails to parse, or if no
        productions are found.
    """
    productions = []
    for linenum, line in enumerate(s.split('\n')):
        line = line.strip()
        if line.startswith('#') or line=='': continue
        try: productions += parse_production(line)
        except ValueError:
            # Parenthesized raise form is valid on both Python 2 and 3
            # (the original "raise ValueError, msg" is Py2-only syntax).
            # NOTE(review): linenum is 0-based, so the reported number is
            # one less than the conventional line number.
            raise ValueError('Unable to parse line %s' % linenum)
    if len(productions) == 0:
        raise ValueError('No productions found!')
    start = productions[0].lhs()
    return Grammar(start, productions)
#################################################################
# Demonstration
#################################################################
def demo():
"""
A demonstration showing how C{Grammar}s can be created and used.
"""
from en.parser.nltk_lite.parse import cfg
# Create some nonterminals
S, NP, VP, PP = cfg.nonterminals('S, NP, VP, PP')
N, V, P, Det = cfg.nonterminals('N, V, P, Det')
VP_slash_NP = VP/NP
print 'Some nonterminals:', [S, NP, VP, PP, N, V, P, Det, VP/NP]
print ' S.symbol() =>', `S.symbol()`
print
print cfg.Production(S, [NP])
# Create some Grammar Productions
grammar = cfg.parse_grammar("""
S -> NP VP
PP -> P NP
NP -> Det N
NP -> NP PP
VP -> V NP
VP -> VP PP
Det -> 'a'
Det -> 'the'
N -> 'dog'
N -> 'cat'
V -> 'chased'
V -> 'sat'
P -> 'on'
P -> 'in'
""")
print 'A Grammar:', `grammar`
print ' grammar.start() =>', `grammar.start()`
print ' grammar.productions() =>',
# Use string.replace(...) is to line-wrap the output.
print `grammar.productions()`.replace(',', ',\n'+' '*25)
print
if __name__ == '__main__': demo()
| Python |
# Natural Language Toolkit: Feature Structures
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>,
# Steven Bird <sb@csse.unimelb.edu.au>
# Rob Speer (original code)
# URL: <http://nltk.sourceforge.net>
# For license information, see LICENSE.TXT
#
# $Id: featurestructure.py 3273 2006-05-30 21:00:11Z stevenbird $
"""
Basic data classes for representing feature structures. A X{feature
structure} is a mapping from feature names to feature values, where:
- Each X{feature name} is a case sensitive string.
- Each X{feature value} can be a base value (such as a string), a
variable, or a nested feature structure.
Feature structures are typically used to represent partial information
about objects. A feature name that is not mapped to a value stands
for a feature whose value is unknown (I{not} a feature without a
value). Two feature structures that represent (potentially
overlapping) information about the same object can be combined by
X{unification}. When two inconsistent feature structures are unified,
the unification fails and returns C{None}.
Features are usually specified using X{feature paths}, or tuples of
feature names that specify path through the nested feature structures
to a value.
Feature structures may contain reentrant feature values. A
X{reentrant feature value} is a single feature value that can be
accessed via multiple feature paths. Unification preserves the
reentrance relations imposed by both of the unified feature
structures. After unification, any extensions to a reentrant feature
value will be visible using any of its feature paths.
Feature structure variables are encoded using the
L{FeatureVariable} class. Feature structure variables are
essentially just names; they do not directly contain values. Instead,
the mapping from variables to values is encoded externally to the
variable, as a set of X{bindings}. These bindings are stored using
the L{FeatureBindings} class.
@todo: more test cases
@sort: FeatureStructure, FeatureVariable, AliasedFeatureVariable,
FeatureBindings
@group Feature Structures: FeatureStructure
@group Variables: FeatureVariable, AliasedFeatureVariable,
FeatureBindings
@group Unit Tests: FeatureStructureTestCase
"""
# Open question: should there be a "feature" object?
#
# The current implementation doesn't actually include an object to
# encode a "feature" (i.e., a name/value pair). This makes the code
# simpler -- one less class to deal with, and you can directly query
# for feature values, rather than going through a feature object. But
# there might be use cases for adding a Feature object. E.g., if we
# wanted to assign properties (like is_required) to features. But I'd
# like to see some compelling use cases before we add it.
import re
from types import NoneType
#//////////////////////////////////////////////////////////////////////
# Variables and variable bindings
#//////////////////////////////////////////////////////////////////////
class SubstituteBindingsI:
    """
    Interface for objects that know how to replace feature variables
    with their bound values.
    """
    def substitute_bindings(self, bindings):
        """
        @return: a copy of this object in which every variable bound
            by C{bindings} has been replaced by its value.
        @rtype: (any)
        """
        raise NotImplementedError
class FeatureVariable(SubstituteBindingsI):
    """
    A variable that stands for a single feature value inside a feature
    structure.  Every variable is defined by a unique identifier:
    either a case-sensitive string (a X{named variable}) or an integer
    (a X{numbered variable}).

    Named variables are obtained by passing a string identifier to the
    C{FeatureVariable} constructor; two named variables created with
    the same identifier denote the same variable.  Numbered variables
    are obtained by calling the constructor with no arguments, which
    allocates a fresh, never-before-used identifier.

    A variable does not hold its own value.  Instead, values are
    assigned externally through a set of L{FeatureBindings}; a
    variable that is assigned a value by the bindings is X{bound}
    with respect to them, and X{unbound} otherwise.

    @see: L{FeatureStructure}
    """
    # Counter used to allocate identifiers for numbered variables.
    _next_numbered_id = 1

    def __init__(self, identifier=None):
        """
        Create a feature structure variable.

        @type identifier: C{string}
        @param identifier: A unique identifier for this variable.
            Any two C{FeatureVariable} objects with the same
            identifier are treated as the same variable.  If omitted,
            a fresh numbered identifier is allocated.
        """
        if identifier is not None:
            self._identifier = identifier
        else:
            self._identifier = FeatureVariable._next_numbered_id
            FeatureVariable._next_numbered_id += 1

    def identifier(self):
        """
        @return: This variable's unique identifier.
        @rtype: C{string}
        """
        return self._identifier

    def __repr__(self):
        """
        @return: C{'?I{x}'}, where C{I{x}} is this variable's
            identifier.
        """
        return '?%s' % self._identifier

    def __cmp__(self, other):
        # Anything that is not a feature variable sorts after us.
        if not isinstance(other, FeatureVariable): return -1
        return cmp(self._identifier, other._identifier)

    def __hash__(self):
        # Equal variables share an identifier, hence a hash value.
        return self._identifier.__hash__()

    def alias(self, variable):
        """
        @return: An aliased variable that constrains this variable to
            be equal to C{variable}.
        @rtype: L{AliasedFeatureVariable}
        """
        # Aliasing a variable with itself is a no-op.
        if self == variable: return self
        return AliasedFeatureVariable(self, variable)

    def substitute_bindings(self, bindings):
        """
        @return: The value bound to this variable by C{bindings}, or
            this variable itself if it is unbound.
        @rtype: (any)
        """
        if not bindings.is_bound(self): return self
        return bindings.lookup(self)

    # [staticmethod]
    def parse(s):
        """
        Parse the string encoding of a feature variable.  Handles both
        C{FeatureVariable}s (C{'?x'}) and C{AliasedFeatureVariable}s
        (C{'?<x=y>'}).  Numbered variables cannot be parsed, since
        doing so could violate the guarantee that each numbered
        variable object has a unique identifier.
        """
        ident_pat = r'[a-zA-Z_][a-zA-Z0-9_]*'
        # Simple variable, e.g. '?x'
        if re.match(r'\?%s$' % ident_pat, s):
            return FeatureVariable(s[1:])
        # Aliased variable, e.g. '?<x=y=z>'
        if re.match(r'\?<%s(=%s)*>$' % (ident_pat, ident_pat), s):
            names = s[2:-1].split('=')
            return AliasedFeatureVariable(*[FeatureVariable(n)
                                            for n in names])
        raise ValueError('Bad FeatureVariable string')
    parse=staticmethod(parse)
class AliasedFeatureVariable(FeatureVariable):
    """
    A set of variables that are constrained to be equal.  An aliased
    variable can be used in place of a simple variable.  In
    particular, an aliased variable stands for a single feature value,
    and requires that each of its aliases is bound to that same
    value.  Aliased variables can be categorized according to their
    values in a set of bindings:
      - An aliased variable is X{unbound} if none of its aliases
        is assigned a value.
      - An aliased variable is X{bound} if at least one of its
        aliases is bound, and all of its bound aliases are
        assigned the same value.  (If at least one alias is
        unbound, then the aliased variable is said to be X{partially
        bound}.)
      - An aliased variable is X{inconsistant} if two or more
        aliases are bound to different values.
    @ivar _aliases: The set of aliases contained by this aliased
        variable.  This set is encoded as a dictionary whose keys are
        variables.
    """
    def __init__(self, *aliases):
        """
        Construct a new feature structure variable that contains the
        given aliases.  If C{aliases} contains aliased variables,
        then they are replaced by their lists of aliases.
        @raise ValueError: If no aliases are specified.
        """
        if len(aliases) == 0:
            raise ValueError('Expected at least one alias')
        self._aliases = {}
        for subvar in aliases:
            if isinstance(subvar, AliasedFeatureVariable):
                # Flatten nested aliased variables.
                self._aliases.update(subvar._aliases)
            else:
                self._aliases[subvar] = 1

    def identifier(self):
        """
        Raise C{ValueError}, since aliased variables do not have a
        single identifier.
        """
        raise ValueError('Aliased variables do not have identifiers')

    def aliases(self):
        """
        @return: A list of the variables that are constrained to be
            equal by this aliased variable.
        """
        return self._aliases.keys()

    def __repr__(self):
        """
        @return: A string representation of this feature structure
            variable.  A variable aliasing identifiers C{I{X1}, I{X2},
            ..., I{Xn}} is represented as
            C{'?<I{X1}=I{X2}=...=I{Xn}>'}.
        """
        idents = [v._identifier for v in self.aliases()]
        idents.sort()
        return '?<' + '='.join(idents) + '>'

    def __cmp__(self, other):
        # BUG FIX: the original signature was __cmp__(self) -- missing
        # the 'other' parameter, so any comparison raised TypeError --
        # and it compared the alias dict against other._identifier.
        # Compare the (sorted) sets of alias identifiers instead.
        if not isinstance(other, AliasedFeatureVariable): return -1
        return cmp(sorted([v._identifier for v in self._aliases]),
                   sorted([v._identifier for v in other._aliases]))

    def __hash__(self):
        # BUG FIX: the original hashed the (mutable, unhashable) alias
        # dictionary, which raises TypeError.  Hash the frozen set of
        # alias identifiers so that equal aliased variables hash alike.
        return hash(frozenset([v._identifier for v in self._aliases]))
class FeatureBindings(object):
    """
    A partial mapping from feature variables to values.  Simple
    variables can be either X{bound} (i.e., assigned a value), or
    X{unbound} (i.e., left unspecified).  Aliased variables can
    additionally be X{inconsistant} (i.e., assigned multiple
    incompatible values).

    @ivar _bindings: A dictionary mapping from bound variables
        to their values.
    """
    def __init__(self, initial_bindings=None):
        """
        Construct a new set of bindings.

        @param initial_bindings: A dictionary from variables to
            values, specifying the initial assignments for the bound
            variables.
        @raise ValueError: If any initial value is itself a variable;
            variables may only be bound to values.
        """
        if initial_bindings is None: initial_bindings = {}
        # Check that variables are not used as values.
        for val in initial_bindings.values():
            if isinstance(val, FeatureVariable):
                err = 'Variables cannot be bound to other variables'
                raise ValueError(err)
        # Copy, so later mutation of the argument does not leak in.
        self._bindings = initial_bindings.copy()

    def bound_variables(self):
        """
        @return: A list of all simple variables that have been
            assigned values.
        @rtype: C{list} of L{FeatureVariable}
        """
        return self._bindings.keys()

    def is_bound(self, variable):
        """
        @return: True if the given variable is bound.  A simple
            variable is bound if it has been assigned a value.  An
            aliased variable is bound if at least one of its aliases
            is bound and all of its bound aliases are assigned the
            same value.
        @rtype: C{bool}
        """
        if isinstance(variable, AliasedFeatureVariable):
            bindings = [self._bindings.get(v)
                        for v in variable.aliases()
                        if v in self._bindings]
            if len(bindings) == 0: return 0
            # Bound only if every bound alias agrees on the value.
            inconsistant = [val for val in bindings if val != bindings[0]]
            if inconsistant: return 0
            return 1
        return variable in self._bindings

    def lookup(self, variable, update_aliased_bindings=False):
        """
        @return: The value that is assigned to the given variable, if
            it's bound; or the variable itself if it's unbound.  The
            value assigned to an aliased variable is defined as the
            value that's assigned to its bound aliases.
        @param update_aliased_bindings: If true, then looking up a
            bound aliased variable will cause any unbound aliases
            it has to be bound to its value.  E.g., if C{?x} is bound
            to C{1} and C{?y} is unbound, then looking up C{?x=y} will
            cause C{?y} to be bound to C{1}.
        @raise ValueError: If C{variable} is an aliased variable with
            an inconsistant value (i.e., if two or more of its bound
            aliases are assigned different values).
        """
        # If it's an aliased variable, then we need to check that the
        # bindings of all of its aliases are consistant.
        if isinstance(variable, AliasedFeatureVariable):
            # Get a list of all bindings.
            bindings = [self._bindings.get(v)
                        for v in variable.aliases()
                        if v in self._bindings]
            # If it's unbound, return the (aliased) variable.
            if len(bindings) == 0: return variable
            # Make sure all the bindings are equal.
            val = bindings[0]
            for binding in bindings[1:]:
                if binding != val:
                    raise ValueError('inconsistant value')
            # Set any unbound aliases, if requested
            if update_aliased_bindings:
                for subvar in variable.aliases():
                    self._bindings[subvar] = val
            # Return the value.
            return val
        return self._bindings.get(variable, variable)

    def bind(self, variable, value):
        """
        Assign a value to a variable.  If C{variable} is an aliased
        variable, then the value is assigned to all of its aliases.
        Variables can only be bound to values; they may not be bound
        to other variables.

        @raise ValueError: If C{value} is a variable.
        """
        if isinstance(value, FeatureVariable):
            raise ValueError('Variables cannot be bound to other variables')
        if isinstance(variable, AliasedFeatureVariable):
            for subvar in variable.aliases():
                self._bindings[subvar] = value
        else:
            self._bindings[variable] = value

    def copy(self):
        """
        @return: a copy of this set of bindings.
        """
        return FeatureBindings(self._bindings)

    def __repr__(self):
        """
        @return: a string representation of this set of bindings.
        """
        if self._bindings:
            bindings = ['%r=%r' % (k,v) for (k,v) in self._bindings.items()]
            return '<Bindings: %s>' % (', '.join(bindings))
        else:
            return '<Bindings (empty)>'

    def __cmp__(self, other):
        # BUG FIX: the original tested isinstance(other, FeatureVariable)
        # and compared a nonexistent '_synonyms' attribute, so comparing
        # two bindings always raised AttributeError.  Compare the
        # underlying binding dictionaries of two FeatureBindings.
        if not isinstance(other, FeatureBindings): return -1
        return cmp(self._bindings, other._bindings)
# Feature structures use identity-based-equality.
class FeatureStructure(object):
    """
    A structured set of features. These features are represented as a
    mapping from feature names to feature values, where each feature
    value is either a basic value (such as a string or an integer), or
    a nested feature structure.
    A feature structure's feature values can be accessed via indexing:
    >>> fstruct1 = FeatureStructure(number='singular', person='3rd')
    >>> print fstruct1['number']
    'singular'
    >>> fstruct2 = FeatureStructure(subject=fstruct1)
    >>> print fstruct2['subject']['person']
    '3rd'
    A nested feature value can be also accessed via a X{feature
    paths}, or a tuple of feature names that specifies the paths to
    the nested feature:
    >>> fpath = ('subject','number')
    >>> print fstruct2[fpath]
    'singular'
    Feature structures may contain reentrant feature values. A
    X{reentrant feature value} is a single feature value that can be
    accessed via multiple feature paths.
    @note: Should I present them as DAGs instead? That would make it
    easier to explain reentrancy.
    @ivar _features: A dictionary mapping from feature names to values.
    @ivar _forward: A pointer to another feature structure that
    replaced this feature structure. This is used during the
    unification process to preserve reentrance. In particular, if
    we're unifying feature structures A and B, where:
    - x and y are feature paths.
    - A contains a feature structure A[x]
    - B contains a reentrant feature structure B[x]=B[y]
    Then we need to ensure that in the unified structure C,
    C[x]=C[y]. (Here the equals sign is used to denote the object
    identity relation, i.e., C{is}.)
    """
    def __init__(self, **features):
        """Construct a feature structure from keyword arguments, one
        per feature.  Note that C{_forward} is deliberately *not* set
        here: its mere presence (tested with C{hasattr}) is what marks
        a structure as replaced during unification."""
        self._features = features
def __getitem__(self, index):
if type(index) == str:
return self._features[index]
elif len(index) == 0:
return self
elif len(index) == 1:
return self._features[index[0]]
elif isinstance(self._features[index[0]], FeatureStructure):
return self._features[index[0]][index[1:]]
else:
raise IndexError('Bad feature path')
def feature_names(self):
"""
@return: A list of the names of the features whose values are
defined by this feature structure.
@rtype: C{list} of C{string}
"""
return self._features.keys()
def equal_values(self, other, check_reentrance=False):
"""
@return: True if C{self} and C{other} assign the same value to
to every feature. In particular, return true if
C{self[M{p}]==other[M{p}]} for every feature path M{p} such
that C{self[M{p}]} or C{other[M{p}]} is a base value (i.e.,
not a nested feature structure).
Note that this is a weaker equality test than L{==<__eq__>},
which tests for equal identity.
@param check_reentrance: If true, then any difference in the
reentrance relations between C{self} and C{other} will
cause C{equal_values} to return false.
"""
if not isinstance(other, FeatureStructure): return 0
if check_reentrance: return `self` == `other`
if len(self._features) != len(other._features): return 0
for (fname, selfval) in self._features.items():
otherval = other._features[fname]
if isinstance(selfval, FeatureStructure):
if not selfval.equal_values(otherval): return 0
else:
if selfval != otherval: return 0
return 1
def __eq__(self, other):
"""
@return: True if C{self} is the same object as C{other}. This
very strict equality test is necessary because object identity
is used to distinguish reentrant objects from non-reentrant
ones.
"""
return self is other
def __hash__(self):
return id(self)
def deepcopy(self, memo=None):
"""
@return: a new copy of this feature structure.
@param memo: The memoization dicationary, which should
typically be left unspecified.
"""
# Check the memoization dictionary.
if memo is None: memo = {}
memo_copy = memo.get(id(self))
if memo_copy is not None: return memo_copy
# Create a new copy. Do this *before* we fill out its
# features, in case of cycles.
newcopy = FeatureStructure()
memo[id(self)] = newcopy
features = newcopy._features
# Fill out the features.
for (fname, fval) in self._features.items():
if isinstance(fval, FeatureStructure):
features[fname] = fval.deepcopy(memo)
else:
features[fname] = fval
return newcopy
def reentrances(self):
"""
@return: A list of all feature structures that can be reached
from C{self} by multiple feature paths.
@rtype: C{list} of L{FeatureStructure}
"""
reentrance_dict = self._find_reentrances({})
return [struct for (struct, reentrant) in reentrance_dict.items()
if reentrant]
#################################################################
## Variables
#################################################################
def apply_bindings(self, bindings):
"""
@return: The feature structure that is obtained by replacing
each variable bound by C{bindings} with its values. If
C{self} contains an aliased variable that is partially bound by
C{bindings}, then that variable's unbound aliases will be
bound to its value. E.g., if the bindings C{<?x=1>} are
applied to the feature structure C{[A = ?<x=y>]}, then the
bindings will be updated to C{<?x=1,?y=1>}.
@rtype: L{FeatureStructure}
"""
selfcopy = self.deepcopy()
selfcopy._apply_bindings(bindings, {})
return selfcopy
def rename_variables(self, newvars=None):
"""
@return: The feature structure that is obtained by replacing
each variable in this feature structure with a new variable
that has a unique identifier.
@param newvars: A dictionary that is used to hold the mapping
from old variables to new variables. For each variable M{v}
in this feature structure:
- If C{newvars} maps M{v} to M{v'}, then M{v} will be
replaced by M{v'}.
- If C{newvars} does not contain M{v}, then a new entry
will be added to C{newvars}, mapping M{v} to the new
variable that is used to replace it.
To consistantly rename the variables in a set of feature
structures, simply apply rename_variables to each one, using
the same dictionary:
>>> newvars = {} # Maps old vars to alpha-renamed vars
>>> new_fstruct1 = ftruct1.rename_variables(newvars)
>>> new_fstruct2 = ftruct2.rename_variables(newvars)
>>> new_fstruct3 = ftruct3.rename_variables(newvars)
If newvars is not specified, then an empty dictionary is used.
@type newvars: C{dictionary} from L{FeatureStructureVariable}
to L{FeatureStructureVariable}
@rtype: L{FeatureStructure}
"""
if newvars is None: newvars = {}
selfcopy = self.deepcopy()
selfcopy._rename_variables(newvars, {})
return selfcopy
def _apply_bindings(self, bindings, visited):
# Visit each node only once:
if visited.has_key(id(self)): return
visited[id(self)] = 1
for (fname, fval) in self._features.items():
if isinstance(fval, SubstituteBindingsI):
self._features[fname] = fval.substitute_bindings(bindings)
if isinstance(fval, FeatureStructure):
fval._apply_bindings(bindings, visited)
def _rename_variables(self, newvars, visited):
# Visit each node only once:
if visited.has_key(id(self)): return
visited[id(self)] = 1
for (fname, fval) in self._features.items():
if isinstance(fval, FeatureVariable):
if not newvars.has_key(fval):
newvars[fval] = FeatureVariable()
self._features[fname] = newvars[fval]
elif isinstance(fval, FeatureStructure):
fval._rename_variables(newvars, visited)
#################################################################
## Unification
#################################################################
# The basic unification algorithm:
# 1. Make copies of self and other (preserving reentrance)
# 2. Destructively unify self and other
# 3. Apply forward pointers, to preserve reentrance.
# 4. Find any partially bound aliased variables, and bind them.
# 5. Replace bound variables with their values.
    def unify(self, other, bindings=None, trace=False):
        """
        Unify C{self} with C{other}, and return the resulting feature
        structure. This unified feature structure is the minimal
        feature structure that:
        - contains all feature value assignments from both C{self}
        and C{other}.
        - preserves all reentrance properties of C{self} and
        C{other}.
        If no such feature structure exists (because C{self} and
        C{other} specify incompatible values for some feature), then
        unification fails, and C{unify} returns C{None}.
        @param bindings: A set of variable bindings to be used and
        updated during unification. Bound variables are
        treated as if they were replaced by their values. Unbound
        variables are bound if they are unified with values; or
        aliased if they are unified with other unbound variables.
        If C{bindings} is unspecified, then all variables are
        assumed to be unbound.
        @rtype: L{FeatureStructure} or C{None}
        """
        if trace: print '\nUnification trace:'
        # If bindings are unspecified, use an empty set of bindings.
        if bindings is None: bindings = FeatureBindings()
        # Make copies of self & other (since the unification algorithm
        # is destructive). Use the same memo, to preserve reentrance
        # links between self and other.
        memo = {}
        selfcopy = self.deepcopy(memo)
        othercopy = other.deepcopy(memo)
        # Preserve reentrance links from bound variables into either
        # self or other.  (memo maps old ids to new copies, so a
        # binding pointing into self/other is redirected to the copy.)
        for var in bindings.bound_variables():
            valid = id(bindings.lookup(var))
            if memo.has_key(valid):
                bindings.bind(var, memo[valid])
        # Do the actual unification. If it fails, return None.
        try: selfcopy._destructively_unify(othercopy, bindings, trace)
        except FeatureStructure._UnificationFailureError: return None
        # Replace any feature structure that has a forward pointer
        # with the target of its forward pointer.
        selfcopy._apply_forwards_to_bindings(bindings)
        selfcopy._apply_forwards(visited={})
        # Find any partially bound aliased variables, and bind their
        # unbound aliases.
        selfcopy._rebind_aliased_variables(bindings, visited={})
        # Replace bound vars with values.
        selfcopy._apply_bindings(bindings, visited={})
        # Return the result.
        return selfcopy
class _UnificationFailureError(Exception):
""" An exception that is used by C{_destructively_unify} to
abort unification when a failure is encountered. """
    # TODO: verify whether unifying a cyclic self with another
    # structure behaves correctly (see the termination note below).
    def _destructively_unify(self, other, bindings, trace=False,
                             ci_str_cmp=False, depth=0):
        """
        Attempt to unify C{self} and C{other} by modifying them
        in-place. If the unification succeeds, then C{self} will
        contain the unified value, and the value of C{other} is
        undefined. If the unification fails, then a
        _UnificationFailureError is raised, and the values of C{self}
        and C{other} are undefined.

        @param ci_str_cmp: If true, string values that differ only in
            case are considered equal (Case 4A below).
        @param depth: Current recursion depth; used only to indent the
            trace output.
        """
        if trace:
            # apply_forwards to get reentrancy links right:
            self._apply_forwards({})
            other._apply_forwards({})
            print ' '+'| '*depth+' /'+`self`
            print ' '+'| '*depth+'|\\'+ `other`
        # Look up the "canonical" copy of other.
        while hasattr(other, '_forward'): other = other._forward
        # If self is already identical to other, we're done.
        # Note: this, together with the forward pointers, ensures
        # that unification will terminate even for cyclic structures.
        # [XX] Verify/prove this?
        if self is other:
            if trace:
                print ' '+'| '*depth+'|'
                print ' '+'| '*depth+'| (identical objects)'
                print ' '+'| '*depth+'|'
                print ' '+'| '*depth+'+-->'+`self`
            return
        # Set other's forward pointer to point to self; this makes us
        # into the canonical copy of other.
        other._forward = self
        for (fname, otherval) in other._features.items():
            if trace:
                # Remember the pre-unification values for trace output.
                trace_otherval = otherval
                trace_selfval_defined = self._features.has_key(fname)
                trace_selfval = self._features.get(fname)
            if self._features.has_key(fname):
                selfval = self._features[fname]
                # If selfval or otherval is a bound variable, then
                # replace it by the variable's bound value.
                if isinstance(selfval, FeatureVariable):
                    selfval = bindings.lookup(selfval)
                if isinstance(otherval, FeatureVariable):
                    otherval = bindings.lookup(otherval)
                if trace:
                    print ' '+'| '*(depth+1)
                    print ' '+'%s| Unify %s feature:'%('| '*(depth),fname)
                # Case 1: unify 2 feature structures (recursive case)
                if (isinstance(selfval, FeatureStructure) and
                    isinstance(otherval, FeatureStructure)):
                    selfval._destructively_unify(otherval, bindings,
                                                 trace, depth+1)
                # Case 2: unify 2 variables
                elif (isinstance(selfval, FeatureVariable) and
                      isinstance(otherval, FeatureVariable)):
                    self._features[fname] = selfval.alias(otherval)
                # Case 3: unify a variable with a value
                elif isinstance(selfval, FeatureVariable):
                    bindings.bind(selfval, otherval)
                elif isinstance(otherval, FeatureVariable):
                    bindings.bind(otherval, selfval)
                # Case 4A: unify two strings, case-insensitively.
                elif ci_str_cmp and \
                    isinstance(selfval, str) and isinstance(otherval, str)\
                    and selfval.upper() == otherval.upper():
                    pass
                # Case 4: unify 2 non-equal values (failure case)
                elif selfval != otherval:
                    if trace: print ' '+'| '*depth + 'X <-- FAIL'
                    raise FeatureStructure._UnificationFailureError()
                # Case 5: unify 2 equal values
                else: pass
                if trace and not isinstance(selfval, FeatureStructure):
                    # apply_forwards to get reentrancy links right:
                    if isinstance(trace_selfval, FeatureStructure):
                        trace_selfval._apply_forwards({})
                    if isinstance(trace_otherval, FeatureStructure):
                        trace_otherval._apply_forwards({})
                    print ' '+'%s| /%r' % ('| '*(depth), trace_selfval)
                    print ' '+'%s| |\\%r' % ('| '*(depth), trace_otherval)
                    print ' '+'%s| +-->%r' % ('| '*(depth),
                                              self._features[fname])
            # Case 5: copy from other
            else:
                self._features[fname] = otherval
        if trace:
            # apply_forwards to get reentrancy links right:
            self._apply_forwards({})
            print ' '+'| '*depth+'|'
            print ' '+'| '*depth+'+-->'+`self`
            if len(bindings.bound_variables()) > 0:
                print ' '+'| '*depth+' '+`bindings`
def _apply_forwards_to_bindings(self, bindings):
"""
Replace any feature structure that has a forward pointer with
the target of its forward pointer (to preserve reentrancy).
"""
for var in bindings.bound_variables():
value = bindings.lookup(var)
if (isinstance(value, FeatureStructure) and
hasattr(value, '_forward')):
while hasattr(value, '_forward'):
value = value._forward
bindings.bind(var, value)
def _apply_forwards(self, visited):
"""
Replace any feature structure that has a forward pointer with
the target of its forward pointer (to preserve reentrancy).
"""
# Visit each node only once:
if visited.has_key(id(self)): return
visited[id(self)] = 1
for fname, fval in self._features.items():
if isinstance(fval, FeatureStructure):
while hasattr(fval, '_forward'):
fval = fval._forward
self._features[fname] = fval
fval._apply_forwards(visited)
def _rebind_aliased_variables(self, bindings, visited):
# Visit each node only once:
if visited.has_key(id(self)): return
visited[id(self)] = 1
for (fname, fval) in self._features.items():
if isinstance(fval, AliasedFeatureVariable):
bindings.lookup(fval, True)
elif isinstance(fval, FeatureStructure):
fval._rebind_aliased_variables(bindings, visited)
def subsumes(self, other):
"""
Check if this feature structure subsumes another feature structure.
"""
return other.equal_values(self.unify(other))
#################################################################
## String Representations
#################################################################
def __repr__(self):
"""
Display a single-line representation of this feature structure,
suitable for embedding in other representations.
"""
return self._repr(self._find_reentrances({}), {})
def __str__(self):
"""
Display a multi-line representation of this feature structure
as an FVM (feature value matrix).
"""
return '\n'.join(self._str(self._find_reentrances({}), {}))
    def _repr(self, reentrances, reentrance_ids):
        """
        @return: A string representation of this feature structure.
        @param reentrances: A dictionary that maps from the C{id} of
        each feature value in self, indicating whether that value
        is reentrant or not.
        @param reentrance_ids: A dictionary mapping from the C{id}s
        of feature values to unique identifiers. This is modified
        by C{repr}: the first time a reentrant feature value is
        displayed, an identifier is added to reentrance_ids for
        it.
        """
        segments = []
        # If this is the first time we've seen a reentrant structure,
        # then assign it a unique identifier.
        if reentrances[id(self)]:
            assert not reentrance_ids.has_key(id(self))
            # Backquotes are Python-2 repr(): ids are stored as strings.
            reentrance_ids[id(self)] = `len(reentrance_ids)+1`
        items = self._features.items()
        items.sort() # sorting note: keys are unique strings, so we'll
        # never fall through to comparing values.
        for (fname, fval) in items:
            if not isinstance(fval, FeatureStructure):
                segments.append('%s=%r' % (fname, fval))
            elif reentrance_ids.has_key(id(fval)):
                # Previously displayed structure: emit an arrow to it.
                segments.append('%s->(%s)' % (fname,
                                              reentrance_ids[id(fval)]))
            else:
                fval_repr = fval._repr(reentrances, reentrance_ids)
                segments.append('%s=%s' % (fname, fval_repr))
        # If it's reentrant, then add on an identifier tag.
        if reentrances[id(self)]:
            return '(%s)[%s]' % (reentrance_ids[id(self)],
                                 ', '.join(segments))
        else:
            return '[%s]' % (', '.join(segments))
    def _str(self, reentrances, reentrance_ids):
        """
        @return: A list of lines composing a string representation of
        this feature structure.
        @param reentrances: A dictionary that maps from the C{id} of
        each feature value in self, indicating whether that value
        is reentrant or not.
        @param reentrance_ids: A dictionary mapping from the C{id}s
        of feature values to unique identifiers. This is modified
        by C{repr}: the first time a reentrant feature value is
        displayed, an identifier is added to reentrance_ids for
        it.
        """
        # If this is the first time we've seen a reentrant structure,
        # then tack on an id string.
        if reentrances[id(self)]:
            assert not reentrance_ids.has_key(id(self))
            # Backquotes are Python-2 repr(): ids are stored as strings.
            reentrance_ids[id(self)] = `len(reentrance_ids)+1`
        # Special case:
        if len(self._features) == 0:
            if reentrances[id(self)]:
                return ['(%s) []' % reentrance_ids[id(self)]]
            else:
                return ['[]']
        # What's the longest feature name? Use this to align names.
        maxfnamelen = max([len(k) for k in self.feature_names()])
        lines = []
        items = self._features.items()
        items.sort() # sorting note: keys are unique strings, so we'll
        # never fall through to comparing values.
        for (fname, fval) in items:
            if not isinstance(fval, FeatureStructure):
                # It's not a nested feature structure -- just print it.
                lines.append('%s = %r' % (fname.ljust(maxfnamelen), fval))
            elif reentrance_ids.has_key(id(fval)):
                # It's a feature structure we've seen before -- print
                # the reentrance id.
                lines.append('%s -> (%s)' % (fname.ljust(maxfnamelen),
                                             reentrance_ids[id(fval)]))
            else:
                # It's a new feature structure. Separate it from
                # other values by a blank line.
                if lines and lines[-1] != '': lines.append('')
                # Recursively print the feature's value (fval).
                fval_lines = fval._str(reentrances, reentrance_ids)
                # Indent each line to make room for fname.
                fval_lines = [(' '*(maxfnamelen+3))+l for l in fval_lines]
                # Pick which line we'll display fname on.
                # (Python-2 integer division: the middle line.)
                nameline = (len(fval_lines)-1)/2
                fval_lines[nameline] = (
                    fname.ljust(maxfnamelen)+' ='+
                    fval_lines[nameline][maxfnamelen+2:])
                # Add the feature structure to the output.
                lines += fval_lines
                # Separate FeatureStructures by a blank line.
                lines.append('')
        # Get rid of any excess blank lines.
        if lines[-1] == '': lines = lines[:-1]
        # Add brackets around everything.
        maxlen = max([len(line) for line in lines])
        lines = ['[ %s%s ]' % (line, ' '*(maxlen-len(line))) for line in lines]
        # If it's reentrant, then add on an identifier tag.
        if reentrances[id(self)]:
            idstr = '(%s) ' % reentrance_ids[id(self)]
            lines = [(' '*len(idstr))+l for l in lines]
            idline = (len(lines)-1)/2
            lines[idline] = idstr + lines[idline][len(idstr):]
        return lines
# Walk through the feature tree. The first time we see a feature
# value, map it to False (not reentrant). If we see a feature
# value more than once, then map it to C{True} (reentrant).
def _find_reentrances(self, reentrances):
"""
Find all of the feature values contained by self that are
reentrant (i.e., that can be reached by multiple paths through
feature structure's features). Return a dictionary
C{reentrances} that maps from the C{id} of each feature value
to a boolean value, indicating whether it is reentrant or not.
"""
if reentrances.has_key(id(self)):
# We've seen it more than once.
reentrances[id(self)] = True
else:
# This is the first time we've seen it.
reentrances[id(self)] = False
# Recurse to contained feature structures.
for fval in self._features.values():
if isinstance(fval, FeatureStructure):
fval._find_reentrances(reentrances)
return reentrances
#################################################################
## Parsing
#################################################################
# [classmethod]
    def parse(cls, s):
        """
        Convert a string representation of a feature structure (as
        displayed by repr) into a C{FeatureStructure}. This parse
        imposes the following restrictions on the string
        representation:
        - Feature names cannot contain any of the following:
        whitespace, parenthases, quote marks, equals signs,
        dashes, and square brackets.
        - Only the following basic feature value are supported:
        strings, integers, variables, C{None}, and unquoted
        alphanumeric strings.
        - For reentrant values, the first mention must specify
        a reentrance identifier and a value; and any subsequent
        mentions must use arrows (C{'->'}) to reference the
        reentrance identifier.
        @raise ValueError: If C{s} cannot be parsed, with a caret
            diagnostic pointing at the failure position.
        """
        try:
            value, position = cls._parse(s, 0, {})
        except ValueError, e:
            # e.args is (expected-description, position); build a
            # two-line message with a caret under the failure point.
            estr = ('Error parsing field structure\n\n ' +
                    s + '\n ' + ' '*e.args[1] + '^ ' +
                    'Expected %s\n' % e.args[0])
            raise ValueError, estr
        # Trailing, unconsumed text is also a parse error.
        if position != len(s): raise ValueError()
        return value
# Regular expressions for parsing.
_PARSE_RE = {'name': re.compile(r'\s*([^\s\(\)"\'\-=\[\]]+)\s*'),
'ident': re.compile(r'\s*\((\d+)\)\s*'),
'reentrance': re.compile(r'\s*->\s*'),
'assign': re.compile(r'\s*=\s*'),
'bracket': re.compile(r'\s*]\s*'),
'comma': re.compile(r'\s*,\s*'),
'none': re.compile(r'None(?=\s|\]|,)'),
'int': re.compile(r'-?\d+(?=\s|\]|,)'),
'var': re.compile(r'\?[a-zA-Z_][a-zA-Z0-9_]*'+'|'+
r'\?<[a-zA-Z_][a-zA-Z0-9_]*'+
r'(=[a-zA-Z_][a-zA-Z0-9_]*)*>'),
'symbol': re.compile(r'\w+'),
'stringmarker': re.compile("['\"\\\\]")}
# [classmethod]
def _parse(cls, s, position=0, reentrances=None):
"""
Helper function that parses a feature structure.
@param s: The string to parse.
@param position: The position in the string to start parsing.
@param reentrances: A dictionary from reentrance ids to values.
@return: A tuple (val, pos) of the feature structure created
by parsing and the position where the parsed feature
structure ends.
"""
# A set of useful regular expressions (precompiled)
_PARSE_RE = cls._PARSE_RE
# Check that the string starts with an open bracket.
if s[position] != '[': raise ValueError('open bracket', position)
position += 1
# If it's immediately followed by a close bracket, then just
# return an empty feature structure.
match = _PARSE_RE['bracket'].match(s, position)
if match is not None: return cls(), match.end()
# Build a list of the features defined by the structure.
# Each feature has one of the three following forms:
# name = value
# name (id) = value
# name -> (target)
features = {}
while position < len(s):
# Use these variables to hold info about the feature:
name = id = target = val = None
# Find the next feature's name.
match = _PARSE_RE['name'].match(s, position)
if match is None: raise ValueError('feature name', position)
name = match.group(1)
position = match.end()
# Check for a reentrance link ("-> (target)")
match = _PARSE_RE['reentrance'].match(s, position)
if match is not None:
position = match.end()
match = _PARSE_RE['ident'].match(s, position)
if match is None: raise ValueError('identifier', position)
target = match.group(1)
position = match.end()
try: features[name] = reentrances[target]
except: raise ValueError('bound identifier', position)
# If it's not a reentrance link, it must be an assignment.
else:
match = _PARSE_RE['assign'].match(s, position)
if match is None: raise ValueError('equals sign', position)
position = match.end()
# Find the feature's id (if specified)
match = _PARSE_RE['ident'].match(s, position)
if match is not None:
id = match.group(1)
if reentrances.has_key(id):
raise ValueError('new identifier', position+1)
position = match.end()
val, position = cls._parseval(s, position, reentrances)
features[name] = val
if id is not None:
reentrances[id] = val
# Check for a close bracket
match = _PARSE_RE['bracket'].match(s, position)
if match is not None:
return cls(**features), match.end()
# Otherwise, there should be a comma
match = _PARSE_RE['comma'].match(s, position)
if match is None: raise ValueError('comma', position)
position = match.end()
# We never saw a close bracket.
raise ValueError('close bracket', position)
    # [classmethod] -- bound below via classmethod(); see end of class body.
    def _parseval(cls, s, position, reentrances):
        """
        Helper function that parses a single feature value.  Currently
        supports: None, integers, variables, strings, nested feature
        structures.

        The candidate forms are tried in a fixed order: string, nested
        structure, variable, None, integer, and finally bare symbol.
        The integer check must precede the symbol check, since a run of
        digits would also match the symbol pattern.

        @param s: The string to parse.
        @param position: The position in the string to start parsing.
        @param reentrances: A dictionary from reentrance ids to values.
        @return: A tuple (val, pos) of the value created by parsing
            and the position where the parsed value ends.
        @raise ValueError: If no value can be parsed at C{position};
            the exception args are (expected-description, position).
        """
        # A set of useful regular expressions (precompiled)
        _PARSE_RE = cls._PARSE_RE
        # End of string (error)
        if position == len(s): raise ValueError('value', position)
        # String value: scan forward for the matching close quote,
        # skipping over backslash-escaped characters.
        if s[position] in "'\"":
            start = position
            quotemark = s[position:position+1]
            position += 1
            while 1:
                match = _PARSE_RE['stringmarker'].search(s, position)
                if not match: raise ValueError('close quote', position)
                position = match.end()
                # A backslash escapes the next character: step over it so
                # an escaped quote does not terminate the string.
                if match.group() == '\\': position += 1
                elif match.group() == quotemark:
                    # eval() decodes the escape sequences of the quoted
                    # literal.  The slice is a balanced quoted string at
                    # this point, but note that eval on arbitrary input
                    # would be unsafe in general.
                    return eval(s[start:position]), position
        # Nested feature structure: delegate back to the structure parser.
        if s[position] == '[':
            return cls._parse(s, position, reentrances)
        # Variable
        match = _PARSE_RE['var'].match(s, position)
        if match is not None:
            return FeatureVariable.parse(match.group()), match.end()
        # None
        match = _PARSE_RE['none'].match(s, position)
        if match is not None:
            return None, match.end()
        # Integer value
        match = _PARSE_RE['int'].match(s, position)
        if match is not None:
            return int(match.group()), match.end()
        # Alphanumeric symbol (must be checked after integer)
        match = _PARSE_RE['symbol'].match(s, position)
        if match is not None:
            return match.group(), match.end()
        # We don't know how to parse this value.
        raise ValueError('value', position)
_parseval=classmethod(_parseval)
_parse=classmethod(_parse)
parse=classmethod(parse)
#//////////////////////////////////////////////////////////////////////
# TESTING...
#//////////////////////////////////////////////////////////////////////
import unittest
# Note: since FeatureStructure.__repr__() sorts by keys before
# displaying, there is a single unique string repr for each
# FeatureStructure.
class FeatureStructureTestCase(unittest.TestCase):
    'Unit testing for FeatureStructure'
    # Note: the failUnless*/assert* pairs are aliases, but the failUnless*
    # names are deprecated and were removed in Python 3.12; the assert*
    # names are used here instead.

    def testUnification(self):
        'Basic unification tests'

        # Copying from self to other.
        fs1 = FeatureStructure(number='singular')
        fs2 = fs1.unify(FeatureStructure())
        self.assertEqual(repr(fs2), "[number='singular']")

        # Copying from other to self
        fs1 = FeatureStructure()
        fs2 = fs1.unify(FeatureStructure(number='singular'))
        self.assertEqual(repr(fs2), "[number='singular']")

        # Cross copying
        fs1 = FeatureStructure(number='singular')
        fs2 = fs1.unify(FeatureStructure(person=3))
        self.assertEqual(repr(fs2), "[number='singular', person=3]")

        # Merging a nested structure
        fs1 = FeatureStructure.parse('[A=[B=b]]')
        fs2 = FeatureStructure.parse('[A=[C=c]]')
        fs3 = fs1.unify(fs2)
        self.assertEqual(repr(fs3), "[A=[B='b', C='c']]")

    def testReentrantUnification(self):
        'Reentrant unification tests'
        # A basic case of reentrant unification
        fs1 = FeatureStructure.parse('[A=(1)[B=b], E=[F->(1)]]')
        fs2 = FeatureStructure.parse("[A=[C='c'], E=[F=[D='d']]]")
        fs3 = fs1.unify(fs2)
        fs3repr = "[A=(1)[B='b', C='c', D='d'], E=[F->(1)]]"
        self.assertEqual(repr(fs3), fs3repr)
        fs3 = fs2.unify(fs1) # Try unifying both ways.
        self.assertEqual(repr(fs3), fs3repr)

        # More than 2 paths to a value
        fs1 = FeatureStructure.parse("[a=[],b=[],c=[],d=[]]")
        fs2 = FeatureStructure.parse('[a=(1)[], b->(1), c->(1), d->(1)]')
        fs3 = fs1.unify(fs2)
        self.assertEqual(repr(fs3), '[a=(1)[], b->(1), c->(1), d->(1)]')

        # fs1[a] gets unified with itself (must not crash or loop):
        fs1 = FeatureStructure.parse('[x=(1)[], y->(1)]')
        fs2 = FeatureStructure.parse('[x=(1)[], y->(1)]')
        fs3 = fs1.unify(fs2)

    def testVariableForwarding(self):
        'Bound variables should get forwarded appropriately'
        fs1 = FeatureStructure.parse('[A=(1)[X=x], B->(1), C=?cvar, D=?dvar]')
        fs2 = FeatureStructure.parse('[A=(1)[Y=y], B=(2)[Z=z], C->(1), D->(2)]')
        fs3 = fs1.unify(fs2)
        fs3repr = ("[A=(1)[X='x', Y='y', Z='z'], B->(1), C->(1), D->(1)]")
        self.assertEqual(repr(fs3), fs3repr)

    def testCyclicStructures(self):
        'Cyclic structure tests'
        # Create a cyclic structure via unification.
        fs1 = FeatureStructure.parse('[F=(1)[], G->(1)]')
        fs2 = FeatureStructure.parse('[F=[H=(2)[]], G->(2)]')
        fs3 = fs1.unify(fs2)

        # Check that we got the value right.
        self.assertEqual(repr(fs3), '[F=(1)[H->(1)], G->(1)]')

        # Check that we got the cyclicity right.
        self.assertTrue(fs3['F'] is fs3['G'])
        self.assertTrue(fs3['F'] is fs3['G', 'H'])
        self.assertTrue(fs3['F'] is fs3['G', 'H', 'H'])
        self.assertTrue(fs3['F'] is fs3[('G',)+(('H',)*10)])

        # Create a cyclic structure with variables.
        x = FeatureVariable('x')
        fs1 = FeatureStructure(F=FeatureStructure(H=x))
        fs2 = FeatureStructure(F=x)
        fs3 = fs1.unify(fs2)

        # Check that we got the value right.
        self.assertEqual(repr(fs3), '[F=(1)[H->(1)]]')

        # Check that we got the cyclicity right.
        self.assertTrue(fs3['F'] is fs3['F','H'])
        self.assertTrue(fs3['F'] is fs3['F','H','H'])
        self.assertTrue(fs3['F'] is fs3[('F',)+(('H',)*10)])

        # Cyclic structure as LHS
        fs4 = FeatureStructure.parse('[F=[H=[H=[H=(1)[]]]], K->(1)]')
        fs5 = fs3.unify(fs4)
        self.assertEqual(repr(fs5), '[F=(1)[H->(1)], K->(1)]')

        # Cyclic structure as RHS
        fs6 = fs4.unify(fs3)
        self.assertEqual(repr(fs6), '[F=(1)[H->(1)], K->(1)]')

        # LHS and RHS both cyclic (smoke test: must terminate).
        fs7 = fs3.unify(fs3.deepcopy())

    def testVariablesPreserveReentrance(self):
        'Variable bindings should preserve reentrance.'
        bindings = FeatureBindings()
        fs1 = FeatureStructure.parse("[a=?x]")
        fs2 = fs1.unify(FeatureStructure.parse("[a=[]]"), bindings)
        fs3 = fs2.unify(FeatureStructure.parse("[b=?x]"), bindings)
        self.assertEqual(repr(fs3), '[a=(1)[], b->(1)]')

    def testVariableMerging(self):
        'Aliased variable tests'
        fs1 = FeatureStructure.parse("[a=?x, b=?x]")
        fs2 = fs1.unify(FeatureStructure.parse("[b=?y, c=?y]"))
        self.assertEqual(repr(fs2), '[a=?x, b=?<x=y>, c=?y]')
        fs3 = fs2.unify(FeatureStructure.parse("[a=1]"))
        self.assertEqual(repr(fs3), '[a=1, b=1, c=1]')

        fs1 = FeatureStructure.parse("[a=1]")
        fs2 = FeatureStructure.parse("[a=?x, b=?x]")
        fs3 = fs2.unify(fs1)
        self.assertEqual(repr(fs3), '[a=1, b=1]')
def testsuite():
    """Return a C{TestSuite} containing all FeatureStructure test cases."""
    # unittest.makeSuite is deprecated (and removed in Python 3.13);
    # the TestLoader API below is available in every unittest version
    # and builds the same suite.
    t1 = unittest.TestLoader().loadTestsFromTestCase(FeatureStructureTestCase)
    return unittest.TestSuite( (t1,) )
def test(verbosity):
    """Run the FeatureStructure test suite at the given verbosity level."""
    unittest.TextTestRunner(verbosity=verbosity).run(testsuite())
#//////////////////////////////////////////////////////////////////////
# Demo..
#//////////////////////////////////////////////////////////////////////
def display_unification(fs1, fs2, indent=' '):
    """Print fs1 and fs2 side by side, unify them, pretty-print the
    result (or a failure marker), and return the unification result."""
    # Print the two input feature structures, side by side.
    fs1_lines = str(fs1).split('\n')
    fs2_lines = str(fs2).split('\n')
    # Pad the shorter column with blank "[    ]" lines so the two
    # columns can be zipped line-for-line.  (More padding lines are
    # appended than strictly needed; zip() below truncates the excess.)
    if len(fs1_lines) > len(fs2_lines):
        blankline = '['+' '*(len(fs2_lines[0])-2)+']'
        fs2_lines += [blankline]*len(fs1_lines)
    else:
        blankline = '['+' '*(len(fs1_lines[0])-2)+']'
        fs1_lines += [blankline]*len(fs2_lines)
    for (fs1_line, fs2_line) in zip(fs1_lines, fs2_lines):
        print indent + fs1_line + ' ' + fs2_line
    print indent+'-'*len(fs1_lines[0])+' '+'-'*len(fs2_lines[0])

    # Draw an arrow diagram joining the two columns, centered on the
    # combined width of the display.
    linelen = len(fs1_lines[0])*2+3
    print indent+'| |'.center(linelen)
    print indent+'+-----UNIFY-----+'.center(linelen)
    print indent+'|'.center(linelen)
    print indent+'V'.center(linelen)

    bindings = FeatureBindings()

    result = fs1.unify(fs2, bindings)
    if result is None:
        print indent+'(FAILED)'.center(linelen)
    else:
        print '\n'.join([indent+l.center(linelen)
                         for l in str(result).split('\n')])
        # Only show the bindings if any variable actually got bound.
        if bindings and len(bindings.bound_variables()) > 0:
            print repr(bindings).center(linelen)
    return result
def demo(trace=False):
    """Interactively unify pairs of feature structures chosen by the
    user from a growing list; new unification results are added to the
    list of choices.  Reads commands from stdin until 'q' is entered."""
    import random, sys

    HELP = '''
    1-%d: Select the corresponding feature structure
    q: Quit
    t: Turn tracing on or off
    l: List all feature structures
    ?: Help
    '''

    print '''
    This demo will repeatedly present you with a list of feature
    structures, and ask you to choose two for unification.  Whenever a
    new feature structure is generated, it is added to the list of
    choices that you can pick from.  However, since this can be a
    large number of feature structures, the demo will only print out a
    random subset for you to choose between at a given time.  If you
    want to see the complete lists, type "l".  For a list of valid
    commands, type "?".
    '''
    print 'Press "Enter" to continue...'
    sys.stdin.readline()

    fstruct_strings = [
        '[agr=[number=sing, gender=masc]]',
        '[agr=[gender=masc, person=3rd]]',
        '[agr=[gender=fem, person=3rd]]',
        '[subj=[agr=(1)[]], agr->(1)]',
        '[obj=?x]', '[subj=?x]',
        '[/=None]', '[/=NP]',
        '[cat=NP]', '[cat=VP]', '[cat=PP]',
        '[subj=[agr=[gender=?y]], obj=[agr=[gender=?y]]]',
        '[gender=masc, agr=?C]',
        '[gender=?S, agr=[gender=?S,person=3rd]]'
        ]

    # Master list of (index, parsed structure) pairs; grows as new
    # unification results are produced.
    all_fstructs = [(i, FeatureStructure.parse(fstruct_strings[i]))
                    for i in range(len(fstruct_strings))]

    def list_fstructs(fstructs):
        # Print each structure with its 1-based selection number.
        for i, fstruct in fstructs:
            print
            lines = str(fstruct).split('\n')
            print '%3d: %s' % (i+1, lines[0])
            for line in lines[1:]: print ' '+line
        print

    while 1:
        # Pick 5 feature structures at random from the master list.
        MAX_CHOICES = 5
        if len(all_fstructs) > MAX_CHOICES:
            fstructs = random.sample(all_fstructs, MAX_CHOICES)
            fstructs.sort()
        else:
            fstructs = all_fstructs

        print '_'*75

        print 'Choose two feature structures to unify:'
        list_fstructs(fstructs)

        selected = [None,None]
        for (nth,i) in (('First',0), ('Second',1)):
            while selected[i] is None:
                # Trailing comma suppresses the newline (Python 2 print).
                print ('%s feature structure (1-%d,q,t,l,?): '
                       % (nth, len(all_fstructs))),
                try:
                    # NOTE: "input" shadows the builtin of the same name
                    # within this loop.
                    input = sys.stdin.readline().strip()
                    if input in ('q', 'Q', 'x', 'X'): return
                    if input in ('t', 'T'):
                        trace = not trace
                        print ' Trace = %s' % trace
                        continue
                    if input in ('h', 'H', '?'):
                        print HELP % len(fstructs); continue
                    if input in ('l', 'L'):
                        list_fstructs(all_fstructs); continue
                    num = int(input)-1
                    selected[i] = all_fstructs[num][1]
                    print
                except:
                    # Any parse/index failure is treated as bad input.
                    print 'Bad sentence number'
                    continue

        if trace:
            result = selected[0].unify(selected[1], trace=1)
        else:
            result = display_unification(selected[0], selected[1])
        if result is not None:
            # Only add the result if an identical structure (by repr)
            # is not already in the master list (for/else idiom).
            for i, fstruct in all_fstructs:
                if `result` == `fstruct`: break
            else:
                all_fstructs.append((len(all_fstructs), result))

        print '\nType "Enter" to continue unifying; or "q" to quit.'
        input = sys.stdin.readline().strip()
        if input in ('q', 'Q', 'x', 'X'): return
if __name__ == '__main__':
    # Run the unit tests quietly, then start the interactive demo.
    test(verbosity=0)
    demo()
| Python |
# Natural Language Toolkit: A Chunk Parser
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Steven Bird <sb@csse.unimelb.edu.au>
# Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Classes and interfaces for identifying non-overlapping linguistic
groups (such as base noun phrases) in unrestricted text. This task is
called X{chunk parsing} or X{chunking}, and the identified groups are
called X{chunks}. The chunked text is represented using a shallow
tree called a "chunk structure." A X{chunk structure} is a tree
containing tokens and chunks, where each chunk is a subtree containing
only tokens. For example, the chunk structure for base noun phrase
chunks in the sentence "I saw the big dog on the hill" is::
(SENTENCE:
(NP: <I>)
<saw>
(NP: <the> <big> <dog>)
<on>
(NP: <the> <hill>))
To convert a chunk structure back to a list of tokens, simply use the
chunk structure's L{leaves<Tree.leaves>} method.
The C{parser.chunk} module defines L{ChunkI}, a standard interface for
chunking texts; and L{RegexpChunk}, a regular-expression based
implementation of that interface. It uses the L{tree.chunk} and
L{tree.conll_chunk} methods, which tokenize strings containing chunked
and tagged texts. It defines L{ChunkScore}, a utility class for
scoring chunk parsers.
RegexpChunk
===========
C{parse.RegexpChunk} is an implementation of the chunk parser interface
that uses regular-expressions over tags to chunk a text. Its
C{parse} method first constructs a C{ChunkString}, which encodes a
particular chunking of the input text. Initially, nothing is
chunked. C{parse.RegexpChunk} then applies a sequence of
C{RegexpChunkRule}s to the C{ChunkString}, each of which modifies
the chunking that it encodes. Finally, the C{ChunkString} is
transformed back into a chunk structure, which is returned.
C{RegexpChunk} can only be used to chunk a single kind of phrase.
For example, you can use an C{RegexpChunk} to chunk the noun
phrases in a text, or the verb phrases in a text; but you can not
use it to simultaneously chunk both noun phrases and verb phrases in
the same text. (This is a limitation of C{RegexpChunk}, not of
chunk parsers in general.)
RegexpChunkRules
------------------
C{RegexpChunkRule}s are transformational rules that update the
chunking of a text by modifying its C{ChunkString}. Each
C{RegexpChunkRule} defines the C{apply} method, which modifies
the chunking encoded by a C{ChunkString}. The
L{RegexpChunkRule} class itself can be used to implement any
transformational rule based on regular expressions. There are
also a number of subclasses, which can be used to implement
simpler types of rules:
- L{ChunkRule} chunks anything that matches a given regular
expression.
- L{ChinkRule} chinks anything that matches a given regular
expression.
- L{UnChunkRule} will un-chunk any chunk that matches a given
regular expression.
- L{MergeRule} can be used to merge two contiguous chunks.
- L{SplitRule} can be used to split a single chunk into two
smaller chunks.
- L{ExpandLeftRule} will expand a chunk to incorporate new
unchunked material on the left.
- L{ExpandRightRule} will expand a chunk to incorporate new
unchunked material on the right.
Tag Patterns
~~~~~~~~~~~~
C{RegexpChunkRule}s use a modified version of regular
expression patterns, called X{tag patterns}. Tag patterns are
used to match sequences of tags. Examples of tag patterns are::
r'(<DT>|<JJ>|<NN>)+'
r'<NN>+'
r'<NN.*>'
The differences between regular expression patterns and tag
patterns are:
  - In tag patterns, C{'<'} and C{'>'} act as parentheses; so
C{'<NN>+'} matches one or more repetitions of C{'<NN>'}, not
C{'<NN'} followed by one or more repetitions of C{'>'}.
- Whitespace in tag patterns is ignored. So
    C{'<DT> | <NN>'} is equivalent to C{'<DT>|<NN>'}
  - In tag patterns, C{'.'} is equivalent to C{'[^{}<>]'}; so
C{'<NN.*>'} matches any single tag starting with C{'NN'}.
The function L{tag_pattern2re_pattern} can be used to transform
a tag pattern to an equivalent regular expression pattern.
Efficiency
----------
Preliminary tests indicate that C{RegexpChunk} can chunk at a
rate of about 300 tokens/second, with a moderately complex rule
set.
There may be problems if C{RegexpChunk} is used with more than
5,000 tokens at a time. In particular, evaluation of some regular
expressions may cause the Python regular expression engine to
exceed its maximum recursion depth. We have attempted to minimize
these problems, but it is impossible to avoid them completely. We
therefore recommend that you apply the chunk parser to a single
sentence at a time.
Emacs Tip
---------
If you evaluate the following elisp expression in emacs, it will
colorize C{ChunkString}s when you use an interactive python shell
with emacs or xemacs ("C-c !")::
(let ()
(defconst comint-mode-font-lock-keywords
'(("<[^>]+>" 0 'font-lock-reference-face)
("[{}]" 0 'font-lock-function-name-face)))
(add-hook 'comint-mode-hook (lambda () (turn-on-font-lock))))
You can evaluate this code by copying it to a temporary buffer,
placing the cursor after the last close parenthesis, and typing
"C{C-x C-e}". You should evaluate it before running the interactive
session. The change will last until you close emacs.
Unresolved Issues
-----------------
If we use the C{re} module for regular expressions, Python's
regular expression engine generates "maximum recursion depth
exceeded" errors when processing very large texts, even for
regular expressions that should not require any recursion. We
therefore use the C{pre} module instead. But note that C{pre}
does not include Unicode support, so this module will not work
with unicode strings. Note also that C{pre} regular expressions
are not quite as advanced as C{re} ones (e.g., no leftward
zero-length assertions).
@type _VALID_CHUNK_STRING: C{regexp}
@var _VALID_CHUNK_STRING: A regular expression to test whether a chunk
string is valid.
@type _VALID_TAG_PATTERN: C{regexp}
@var _VALID_TAG_PATTERN: A regular expression to test whether a tag
pattern is valid.
"""
from en.parser.nltk_lite.parse import ParseI, AbstractParse
from tree import Tree
from en.parser.nltk_lite import tokenize
import types, re
##//////////////////////////////////////////////////////
## Chunk Parser Interface
##//////////////////////////////////////////////////////
class ChunkParseI(ParseI):
    """
    A processing interface for identifying non-overlapping groups in
    unrestricted text.  Typically, chunk parsers are used to find base
    syntactic constituents, such as base noun phrases.  Unlike
    L{ParseI}, C{ChunkParseI} guarantees that the C{parse} method
    will always generate a parse.
    """
    def parse(self, tokens):
        """
        Find the best chunk structure for the given tokens
        and return a tree

        @param tokens: The list of (word, tag) tokens to be chunked.
        @type tokens: L{list} of L{tuple}
        """
        # NotImplementedError replaces "assert 0, ...": an assert is
        # stripped under "python -O", which would silently turn this
        # abstract method into a no-op returning None.
        raise NotImplementedError("ChunkParseI is an abstract interface")

    def parse_n(self, tokens, n=None):
        """
        Find a list of the C{n} most likely chunk structures for the
        tokens, and return a tree.  If there are fewer than C{n}
        chunk structures, then find them all.  The chunk structures
        should be stored in descending order of estimated likelihood.

        @type n: C{int}
        @param n: The number of chunk structures to generate.  At most
           C{n} chunk structures will be generated.  If C{n} is not
           specified, generate all chunk structures.
        @type tokens: L{list} of L{tuple}
        @param tokens: The list of (word, tag) tokens to be chunked.
        """
        # See parse() above for why this raises instead of asserting.
        raise NotImplementedError("ChunkParseI is an abstract interface")
##//////////////////////////////////////////////////////
## Evaluation Helper
##//////////////////////////////////////////////////////
# Patched for increased performance by Yoav Goldberg <yoavg@cs.bgu.ac.il>, 2006-01-13
# -- statistics are evaluated only on demand, instead of at every sentence evaluation
class ChunkScore(object):
    """
    A utility class for scoring chunk parsers.  C{ChunkScore} can
    evaluate a chunk parser's output, based on a number of statistics
    (precision, recall, f-measure, missed chunks, incorrect chunks).
    It can also combine the scores from the parsing of multiple texts;
    this makes it significantly easier to evaluate a chunk parser that
    operates one sentence at a time.

    Texts are evaluated with the C{score} method.  The results of
    evaluation can be accessed via a number of accessor methods, such
    as C{precision} and C{f_measure}.  A typical use of the
    C{ChunkScore} class is::

        >>> chunkscore = ChunkScore()
        >>> for correct in correct_sentences:
        ...     guess = chunkparser.parse(correct.leaves())
        ...     chunkscore.score(correct, guess)
        >>> print 'F Measure:', chunkscore.f_measure()
        F Measure: 0.823

    @ivar kwargs: Keyword arguments:
        - max_tp_examples: The maximum number actual examples of true
          positives to record.  This affects the C{correct} member
          function: C{correct} will not return more than this number
          of true positive examples.  This does *not* affect any of
          the numerical metrics (precision, recall, or f-measure)
        - max_fp_examples: The maximum number actual examples of false
          positives to record.  This affects the C{incorrect} member
          function and the C{guessed} member function: C{incorrect}
          will not return more than this number of examples, and
          C{guessed} will not return more than this number of true
          positive examples.  This does *not* affect any of the
          numerical metrics (precision, recall, or f-measure)
        - max_fn_examples: The maximum number actual examples of false
          negatives to record.  This affects the C{missed} member
          function and the C{correct} member function: C{missed}
          will not return more than this number of examples, and
          C{correct} will not return more than this number of true
          negative examples.  This does *not* affect any of the
          numerical metrics (precision, recall, or f-measure)

    @type _tp: C{list} of C{Token}
    @ivar _tp: List of true positives
    @type _fp: C{list} of C{Token}
    @ivar _fp: List of false positives
    @type _fn: C{list} of C{Token}
    @ivar _fn: List of false negatives
    @type _tp_num: C{int}
    @ivar _tp_num: Number of true positives
    @type _fp_num: C{int}
    @ivar _fp_num: Number of false positives
    @type _fn_num: C{int}
    @ivar _fn_num: Number of false negatives.
    """
    def __init__(self, **kwargs):
        self._correct = set()
        self._guessed = set()
        self._tp = set()
        self._fp = set()
        self._fn = set()
        # NOTE(review): the _max_* limits are stored but never consulted
        # by any method of this class; the example-count capping described
        # in the class docstring does not appear to be implemented here.
        self._max_tp = kwargs.get('max_tp_examples', 100)
        self._max_fp = kwargs.get('max_fp_examples', 100)
        self._max_fn = kwargs.get('max_fn_examples', 100)
        self._tp_num = 0
        self._fp_num = 0
        self._fn_num = 0
        self._count = 0
        # Statistics are recomputed lazily (on demand) rather than after
        # every score() call; see _updateMeasures.
        self._measuresNeedUpdate = False

    def _updateMeasures(self):
        """Recompute the tp/fp/fn sets and counts, if they are stale."""
        if (self._measuresNeedUpdate):
            self._tp = self._guessed & self._correct
            self._fn = self._correct - self._guessed
            self._fp = self._guessed - self._correct
            self._tp_num = len(self._tp)
            self._fp_num = len(self._fp)
            self._fn_num = len(self._fn)
            self._measuresNeedUpdate = False

    def score(self, correct, guessed):
        """
        Given a correctly chunked sentence, score another chunked
        version of the same sentence.

        @type correct: chunk structure
        @param correct: The known-correct ("gold standard") chunked
            sentence.
        @type guessed: chunk structure
        @param guessed: The chunked sentence to be scored.
        """
        # _count tags each sentence's chunks with a unique sentence
        # number, so identical chunks from different sentences do not
        # collapse in the sets.
        self._correct |= _chunksets(correct, self._count)
        self._guessed |= _chunksets(guessed, self._count)
        self._count += 1
        self._measuresNeedUpdate = True

    def precision(self):
        """
        @return: the overall precision for all texts that have been
            scored by this C{ChunkScore}.
        @rtype: C{float}
        """
        self._updateMeasures()
        div = self._tp_num + self._fp_num
        if div == 0: return 0
        else: return float(self._tp_num) / div

    def recall(self):
        """
        @return: the overall recall for all texts that have been
            scored by this C{ChunkScore}.
        @rtype: C{float}
        """
        self._updateMeasures()
        div = self._tp_num + self._fn_num
        if div == 0: return 0
        else: return float(self._tp_num) / div

    def f_measure(self, alpha=0.5):
        """
        @return: the overall F measure for all texts that have been
            scored by this C{ChunkScore}.
        @rtype: C{float}
        @param alpha: the relative weighting of precision and recall.
            Larger alpha biases the score towards the precision value,
            while smaller alpha biases the score towards the recall
            value.  C{alpha} should have a value in the range [0,1].
        @type alpha: C{float}
        """
        self._updateMeasures()
        p = self.precision()
        r = self.recall()
        # Guard against division by zero in the harmonic mean below.
        # NOTE(review): for alpha exactly 0 or 1 this also returns 0 when
        # only the unweighted measure is zero; the original carried the
        # same behavior (marked "what if alpha is 0 or 1?").
        if p == 0 or r == 0:
            return 0
        return 1/(alpha/p + (1-alpha)/r)

    def missed(self):
        """
        @rtype: C{list} of chunks
        @return: the chunks which were included in the
            correct chunk structures, but not in the guessed chunk
            structures, listed in input order.
        """
        self._updateMeasures()
        chunks = list(self._fn)
        return [c[1] for c in chunks]  # discard position information

    def incorrect(self):
        """
        @rtype: C{list} of chunks
        @return: the chunks which were included in the
            guessed chunk structures, but not in the correct chunk
            structures, listed in input order.
        """
        self._updateMeasures()
        chunks = list(self._fp)
        return [c[1] for c in chunks]  # discard position information

    def correct(self):
        """
        @rtype: C{list} of chunks
        @return: the chunks which were included in the correct
            chunk structures, listed in input order.
        """
        chunks = list(self._correct)
        return [c[1] for c in chunks]  # discard position information

    def guessed(self):
        """
        @rtype: C{list} of chunks
        @return: the chunks which were included in the guessed
            chunk structures, listed in input order.
        """
        chunks = list(self._guessed)
        return [c[1] for c in chunks]  # discard position information

    def __len__(self):
        # Total number of correct chunks seen so far (tp + fn).
        self._updateMeasures()
        return self._tp_num + self._fn_num

    def __repr__(self):
        """
        @rtype: C{String}
        @return: a concise representation of this C{ChunkScoring}.
        """
        # %d formatting replaces the Python-2-only backquote (repr)
        # syntax; the output is unchanged.
        return '<ChunkScoring of %d chunks>' % len(self)

    def __str__(self):
        """
        @rtype: C{String}
        @return: a verbose representation of this C{ChunkScoring}.
            This representation includes the precision, recall, and
            f-measure scores.  For other information about the score,
            use the accessor methods (e.g., C{missed()} and
            C{incorrect()}).
        """
        return ("ChunkParse score:\n" +
                ("    Precision: %5.1f%%\n" % (self.precision()*100)) +
                ("    Recall:    %5.1f%%\n" % (self.recall()*100))+
                ("    F-Measure: %5.1f%%" % (self.f_measure()*100)))

    def _chunk_toks(self, text):
        """
        @return: The list of tokens contained in C{text}.
        """
        # Bug fix: the original tested isinstance(tok, AbstractTree), but
        # AbstractTree is not defined or imported anywhere in this module,
        # so calling this method always raised NameError.  Tree is the
        # type used for chunks throughout this module.
        return [tok for tok in text if isinstance(tok, Tree)]
def _chunksets(t, count):
    """Return the set of chunks in chunk structure C{t}, each keyed by a
    unique id: the pair (sentence number C{count}, absolute position of
    the chunk's first word).  Chunks are frozen so they are hashable."""
    offset = 0
    found = set()
    for piece in t:
        if isinstance(piece, Tree):
            found.add(((count, offset), tuple(piece.freeze())))
            offset += len(piece)
        else:
            # A bare token advances the word position by one.
            offset += 1
    return found
##//////////////////////////////////////////////////////
## Precompiled regular expressions
##//////////////////////////////////////////////////////
# A single character that may appear inside a tag (anything except
# braces and angle brackets).
_TAGCHAR = r'[^\{\}<>]'
# A complete angle-bracketed tag, as one capturing group.
_TAG = r'(<%s+?>)' % _TAGCHAR
# Matches tag patterns that are well-formed: any sequence of tag-safe
# runs, optionally wrapped in angle brackets.
# NOTE(review): the two interpolated strings duplicate the character
# class of _TAGCHAR literally instead of reusing it.
_VALID_TAG_PATTERN = re.compile(r'^((%s|<%s>)*)$' %
                                ('[^\{\}<>]+',
                                 '[^\{\}<>]+'))
##//////////////////////////////////////////////////////
## ChunkString
##//////////////////////////////////////////////////////
class ChunkString(object):
"""
A string-based encoding of a particular chunking of a text.
Internally, the C{ChunkString} class uses a single string to
encode the chunking of the input text. This string contains a
sequence of angle-bracket delimited tags, with chunking indicated
by braces. An example of this encoding is::
{<DT><JJ><NN>}<VBN><IN>{<DT><NN>}<.>{<DT><NN>}<VBD><.>
C{ChunkString} are created from tagged texts (i.e., C{list}s of
C{tokens} whose type is C{TaggedType}). Initially, nothing is
chunked.
The chunking of a C{ChunkString} can be modified with the C{xform}
method, which uses a regular expression to transform the string
representation. These transformations should only add and remove
braces; they should I{not} modify the sequence of angle-bracket
delimited tags.
@type _str: C{string}
@ivar _str: The internal string representation of the text's
encoding. This string representation contains a sequence of
angle-bracket delimited tags, with chunking indicated by
braces. An example of this encoding is::
{<DT><JJ><NN>}<VBN><IN>{<DT><NN>}<.>{<DT><NN>}<VBD><.>
@type _pieces: C{list} of pieces (tagged tokens and chunks)
@ivar _pieces: The tagged tokens and chunks encoded by this C{ChunkString}.
@ivar _debug: The debug level. See the constructor docs.
@cvar IN_CHUNK_PATTERN: A zero-width regexp pattern string that
will only match positions that are in chunks.
@cvar IN_CHINK_PATTERN: A zero-width regexp pattern string that
will only match positions that are in chinks.
"""
IN_CHUNK_PATTERN = r'(?=[^\{]*\})'
IN_CHINK_PATTERN = r'(?=[^\}]*(\{|$))'
# These are used by _verify
_CHUNK = r'(\{%s+?\})+?' % _TAG
_CHINK = r'(%s+?)+?' % _TAG
_VALID = re.compile(r'(\{?%s\}?)*?' % _TAG)
_BRACKETS = re.compile('[^\{\}]+')
_BALANCED_BRACKETS = re.compile(r'(\{\})*$')
    def __init__(self, chunk_struct, debug_level=3):
        """
        Construct a new C{ChunkString} that encodes the chunking of
        the text C{tagged_tokens}.

        @type chunk_struct: C{Tree}
        @param chunk_struct: The chunk structure to be further chunked.
        @type debug_level: int
        @param debug_level: The level of debugging which should be
            applied to transformations on the C{ChunkString}.  The
            valid levels are:
                - 0: no checks
                - 1: full check on to_chunkstruct
                - 2: full check on to_chunkstruct and cursory check after
                  each transformation.
                - 3: full check on to_chunkstruct and full check after
                  each transformation.
            We recommend you use at least level 1.  You should
            probably use level 3 if you use any non-standard
            subclasses of C{RegexpChunkRule}.
        """
        self._top_node = chunk_struct.node
        self._pieces = chunk_struct[:]
        # Encode the chunk structure as a flat string of angle-bracketed
        # tags, e.g. "<DT><NN><VBD>"; chunking braces are added later by
        # the transformation rules.
        tags = [self._tag(tok) for tok in self._pieces]
        self._str = '<' + '><'.join(tags) + '>'
        self._debug = debug_level
def _tag(self, tok):
if type(tok) == types.TupleType:
return tok[1]
elif isinstance(tok, Tree):
return tok.node
else:
raise ValueError, 'chunk structures must contain tokens and trees'
def _verify(self, verify_tags):
"""
Check to make sure that C{_str} still corresponds to some chunked
version of C{_pieces}.
@type verify_tags: C{boolean}
@param verify_tags: Whether the individual tags should be
checked. If this is false, C{_verify} will check to make
sure that C{_str} encodes a chunked version of I{some}
list of tokens. If this is true, then C{_verify} will
check to make sure that the tags in C{_str} match those in
C{_pieces}.
@raise ValueError: if this C{ChunkString}'s internal string
representation is invalid or not consistent with _pieces.
"""
# Check overall form
if not ChunkString._VALID.match(self._str):
raise ValueError('Transformation generated invalid chunkstring: %s' % self._str)
# Check that parens are balanced. If the string is long, we
# have to do this in pieces, to avoid a maximum recursion
# depth limit for regular expressions.
brackets = ChunkString._BRACKETS.sub('', self._str)
for i in range(1+len(brackets)/5000):
substr = brackets[i*5000:i*5000+5000]
if not ChunkString._BALANCED_BRACKETS.match(substr):
raise ValueError('Transformation generated invalid chunkstring: %s' % substr)
if verify_tags<=0: return
tags1 = (re.split(r'[\{\}<>]+', self._str))[1:-1]
tags2 = [self._tag(piece) for piece in self._pieces]
if tags1 != tags2:
raise ValueError('Transformation generated invalid chunkstring: %s / %s' % (tags1,tags2))
def to_chunkstruct(self, chunk_node='CHUNK'):
"""
@return: the chunk structure encoded by this C{ChunkString}.
@rtype: C{Tree}
@raise ValueError: If a transformation has generated an
invalid chunkstring.
"""
if self._debug > 0: self._verify(1)
# Use this alternating list to create the chunkstruct.
pieces = []
index = 0
piece_in_chunk = 0
for piece in re.split('[{}]', self._str):
# Find the list of tokens contained in this piece.
length = piece.count('<')
subsequence = self._pieces[index:index+length]
# Add this list of tokens to our pieces.
if piece_in_chunk:
pieces.append(Tree(chunk_node, subsequence))
else:
pieces += subsequence
# Update index, piece_in_chunk
index += length
piece_in_chunk = not piece_in_chunk
return Tree(self._top_node, pieces)
def xform(self, regexp, repl):
"""
Apply the given transformation to this C{ChunkString}'s string
encoding. In particular, find all occurances that match
C{regexp}, and replace them using C{repl} (as done by
C{re.sub}).
This transformation should only add and remove braces; it
should I{not} modify the sequence of angle-bracket delimited
tags. Furthermore, this transformation may not result in
improper bracketing. Note, in particular, that bracketing may
not be nested.
@type regexp: C{string} or C{regexp}
@param regexp: A regular expression matching the substring
that should be replaced. This will typically include a
named group, which can be used by C{repl}.
@type repl: C{string}
@param repl: An expression specifying what should replace the
matched substring. Typically, this will include a named
replacement group, specified by C{regexp}.
@rtype: C{None}
@raise ValueError: If this transformation generated an
invalid chunkstring.
"""
# Do the actual substitution
self._str = re.sub(regexp, repl, self._str)
# The substitution might have generated "empty chunks"
# (substrings of the form "{}"). Remove them, so they don't
# interfere with other transformations.
self._str = re.sub('\{\}', '', self._str)
# Make sure that the transformation was legal.
if self._debug > 1: self._verify(self._debug-2)
def xform_chunk(self, pattern, repl):
    """
    Apply C{xform} to the chunks in this C{ChunkString}'s string
    encoding: every occurance of C{pattern} that falls inside a
    chunk is replaced using C{repl}, with C{re.sub} semantics.

    The transformation may only add or remove braces; it must not
    alter the sequence of angle-bracket delimited tags, and it may
    not produce nested or mismatched bracketing.

    @type pattern: C{string}
    @param pattern: A regular expression pattern matching the
        substring that should be replaced.  This will typically
        include a named group, which can be used by C{repl}.
    @type repl: C{string}
    @param repl: An expression specifying what should replace the
        matched substring.  Typically, this will include a named
        replacement group, specified by C{pattern}.
    @rtype: C{None}
    @raise ValueError: If this transformation generated an
        invalid chunkstring.
    """
    # Accept a pre-compiled regexp by falling back to its source text.
    if type(pattern).__name__ == 'SRE_Pattern':
        pattern = pattern.pattern
    # Restrict the match to material that lies inside a chunk.
    self.xform(pattern + ChunkString.IN_CHUNK_PATTERN, repl)
def xform_chink(self, pattern, repl):
    """
    Apply C{xform} to the chinks in this C{ChunkString}'s string
    encoding: every occurance of C{pattern} that falls inside a
    chink is replaced using C{repl}, with C{re.sub} semantics.

    The transformation may only add or remove braces; it must not
    alter the sequence of angle-bracket delimited tags, and it may
    not produce nested or mismatched bracketing.

    @type pattern: C{string} or C{regexp}
    @param pattern: A regular expression pattern matching the
        substring that should be replaced.  This will typically
        include a named group, which can be used by C{repl}.
    @type repl: C{string}
    @param repl: An expression specifying what should replace the
        matched substring.  Typically, this will include a named
        replacement group, specified by C{pattern}.
    @rtype: C{None}
    @raise ValueError: If this transformation generated an
        invalid chunkstring.
    """
    # Accept a pre-compiled regexp by falling back to its source text.
    if type(pattern).__name__ == 'SRE_Pattern':
        pattern = pattern.pattern
    # Restrict the match to material that lies inside a chink.
    self.xform(pattern + ChunkString.IN_CHINK_PATTERN, repl)
def __repr__(self):
    """
    @rtype: C{string}
    @return: A string representation of this C{ChunkString}, of
        the form::

            <ChunkString: '{<DT><JJ><NN>}<VBN><IN>{<DT><NN>}'>
    """
    # %r gives the same result as the old backquote repr syntax.
    return '<ChunkString: %r>' % (self._str,)
def __str__(self):
    """
    @rtype: C{string}
    @return: A formatted representation of this C{ChunkString}'s
        string encoding.  Extra spaces are inserted so that tags
        line up with the representation of other C{ChunkStrings}
        for the same text, regardless of the chunking.
    """
    # Pad after each '>' that does not close a chunk, and before
    # each '<' that does not open one, so columns align.
    text = re.sub(r'>(?!\})', r'> ', self._str)
    text = re.sub(r'([^\{])<', r'\1 <', text)
    if text[0] == '<':
        text = ' ' + text
    return text
##//////////////////////////////////////////////////////
## Rules
##//////////////////////////////////////////////////////
def tag_pattern2re_pattern(tag_pattern):
    """
    Convert a tag pattern to a regular expression pattern.  A X{tag
    pattern} is a modified version of a regular expression, designed
    for matching sequences of tags.  The differences between regular
    expression patterns and tag patterns are:

        - In tag patterns, C{'<'} and C{'>'} act as parenthases; so
          C{'<NN>+'} matches one or more repetitions of C{'<NN>'}, not
          C{'<NN'} followed by one or more repetitions of C{'>'}.
        - Whitespace in tag patterns is ignored.  So
          C{'<DT> | <NN>'} is equivalant to C{'<DT>|<NN>'}
        - In tag patterns, C{'.'} is equivalant to C{'[^{}<>]'}; so
          C{'<NN.*>'} matches any single tag starting with C{'NN'}.

    In particular, C{tag_pattern2re_pattern} performs the following
    transformations on the given pattern:

        - Replace '.' with '[^<>{}]'
        - Remove any whitespace
        - Add extra parens around '<' and '>', to make '<' and '>' act
          like parenthases.  E.g., so that in '<NN>+', the '+' has scope
          over the entire '<NN>'; and so that in '<NN|IN>', the '|' has
          scope over 'NN' and 'IN', but not '<' or '>'.
        - Check to make sure the resulting pattern is valid.

    @type tag_pattern: C{string}
    @param tag_pattern: The tag pattern to convert to a regular
        expression pattern.
    @raise ValueError: If C{tag_pattern} is not a valid tag pattern.
        In particular, C{tag_pattern} should not include braces; and it
        should not contain nested or mismatched angle-brackets.
    @rtype: C{string}
    @return: A regular expression pattern corresponding to
        C{tag_pattern}.
    """
    # Clean up the regular expression: strip whitespace and wrap each
    # angle bracket in parens, so '<' and '>' behave like parenthases.
    tag_pattern = re.sub(r'\s', '', tag_pattern)
    tag_pattern = re.sub(r'<', '(<(', tag_pattern)
    tag_pattern = re.sub(r'>', ')>)', tag_pattern)
    # Check the regular expression against the module's validity
    # pattern before any further rewriting.
    if not _VALID_TAG_PATTERN.match(tag_pattern):
        raise ValueError('Bad tag pattern: %s' % tag_pattern)
    # Replace "." with _TAGCHAR.
    # We have to do this after, since it adds {}[]<>s, which would
    # confuse _VALID_TAG_PATTERN.
    # PRE doesn't have lookback assertions, so reverse twice, and do
    # the pattern backwards (with lookahead assertions).  This can be
    # made much cleaner once we can switch back to SRE.
    def reverse_str(str):
        # Reverse a string character-by-character.
        lst = list(str)
        lst.reverse()
        return ''.join(lst)
    tc_rev = reverse_str(_TAGCHAR)
    reversed = reverse_str(tag_pattern)
    # Replace each unescaped '.' in the reversed pattern; the lookahead
    # checks for an even number of preceding backslashes (i.e. the dot
    # itself was not escaped in the original, un-reversed pattern).
    reversed = re.sub(r'\.(?!\\(\\\\)*($|[^\\]))', tc_rev, reversed)
    tag_pattern = reverse_str(reversed)
    return tag_pattern
class RegexpChunkRule(object):
    """
    A rule specifying how to modify the chunking in a C{ChunkString},
    using a transformational regular expression.  The
    C{RegexpChunkRule} class itself can be used to implement any
    transformational rule based on regular expressions.  There are
    also a number of subclasses, which can be used to implement
    simpler types of rules, based on matching regular expressions.

    Each C{RegexpChunkRule} has a regular expression and a
    replacement expression.  When a C{RegexpChunkRule} is X{applied}
    to a C{ChunkString}, it searches the C{ChunkString} for any
    substring that matches the regular expression, and replaces it
    using the replacement expression.  This search/replace operation
    has the same semantics as C{re.sub}.

    Each C{RegexpChunkRule} also has a description string, which
    gives a short (typically less than 75 characters) description of
    the purpose of the rule.

    This transformation defined by this C{RegexpChunkRule} should
    only add and remove braces; it should I{not} modify the sequence
    of angle-bracket delimited tags.  Furthermore, this transformation
    may not result in nested or mismatched bracketing.
    """
    def __init__(self, regexp, repl, descr):
        """
        Construct a new RegexpChunkRule.

        @type regexp: C{regexp} or C{string}
        @param regexp: This C{RegexpChunkRule}'s regular expression.
            When this rule is applied to a C{ChunkString}, any
            substring that matches C{regexp} will be replaced using
            the replacement string C{repl}.  Note that this must be a
            normal regular expression, not a tag pattern.
        @type repl: C{string}
        @param repl: This C{RegexpChunkRule}'s replacement
            expression.  When this rule is applied to a
            C{ChunkString}, any substring that matches C{regexp} will
            be replaced using C{repl}.
        @type descr: C{string}
        @param descr: A short description of the purpose and/or effect
            of this rule.
        """
        # Normalize a pre-compiled pattern to its source text, then
        # compile unconditionally.  (The original only compiled when
        # type(regexp) was exactly str, which left e.g. unicode
        # patterns stored uncompiled.)
        if type(regexp).__name__ == 'SRE_Pattern':
            regexp = regexp.pattern
        self._repl = repl
        self._descr = descr
        self._regexp = re.compile(regexp)

    def apply(self, chunkstr):
        # Keep docstring generic so we can inherit it.
        """
        Apply this rule to the given C{ChunkString}.  See the
        class reference documentation for a description of what it
        means to apply a rule.

        @type chunkstr: C{ChunkString}
        @param chunkstr: The chunkstring to which this rule is
            applied.
        @rtype: C{None}
        @raise ValueError: If this transformation generated an
            invalid chunkstring.
        """
        chunkstr.xform(self._regexp, self._repl)

    def descr(self):
        """
        @rtype: C{string}
        @return: a short description of the purpose and/or effect of
            this rule.
        """
        return self._descr

    def __repr__(self):
        """
        @rtype: C{string}
        @return: A string representation of this rule, of the form::

            <RegexpChunkRule: '{<IN|VB.*>}'->'<IN>'>

        Note that this representation does not include the
        description string; that string can be accessed
        separately with the C{descr} method.
        """
        # %r replaces the deprecated backquote repr syntax.
        return ('<RegexpChunkRule: %r->%r>' %
                (self._regexp.pattern, self._repl))
class ChunkRule(RegexpChunkRule):
    """
    A rule specifying how to add chunks to a C{ChunkString}, using a
    matching tag pattern.  When applied to a C{ChunkString}, it will
    find any substring that matches this tag pattern and that is not
    already part of a chunk, and create a new chunk containing that
    substring.
    """
    def __init__(self, tag_pattern, descr):
        """
        Construct a new C{ChunkRule}.

        @type tag_pattern: C{string}
        @param tag_pattern: This rule's tag pattern.  When
            applied to a C{ChunkString}, this rule will
            chunk any substring that matches this tag pattern and that
            is not already part of a chunk.
        @type descr: C{string}
        @param descr: A short description of the purpose and/or effect
            of this rule.
        """
        self._pattern = tag_pattern
        # Only match material that lies inside a chink (i.e. not
        # already chunked).
        regexp = re.compile('(?P<chunk>%s)%s' %
                            (tag_pattern2re_pattern(tag_pattern),
                             ChunkString.IN_CHINK_PATTERN))
        # Raw string: '\g' is a regex group reference, not a valid
        # string escape sequence.
        RegexpChunkRule.__init__(self, regexp, r'{\g<chunk>}', descr)

    def __repr__(self):
        """
        @rtype: C{string}
        @return: A string representation of this rule, of the form::

            <ChunkRule: '<IN|VB.*>'>

        Note that this representation does not include the
        description string; that string can be accessed
        separately with the C{descr} method.
        """
        # %r replaces the deprecated backquote repr syntax.
        return '<ChunkRule: %r>' % (self._pattern,)
class ChinkRule(RegexpChunkRule):
    """
    A rule specifying how to remove chinks to a C{ChunkString},
    using a matching tag pattern.  When applied to a
    C{ChunkString}, it will find any substring that matches this
    tag pattern and that is contained in a chunk, and remove it
    from that chunk, thus creating two new chunks.
    """
    def __init__(self, tag_pattern, descr):
        """
        Construct a new C{ChinkRule}.

        @type tag_pattern: C{string}
        @param tag_pattern: This rule's tag pattern.  When
            applied to a C{ChunkString}, this rule will
            find any substring that matches this tag pattern and that
            is contained in a chunk, and remove it from that chunk,
            thus creating two new chunks.
        @type descr: C{string}
        @param descr: A short description of the purpose and/or effect
            of this rule.
        """
        self._pattern = tag_pattern
        # Only match material that lies inside an existing chunk.
        regexp = re.compile('(?P<chink>%s)%s' %
                            (tag_pattern2re_pattern(tag_pattern),
                             ChunkString.IN_CHUNK_PATTERN))
        # Raw string: close the chunk before the chink and reopen it
        # afterwards ('\g' is a regex group reference, not a valid
        # string escape).
        RegexpChunkRule.__init__(self, regexp, r'}\g<chink>{', descr)

    def __repr__(self):
        """
        @rtype: C{string}
        @return: A string representation of this rule, of the form::

            <ChinkRule: '<IN|VB.*>'>

        Note that this representation does not include the
        description string; that string can be accessed
        separately with the C{descr} method.
        """
        # %r replaces the deprecated backquote repr syntax.
        return '<ChinkRule: %r>' % (self._pattern,)
class UnChunkRule(RegexpChunkRule):
    """
    A rule specifying how to remove chunks to a C{ChunkString},
    using a matching tag pattern.  When applied to a
    C{ChunkString}, it will find any complete chunk that matches this
    tag pattern, and un-chunk it.
    """
    def __init__(self, tag_pattern, descr):
        """
        Construct a new C{UnChunkRule}.

        @type tag_pattern: C{string}
        @param tag_pattern: This rule's tag pattern.  When
            applied to a C{ChunkString}, this rule will
            find any complete chunk that matches this tag pattern,
            and un-chunk it.
        @type descr: C{string}
        @param descr: A short description of the purpose and/or effect
            of this rule.
        """
        self._pattern = tag_pattern
        # Raw strings: '\{', '\}' and '\g' are regex escapes, not
        # valid string escape sequences.  The pattern must match a
        # complete chunk (braces included).
        regexp = re.compile(r'\{(?P<chunk>%s)\}' %
                            tag_pattern2re_pattern(tag_pattern))
        RegexpChunkRule.__init__(self, regexp, r'\g<chunk>', descr)

    def __repr__(self):
        """
        @rtype: C{string}
        @return: A string representation of this rule, of the form::

            <UnChunkRule: '<IN|VB.*>'>

        Note that this representation does not include the
        description string; that string can be accessed
        separately with the C{descr} method.
        """
        # %r replaces the deprecated backquote repr syntax.
        return '<UnChunkRule: %r>' % (self._pattern,)
class MergeRule(RegexpChunkRule):
    """
    A rule specifying how to merge chunks in a C{ChunkString}, using
    two matching tag patterns: a left pattern, and a right pattern.
    When applied to a C{ChunkString}, it will find any chunk whose end
    matches left pattern, and immediately followed by a chunk whose
    beginning matches right pattern.  It will then merge those two
    chunks into a single chunk.
    """
    def __init__(self, left_tag_pattern, right_tag_pattern, descr):
        """
        Construct a new C{MergeRule}.

        @type right_tag_pattern: C{string}
        @param right_tag_pattern: This rule's right tag
            pattern.  When applied to a C{ChunkString}, this
            rule will find any chunk whose end matches
            C{left_tag_pattern}, and immediately followed by a chunk
            whose beginning matches this pattern.  It will
            then merge those two chunks into a single chunk.
        @type left_tag_pattern: C{string}
        @param left_tag_pattern: This rule's left tag
            pattern.  When applied to a C{ChunkString}, this
            rule will find any chunk whose end matches
            this pattern, and immediately followed by a chunk
            whose beginning matches C{right_tag_pattern}.  It will
            then merge those two chunks into a single chunk.
        @type descr: C{string}
        @param descr: A short description of the purpose and/or effect
            of this rule.
        """
        self._left_tag_pattern = left_tag_pattern
        self._right_tag_pattern = right_tag_pattern
        # Match a chunk boundary '}{' whose left side ends with the
        # left pattern and whose right side starts with the right
        # pattern (lookahead, so the right side is not consumed).
        regexp = re.compile('(?P<left>%s)}{(?=%s)' %
                            (tag_pattern2re_pattern(left_tag_pattern),
                             tag_pattern2re_pattern(right_tag_pattern)))
        # Raw string: '\g' is a regex group reference, not a valid
        # string escape sequence.
        RegexpChunkRule.__init__(self, regexp, r'\g<left>', descr)

    def __repr__(self):
        """
        @rtype: C{string}
        @return: A string representation of this rule, of the form::

            <MergeRule: '<NN|DT|JJ>', '<NN|JJ>'>

        Note that this representation does not include the
        description string; that string can be accessed
        separately with the C{descr} method.
        """
        # %r replaces the deprecated backquote repr syntax.
        return ('<MergeRule: %r, %r>' %
                (self._left_tag_pattern, self._right_tag_pattern))
class SplitRule(RegexpChunkRule):
    """
    A rule specifying how to split chunks in a C{ChunkString}, using
    two matching tag patterns: a left pattern, and a right pattern.
    When applied to a C{ChunkString}, it will find any chunk that
    matches the left pattern followed by the right pattern.  It will
    then split the chunk into two new chunks, at the point between the
    two pattern matches.
    """
    def __init__(self, left_tag_pattern, right_tag_pattern, descr):
        """
        Construct a new C{SplitRule}.

        @type right_tag_pattern: C{string}
        @param right_tag_pattern: This rule's right tag
            pattern.  When applied to a C{ChunkString}, this rule will
            find any chunk containing a substring that matches
            C{left_tag_pattern} followed by this pattern.  It will
            then split the chunk into two new chunks at the point
            between these two matching patterns.
        @type left_tag_pattern: C{string}
        @param left_tag_pattern: This rule's left tag
            pattern.  When applied to a C{ChunkString}, this rule will
            find any chunk containing a substring that matches this
            pattern followed by C{right_tag_pattern}.  It will then
            split the chunk into two new chunks at the point between
            these two matching patterns.
        @type descr: C{string}
        @param descr: A short description of the purpose and/or effect
            of this rule.
        """
        self._left_tag_pattern = left_tag_pattern
        self._right_tag_pattern = right_tag_pattern
        # The right pattern is a lookahead, so only the left side is
        # consumed; '}{' is inserted between them by the replacement.
        regexp = re.compile('(?P<left>%s)(?=%s)' %
                            (tag_pattern2re_pattern(left_tag_pattern),
                             tag_pattern2re_pattern(right_tag_pattern)))
        RegexpChunkRule.__init__(self, regexp, r'\g<left>}{', descr)

    def __repr__(self):
        """
        @rtype: C{string}
        @return: A string representation of this rule, of the form::

            <SplitRule: '<NN>', '<DT>'>

        Note that this representation does not include the
        description string; that string can be accessed
        separately with the C{descr} method.
        """
        # %r replaces the deprecated backquote repr syntax.
        return ('<SplitRule: %r, %r>' %
                (self._left_tag_pattern, self._right_tag_pattern))
class ExpandLeftRule(RegexpChunkRule):
    """
    A rule specifying how to expand chunks in a C{ChunkString} to the left,
    using two matching tag patterns: a left pattern, and a right pattern.
    When applied to a C{ChunkString}, it will find any chunk whose beginning
    matches right pattern, and immediately preceded by a chink whose
    end matches left pattern.  It will then expand the chunk to incorporate
    the new material on the left.
    """
    def __init__(self, left_tag_pattern, right_tag_pattern, descr):
        """
        Construct a new C{ExpandLeftRule}.

        @type right_tag_pattern: C{string}
        @param right_tag_pattern: This rule's right tag
            pattern.  When applied to a C{ChunkString}, this
            rule will find any chunk whose beginning matches
            this pattern, and immediately preceded by a chink
            whose end matches C{left_tag_pattern}.  It will
            then expand the chunk to incorporate the new material
            on the left.
        @type left_tag_pattern: C{string}
        @param left_tag_pattern: This rule's left tag
            pattern.  When applied to a C{ChunkString}, this
            rule will find any chunk whose beginning matches
            C{right_tag_pattern}, and immediately preceded by a chink
            whose end matches this pattern.  It will
            then expand the chunk to incorporate the new material
            on the left.
        @type descr: C{string}
        @param descr: A short description of the purpose and/or effect
            of this rule.
        """
        self._left_tag_pattern = left_tag_pattern
        self._right_tag_pattern = right_tag_pattern
        # Raw strings: '\{' and '\g' are regex escapes, not valid
        # string escape sequences.  The '{' between the two groups is
        # consumed and re-inserted before the left group, moving the
        # chunk's opening brace leftwards.
        regexp = re.compile(r'(?P<left>%s)\{(?P<right>%s)' %
                            (tag_pattern2re_pattern(left_tag_pattern),
                             tag_pattern2re_pattern(right_tag_pattern)))
        RegexpChunkRule.__init__(self, regexp, r'{\g<left>\g<right>', descr)

    def __repr__(self):
        """
        @rtype: C{string}
        @return: A string representation of this rule, of the form::

            <ExpandLeftRule: '<NN|DT|JJ>', '<NN|JJ>'>

        Note that this representation does not include the
        description string; that string can be accessed
        separately with the C{descr} method.
        """
        # %r replaces the deprecated backquote repr syntax.
        return ('<ExpandLeftRule: %r, %r>' %
                (self._left_tag_pattern, self._right_tag_pattern))
class ExpandRightRule(RegexpChunkRule):
    """
    A rule specifying how to expand chunks in a C{ChunkString} to the right,
    using two matching tag patterns: a left pattern, and a right pattern.
    When applied to a C{ChunkString}, it will find any chunk whose end
    matches left pattern, and immediately followed by a chink whose
    beginning matches right pattern.  It will then expand the chunk to
    incorporate the new material on the right.
    """
    def __init__(self, left_tag_pattern, right_tag_pattern, descr):
        """
        Construct a new C{ExpandRightRule}.

        @type right_tag_pattern: C{string}
        @param right_tag_pattern: This rule's right tag
            pattern.  When applied to a C{ChunkString}, this
            rule will find any chunk whose end matches
            C{left_tag_pattern}, and immediately followed by a chink
            whose beginning matches this pattern.  It will
            then expand the chunk to incorporate the new material
            on the right.
        @type left_tag_pattern: C{string}
        @param left_tag_pattern: This rule's left tag
            pattern.  When applied to a C{ChunkString}, this
            rule will find any chunk whose end matches
            this pattern, and immediately followed by a chink
            whose beginning matches C{right_tag_pattern}.  It will
            then expand the chunk to incorporate the new material
            on the right.
        @type descr: C{string}
        @param descr: A short description of the purpose and/or effect
            of this rule.
        """
        self._left_tag_pattern = left_tag_pattern
        self._right_tag_pattern = right_tag_pattern
        # Raw strings: '\}' and '\g' are regex escapes, not valid
        # string escape sequences.  The '}' between the two groups is
        # consumed and re-inserted after the right group, moving the
        # chunk's closing brace rightwards.
        regexp = re.compile(r'(?P<left>%s)\}(?P<right>%s)' %
                            (tag_pattern2re_pattern(left_tag_pattern),
                             tag_pattern2re_pattern(right_tag_pattern)))
        RegexpChunkRule.__init__(self, regexp, r'\g<left>\g<right>}', descr)

    def __repr__(self):
        """
        @rtype: C{string}
        @return: A string representation of this rule, of the form::

            <ExpandRightRule: '<NN|DT|JJ>', '<NN|JJ>'>

        Note that this representation does not include the
        description string; that string can be accessed
        separately with the C{descr} method.
        """
        # %r replaces the deprecated backquote repr syntax.
        return ('<ExpandRightRule: %r, %r>' %
                (self._left_tag_pattern, self._right_tag_pattern))
##//////////////////////////////////////////////////////
## RegexpChunk
##//////////////////////////////////////////////////////
class RegexpChunk(ChunkParseI, AbstractParse):
"""
A regular expression based chunk parser. C{RegexpChunk} uses a
sequence X{rules} to find chunks within a text. The chunking of
the text is encoded using a C{ChunkString}, and each rule acts by
modifying the chunking in the C{ChunkString}. The rules are all
implemented using regular expression matching and substitution.
The C{RegexpChunkRule} class and its subclasses (C{ChunkRule},
C{ChinkRule}, C{UnChunkRule}, C{MergeRule}, and C{SplitRule})
define the rules that are used by C{RegexpChunk}. Each rule
defines an C{apply} method, which modifies the chunking encoded
by a given C{ChunkString}.
@type _rules: C{list} of C{RegexpChunkRule}
@ivar _rules: The list of rules that should be applied to a text.
@type _trace: C{int}
@ivar _trace: The default level of tracing.
"""
def __init__(self, rules, chunk_node='CHUNK', top_node='TEXT', trace=0):
"""
Construct a new C{RegexpChunk}.
@type rules: C{list} of C{RegexpChunkRule}
@param rules: The sequence of rules that should be used to
generate the chunking for a tagged text.
@type chunk_node: C{string}
@param chunk_node: The node value that should be used for
chunk subtrees. This is typically a short string
describing the type of information contained by the chunk,
such as C{"NP"} for base noun phrases.
@type top_node: C{string}
@param top_node: The node value that should be used for the
top node of the chunk structure.
@type trace: C{int}
@param trace: The level of tracing that should be used when
parsing a text. C{0} will generate no tracing output;
C{1} will generate normal tracing output; and C{2} or
highter will generate verbose tracing output.
"""
self._rules = rules
self._trace = trace
self._chunk_node = chunk_node
self._top_node = top_node
AbstractParse.__init__(self)
def _trace_apply(self, chunkstr, verbose):
"""
Apply each of this C{RegexpChunk}'s rules to C{chunkstr}, in
turn. Generate trace output between each rule. If C{verbose}
is true, then generate verbose output.
@type chunkstr: C{ChunkString}
@param chunkstr: The chunk string to which each rule should be
applied.
@type verbose: C{boolean}
@param verbose: Whether output should be verbose.
@rtype: C{None}
"""
indent = ' '*(35-len(str(chunkstr))/2)
print 'Input:'
print indent, chunkstr
for rule in self._rules:
rule.apply(chunkstr)
if verbose:
print rule.descr()+' ('+`rule`+'):'
else:
print rule.descr()+':'
print indent, chunkstr
def _notrace_apply(self, chunkstr):
"""
Apply each of this C{RegexpChunk}'s rules to C{chunkstr}, in
turn.
@param chunkstr: The chunk string to which each rule should be
applied.
@type chunkstr: C{ChunkString}
@rtype: C{None}
"""
for rule in self._rules:
rule.apply(chunkstr)
def parse(self, tokens, trace=None):
"""
@type chunk_struct: C{Tree}
@param chunk_struct: the chunk structure to be (further) chunked
@type trace: C{int}
@param trace: The level of tracing that should be used when
parsing a text. C{0} will generate no tracing output;
C{1} will generate normal tracing output; and C{2} or
highter will generate verbose tracing output. This value
overrides the trace level value that was given to the
constructor.
@rtype: C{Tree}
@return: a chunk structure that encodes the chunks in a given
tagged sentence. A chunk is a non-overlapping linguistic
group, such as a noun phrase. The set of chunks
identified in the chunk structure depends on the rules
used to define this C{RegexpChunk}.
"""
if len(tokens) == 0:
print 'Warning: parsing empty text'
return Tree(self._top_node, [])
# Use the default trace value?
if trace == None: trace = self._trace
# Create the chunkstring, using the same properties as the parser
chunkstr = ChunkString(tokens)
# Apply the sequence of rules to the chunkstring.
if trace:
verbose = (trace>1)
self._trace_apply(chunkstr, verbose)
else:
self._notrace_apply(chunkstr)
# Use the chunkstring to create a chunk structure.
return chunkstr.to_chunkstruct(self._chunk_node)
def rules(self):
"""
@return: the sequence of rules used by this C{ChunkParse}.
@rtype: C{list} of C{RegexpChunkRule}
"""
return self._rules
def __repr__(self):
"""
@return: a concise string representation of this
C{RegexpChunk}.
@rtype: C{string}
"""
return "<RegexpChunk with %d rules>" % len(self._rules)
def __str__(self):
"""
@return: a verbose string representation of this
C{RegexpChunk}.
@rtype: C{string}
"""
s = "RegexpChunk with %d rules:\n" % len(self._rules)
margin = 0
for rule in self._rules:
margin = max(margin, len(rule.descr()))
if margin < 35:
format = " %" + `-(margin+3)` + "s%s\n"
else:
format = " %s\n %s\n"
for rule in self._rules:
s += format % (rule.descr(), `rule`)
return s[:-1]
##//////////////////////////////////////////////////////
## Demonstration code
##//////////////////////////////////////////////////////
def demo_eval(chunkparser, text):
    """
    Demonstration code for evaluating a chunk parser, using a
    C{ChunkScore}.  This function assumes that C{text} contains one
    sentence per line, and that each sentence has the form expected by
    C{tree.chunk}.  It runs the given chunk parser on each sentence in
    the text, and scores the result.  It prints the final score
    (precision, recall, and f-measure); and reports the set of chunks
    that were missed and the set of chunks that were incorrect.  (At
    most 10 missing chunks and 10 incorrect chunks are reported).

    @param chunkparser: The chunkparser to be tested
    @type chunkparser: C{ChunkParseI}
    @param text: The chunked tagged text that should be used for
        evaluation.
    @type text: C{string}
    """
    # Evaluate our chunk parser.
    chunkscore = ChunkScore()
    from en.parser.nltk_lite.parse import tree
    for sentence in text.split('\n'):
        print sentence
        sentence = sentence.strip()
        # Skip blank lines.
        if not sentence: continue
        # The gold-standard chunking is encoded directly in the text.
        gold = tree.chunk(sentence)
        tokens = gold.leaves()
        # Re-chunk the bare tokens with the parser under test, and
        # accumulate precision/recall statistics.
        test = chunkparser.parse(tree.Tree('S', tokens))
        chunkscore.score(gold, test)
    # Print a summary box with the aggregate scores.
    print '/'+('='*75)+'\\'
    print 'Scoring', chunkparser
    print ('-'*77)
    print 'Precision: %5.1f%%' % (chunkscore.precision()*100), ' '*4,
    print 'Recall: %5.1f%%' % (chunkscore.recall()*100), ' '*6,
    print 'F-Measure: %5.1f%%' % (chunkscore.f_measure()*100)
    # Missed chunks (at most 10 reported).
    if chunkscore.missed():
        print 'Missed:'
        missed = chunkscore.missed()
        for chunk in missed[:10]:
            print ' ', chunk
        if len(chunkscore.missed()) > 10:
            print ' ...'
    # Incorrect chunks (at most 10 reported).
    if chunkscore.incorrect():
        print 'Incorrect:'
        incorrect = chunkscore.incorrect()
        for chunk in incorrect[:10]:
            print ' ', chunk
        if len(chunkscore.incorrect()) > 10:
            print ' ...'
    print '\\'+('='*75)+'/'
def demo_cascade(chunkparsers, text):
"""
Demonstration code for cascading chunk parsers.
@param chunkparser: The chunkparsers to be tested
@type chunkparser: C{ChunkParseI}
@param text: The chunked tagged text that should be used for evaluation.
@type text: C{string}
"""
from en.parser.nltk_lite.parse.tree import Tree
for sentence in text.split('\n'):
print sentence
sentence = sentence.strip()
if not sentence: continue
gold = tree.chunk(sentence)
pieces = gold.leaves()
for chunkparser in chunkparsers:
pieces = chunkparser.parse(Tree('S', pieces))
print pieces
print
def demo():
    """
    A demonstration for the C{RegexpChunk} class.  A single text is
    parsed with four different chunk parsers, using a variety of rules
    and strategies.
    """
    from en.parser.nltk_lite import parse
    from en.parser.nltk_lite.tag import string2tags
    from en.parser.nltk_lite.parse.tree import Tree

    # Evaluation text, with gold-standard NP chunks marked by square
    # brackets.
    text = """\
[ the/DT little/JJ cat/NN ] sat/VBD on/IN [ the/DT mat/NN ] ./.
[ The/DT cats/NNS ] ./.
[ John/NNP ] saw/VBD [the/DT cat/NN] [the/DT dog/NN] liked/VBD ./.
[ John/NNP ] saw/VBD [the/DT cat/NN] the/DT cat/NN liked/VBD ./."""
    print '*'*75
    print 'Evaluation text:'
    print text
    print '*'*75

    # Use a simple regexp to define regular expressions.
    r1 = parse.ChunkRule(r'<DT>?<JJ>*<NN.*>', 'Chunk NPs')
    cp = parse.RegexpChunk([r1], chunk_node='NP', trace=1)
    parse.demo_eval(cp, text)
    print

    # Use a chink rule to remove everything that's *not* an NP
    r1 = parse.ChunkRule(r'<.*>+', 'Chunk everything')
    r2 = parse.ChinkRule(r'<VB.*>|<IN>|<\.>', 'Unchunk VB and IN and .')
    cp = parse.RegexpChunk([r1, r2], chunk_node='NP', trace=1)
    parse.demo_eval(cp, text)
    print

    # Unchunk non-NP words, and then merge consecutive NPs
    r1 = parse.ChunkRule(r'(<.*>)', 'Chunk each tag')
    r2 = parse.UnChunkRule(r'<VB.*>|<IN>|<.>', 'Unchunk VB? and IN and .')
    r3 = parse.MergeRule(r'<DT|JJ|NN.*>', r'<DT|JJ|NN.*>', 'Merge NPs')
    cp = parse.RegexpChunk([r1,r2,r3], chunk_node='NP', trace=1)
    parse.demo_eval(cp, text)
    print

    # Chunk sequences of NP words, and split them at determiners
    r1 = parse.ChunkRule(r'(<DT|JJ|NN.*>+)', 'Chunk sequences of DT&JJ&NN')
    r2 = parse.SplitRule('', r'<DT>', 'Split before DT')
    cp = parse.RegexpChunk([r1,r2], chunk_node='NP', trace=1)
    parse.demo_eval(cp, text)
    print

    print "============== Cascaded Chunking =============="
    print

    # Build a cascade of chunkers: NPs, then PPs, then VPs, then S.
    np_chunk = parse.ChunkRule(r'<DT|JJ|NN.*>+', 'Chunk sequences of DT, JJ, NN')
    np_parse = parse.RegexpChunk([np_chunk], chunk_node='NP')
    pp_chunk = parse.ChunkRule(r'<IN><NP>', 'Chunk prepositions followed by NP')
    pp_parse = parse.RegexpChunk([pp_chunk], chunk_node='PP')
    vp_chunk = parse.ChunkRule(r'<VB.*><NP|PP|S>+$', 'Chunk verbs and arguments/adjuncts')
    vp_parse = parse.RegexpChunk([vp_chunk], chunk_node='VP')
    s_chunk = parse.ChunkRule(r'<NP><VP>$', 'Chunk NP, VP')
    s_parse = parse.RegexpChunk([s_chunk], chunk_node='S')
    # The VP and S parsers appear twice in the cascade (presumably so
    # structures embedded under the first pass can be found on a
    # second pass -- TODO confirm).
    chunkparsers = [np_parse, pp_parse, vp_parse, s_parse, vp_parse, s_parse]
    text = """John/NNP thinks/VBZ Mary/NN saw/VBD the/DT cat/NN
sit/VB on/IN the/DT mat/NN"""
    ttoks = string2tags(text)
    sent = Tree('S', ttoks)
    # Run the cascade, feeding each parser's output to the next.
    for chunkparser in chunkparsers:
        sent = chunkparser.parse(sent)
    print sent
# Run the demonstration when this module is executed as a script.
if __name__ == '__main__':
    demo()
| Python |
# Natural Language Toolkit: Categories
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Contributed by Rob Speer (NLTK version)
# Steven Bird <sb@csse.unimelb.edu.au> (NLTK-Lite Port)
# Ewan Klein <ewan@inf.ed.ac.uk> (Hooks for semantics)
# Peter Wang <wangp@csse.unimelb.edu.au> (Overhaul)
# URL: <http://nltk.sourceforge.net>
# For license information, see LICENSE.TXT
#
# $Id: category.py 3580 2006-10-20 04:33:07Z ehk $
from en.parser.nltk_lite.parse.featurestructure import *
from en.parser.nltk_lite.parse import cfg
from en.parser.nltk_lite.semantics import logic
class Category(FeatureStructure, cfg.Nonterminal):
"""
A C{Category} is a specialized feature structure, intended for use in
parsing. It can act as a C{Nonterminal}.
A C{Category} differs from a C{FeatureStructure} in these ways:
- Categories may not be re-entrant.
- Categories use value-based equality, while FeatureStructures use
identity-based equality.
- Strings in Categories are compared case-insensitively.
- Categories have one feature marked as the 'head', which prints
differently than other features if it has a value. For example,
in the C{repr()} representation of a Category, the head goes to the
left, on the outside of the brackets. Subclasses of C{Category}
may change the feature name that is designated as the head, which is
_head by default.
- Subclasses of C{Category} may contain a list of I{required features},
which are names of features whose value is None if unspecified. A
Category lacking a feature that is required in it will not unify with
any Category that has that feature. If a required feature's value is
C{None}, it is considered to be not present. (Mixing different
subclasses of C{Category} is probably a bad idea.)
- C{True} and C{False} are allowed as values. A feature named C{foo}
with a value of C{True} is simply expressed as C{+foo}. Similarly, if
it is C{False}, it is expressed as C{-foo}.
"""
headname = '_head'
requiredFeatures = []
def __init__(self, **features):
    """
    Construct a new Category with the given features.

    Any feature named in the class's C{requiredFeatures} list that
    was not supplied is filled in with a value of C{None}, which is
    treated as "not present".
    """
    FeatureStructure.__init__(self, **features)
    self._required = self.__class__.requiredFeatures
    for name in self._required:
        # 'in' replaces the deprecated dict.has_key().
        if name not in self._features:
            self._features[name] = None
    # (A sorted copy of the feature items was previously built here
    # and then discarded -- dead code, now removed.)
    # Caches filled in lazily by freeze().
    self._hash = None
    self._frozen = False
    self._memorepr = None
def required_features(self):
    """
    @return: A list of the names of all required features.
    @rtype: C{list} of C{str}
    """
    return self._required
def __cmp__(self, other):
return cmp(repr(self), repr(other))
def __div__(self, other):
    """
    @return: A new Category based on this one, with its C{/} feature set to
        C{other}.
    """
    # Copy this category's features, add the slash feature, and build
    # a fresh instance of the same class from the result.
    clone = self.deepcopy()
    feats = clone._features
    feats['/'] = other
    return self.__class__(**feats)
def __eq__(self, other):
    """
    @return: True if C{self} and C{other} assign the same value to
        to every feature.  In particular, return true if
        C{self[M{p}]==other[M{p}]} for every feature path M{p} such
        that C{self[M{p}]} or C{other[M{p}]} is a base value (i.e.,
        not a nested Category).
    @rtype: C{bool}
    """
    # Cheap rejections first: class mismatch, then hash mismatch.
    if not other.__class__ == self.__class__:
        return False
    if hash(self) != hash(other):
        return False
    # equal_values may return a non-boolean truth value; the
    # comparison with True coerces it to a real boolean.
    return self.equal_values(other) == True
def __ne__(self, other):
return not (self == other)
def __hash__(self):
if self._hash is not None: return self._hash
items = self._features.items()
items.sort()
return hash(tuple(items))
def freeze(self):
    """
    Make this Category immutable and memoize its hash value and repr
    string, speeding up later comparisons.  Nested Categories are
    frozen first.
    @return: self
    """
    # Recursively freeze any nested Category values.
    for val in self._features.values():
        if isinstance(val, Category) and not val.frozen():
            val.freeze()
    self._hash = hash(self)
    self._memorepr = self._repr({}, {})
    self._frozen = True
    return self
def frozen(self):
    """
    @return: True if this Category has been frozen (made immutable)
        by L{freeze}.
    @rtype: C{bool}
    """
    return self._frozen
def __setitem__(self, name, value):
if self._frozen: raise "Cannot modify a frozen Category"
self._features[name] = value
def symbol(self):
    """
    @return: The node value corresponding to this C{Category}.  A
        Category acts as its own nonterminal symbol, so this is simply
        C{self}.
    @rtype: C{Category}
    """
    return self
def head(self):
    """
    @return: The head of this category (the value shown outside the
        brackets in its string representation), or None if there is
        no head.
    @rtype: C{str} or C{None}
    """
    head_feature = self.__class__.headname
    return self._features.get(head_feature)
def deepcopy(self, memo=None):
    """
    @return: A deep copy of C{self}.  Nested feature structures are
        copied recursively; base values are shared.  (C{memo} is
        accepted for interface compatibility but not used, since
        Categories may not be re-entrant.)
    """
    newcopy = self.__class__()
    target = newcopy._features
    # Copy every feature; recurse into nested feature structures.
    for fname, fval in self._features.items():
        if isinstance(fval, FeatureStructure):
            target[fname] = fval.deepcopy()
        else:
            target[fname] = fval
    return newcopy
def reentrances(self):
    """Categories may not be re-entrant, so this is always empty."""
    return []
def feature_names(self):
    """
    @return: a list of all features that have values.  A required
        feature whose value is None is considered absent and excluded.
    """
    names = self._features.keys()
    return [n for n in names
            if not (n in self._required and self[n] is None)]
def get_feature(self, *args):
    """Like C{__getitem__}, but return a C{StarValue} wildcard instead
    of raising IndexError when the feature path is missing."""
    try:
        return self.__getitem__(*args)
    except IndexError:
        return StarValue()
def has_feature(self, name):
    """@return: True if C{name} is among this category's feature names
    (see L{feature_names})."""
    return name in self.feature_names()
#################################################################
## Variables
#################################################################
def remove_unbound_vars(self):
    """
    @return: A copy of this Category with all unbound variable values
        removed (recursively).  C{self} is left unmodified.
    """
    clone = self.deepcopy()
    clone._remove_unbound_vars()
    return clone
def _remove_unbound_vars(self):
    # In-place helper for remove_unbound_vars: delete every feature whose
    # value is an (unbound) FeatureVariable, recursing into nested
    # Categories.
    # NOTE(review): this deletes from self._features while iterating over
    # .items() -- safe in Python 2 because items() returns a list snapshot,
    # but it would raise RuntimeError under Python 3's dict views.
    for (fname, fval) in self._features.items():
        if isinstance(fval, FeatureVariable):
            del self._features[fname]
        elif isinstance(fval, Category):
            fval._remove_unbound_vars()
#################################################################
## Unification
#################################################################
def _destructively_unify(self, other, bindings, trace=False, depth=0):
    """Delegate to FeatureStructure's unification, forcing
    case-insensitive string comparison (Categories compare strings
    case-insensitively)."""
    FeatureStructure._destructively_unify(
        self, other, bindings,
        trace=trace, ci_str_cmp=True, depth=depth)
#################################################################
## String Representations
#################################################################
def __repr__(self):
"""
@return: A string representation of this feature structure.
"""
if self._memorepr is not None: return self._memorepr
else: return self._repr({}, {})
def _repr(self, reentrances, reentrance_ids):
    # Build the single-line repr: head outside the brackets, remaining
    # features comma-separated inside them, e.g. NP[+pl, case='acc'].
    segments = []
    items = self.feature_names()
    items.sort() # sorting note: keys are unique strings, so we'll
                 # never fall through to comparing values.
    for fname in items:
        # The head is rendered outside the brackets, below.
        if fname == self.__class__.headname: continue
        fval = self[fname]
        if isinstance(fval, bool):
            # Booleans use the shorthand +name / -name notation.
            if fval: segments.append('+%s' % fname)
            else: segments.append('-%s' % fname)
        elif not isinstance(fval, Category):
            segments.append('%s=%r' % (fname, fval))
        else:
            # Nested Category: recurse with the same bookkeeping dicts.
            fval_repr = fval._repr(reentrances, reentrance_ids)
            segments.append('%s=%s' % (fname, fval_repr))
    head = self._features.get(self.__class__.headname)
    if head is None: head = ''
    # A head with no other features prints bare, without brackets.
    if head and not len(segments): return head
    return '%s[%s]' % (head, ', '.join(segments))
def _str(self, reentrances, reentrance_ids):
    # Build the multi-line boxed string form, one feature per line, with
    # the head printed to the left of the opening bracket.
    # This code is very similar to FeatureStructure._str but
    # we print the head feature very differently, so it's hard to
    # combine the two methods.
    # Special case: no features at all.
    if len(self.feature_names()) == 0:
        return ['[]']
    # Special case: only the head feature is present.
    if self.feature_names() == [self.__class__.headname]:
        return ['%s[]' % self[self.__class__.headname]]
    # What's the longest feature name? Use this to align names.
    maxfnamelen = max([len(k) for k in self.feature_names()])
    lines = []
    items = self.feature_names()
    items.sort() # sorting note: keys are unique strings, so we'll
                 # never fall through to comparing values.
    # The head is rendered outside the brackets, not as a feature line.
    if self.__class__.headname in items:
        items.remove(self.__class__.headname)
        # items.insert(0, self.__class__.headname)
    for fname in items:
        fval = self[fname]
        if not isinstance(fval, FeatureStructure):
            # It's not a nested feature structure -- just print it.
            lines.append('%s = %r' % (fname.ljust(maxfnamelen), fval))
        else:
            # It's a new feature structure. Separate it from
            # other values by a blank line.
            if lines and lines[-1] != '': lines.append('')
            # Recursively print the feature's value (fval).
            fval_lines = fval._str(reentrances, reentrance_ids)
            # Indent each line to make room for fname.
            fval_lines = [(' '*(maxfnamelen+3))+l for l in fval_lines]
            # Pick which line we'll display fname on (vertically centered).
            nameline = (len(fval_lines)-1)/2
            fval_lines[nameline] = (
                fname.ljust(maxfnamelen)+' ='+
                fval_lines[nameline][maxfnamelen+2:])
            # Add the feature structure to the output.
            lines += fval_lines
            # Separate FeatureStructures by a blank line.
            lines.append('')
    # Get rid of any excess blank lines.
    if lines[-1] == '': lines = lines[:-1]
    # Add brackets around everything, placing the head text on the
    # vertically-centered line and padding the others to align.
    headline = (len(lines) - 1)/2
    if self.has_feature(self.__class__.headname):
        head = self[self.__class__.headname]
    else:
        head = ''
    maxlen = max([len(line) for line in lines])
    for l in range(len(lines)):
        line = lines[l]
        if l == headline:
            lines[l] = ('%s[ %s%s ]' % (head, line, ' '*(maxlen-len(line))))
        else:
            lines[l] = ('%s[ %s%s ]' % (' '*len(head), line, ' '*(maxlen-len(line))))
    return lines
#################################################################
## Parsing
#################################################################
# Regular expressions for parsing.
# Extend the expressions already present in FeatureStructure._PARSE_RE
_PARSE_RE = {'categorystart': re.compile(r'\s*([^\s\(\)"\'\-=,\[\]]*)\s*\['),
             'bool': re.compile(r'\s*([-\+])'),
             'arrow': re.compile(r'\s*->\s*'),
             #'application': re.compile(r'(app)\((\?[a-z][a-z]*)\s*,\s*(\?[a-z][a-z]*)\)'),
             'disjunct': re.compile(r'\s*\|\s*'),
             'whitespace': re.compile(r'\s*')}
# Merge in the parent class's expressions.  The assert guards against a
# key defined in both tables being silently overwritten.
for (k, v) in FeatureStructure._PARSE_RE.iteritems():
    assert k not in _PARSE_RE
    _PARSE_RE[k] = v
# [classmethod]
def _parse(cls, s, position=0, reentrances=None):
    """
    Helper function that parses a Category.
    @param s: The string to parse.
    @param position: The position in the string to start parsing.
    @param reentrances: A dictionary from reentrance ids to values.
    @return: A tuple (val, pos) of the feature structure created
        by parsing and the position where the parsed feature
        structure ends.
    @raise ValueError: On a malformed category string; args are the
        name of the expected token and the position of the failure.
    """
    # A set of useful regular expressions (precompiled)
    _PARSE_RE = cls._PARSE_RE
    # Find the head, if there is one.
    match = _PARSE_RE['name'].match(s, position)
    if match is not None:
        head = match.group(1)
        position = match.end()
    else: head = None
    # Check that the name is followed by an open bracket.
    if position >= len(s) or s[position] != '[':
        # A bare name with no feature list, e.g. 'NP'.
        return cls(**{cls.headname: head}), position
    position += 1
    # If it's immediately followed by a close bracket, then just
    # return an empty feature structure.
    match = _PARSE_RE['bracket'].match(s, position)
    if match is not None:
        if head is None: return cls(), match.end()
        else: return cls(**{cls.headname: head}), match.end()
    # Build a list of the features defined by the structure.
    # Each feature has one of the three following forms:
    #     name = value
    #     +name
    #     -name
    features = {}
    if head is not None: features[cls.headname] = head
    while position < len(s):
        # Use these variables to hold info about the feature:
        # (NOTE(review): 'target' is assigned but never used here.)
        name = target = val = None
        # Is this a shorthand boolean value?
        match = _PARSE_RE['bool'].match(s, position)
        if match is not None:
            if match.group(1) == '+': val = True
            else: val = False
            position = match.end()
        # Find the next feature's name.
        match = _PARSE_RE['name'].match(s, position)
        if match is None: raise ValueError('feature name', position)
        name = match.group(1)
        position = match.end()
        # If it's not a shorthand boolean, it must be an assignment.
        if val is None:
            match = _PARSE_RE['assign'].match(s, position)
            if match is None: raise ValueError('equals sign', position)
            position = match.end()
            val, position = cls._parseval(s, position, reentrances)
        features[name] = val
        # Check for a close bracket
        match = _PARSE_RE['bracket'].match(s, position)
        if match is not None:
            return cls(**features), match.end()
        # Otherwise, there should be a comma
        match = _PARSE_RE['comma'].match(s, position)
        if match is None: raise ValueError('comma', position)
        position = match.end()
    # We never saw a close bracket.
    raise ValueError('close bracket', position)
# [classmethod]
def _parseval(cls, s, position, reentrances):
    """
    Helper function that parses a feature value.  Currently
    supports: None, bools, integers, variables, strings, nested feature
    structures.
    @param s: The string to parse.
    @param position: The position in the string to start parsing.
    @param reentrances: A dictionary from reentrance ids to values.
    @return: A tuple (val, pos) of the value created by parsing
        and the position where the parsed value ends.
    @raise ValueError: If no value can be parsed at C{position}.
    """
    # A set of useful regular expressions (precompiled)
    _PARSE_RE = cls._PARSE_RE
    # End of string (error)
    if position == len(s): raise ValueError('value', position)
    # Semantic value of the form <app(?x, ?y) >'; return an ApplicationExpression
    match = _PARSE_RE['application'].match(s, position)
    if match is not None:
        fun = ParserSubstitute(match.group(2)).next()
        arg = ParserSubstitute(match.group(3)).next()
        return ApplicationExpressionSubst(fun, arg), match.end()
    # other semantic value enclosed by '< >'; return value given by the lambda expr parser
    match = _PARSE_RE['semantics'].match(s, position)
    if match is not None:
        return ParserSubstitute(match.group(1)).next(), match.end()
    # String value
    if s[position] in "'\"":
        start = position
        quotemark = s[position:position+1]
        position += 1
        while 1:
            match = _PARSE_RE['stringmarker'].search(s, position)
            if not match: raise ValueError('close quote', position)
            position = match.end()
            # A backslash escapes the following character.
            if match.group() == '\\': position += 1
            elif match.group() == quotemark:
                # SECURITY NOTE(review): eval() is used to decode the
                # quoted literal.  This executes arbitrary code if the
                # grammar source is untrusted -- consider a dedicated
                # string-literal decoder instead.
                return eval(s[start:position]), position
    # Nested category
    if _PARSE_RE['categorystart'].match(s, position) is not None:
        return cls._parse(s, position, reentrances)
    # Variable
    match = _PARSE_RE['var'].match(s, position)
    if match is not None:
        return FeatureVariable.parse(match.group()), match.end()
    # None
    match = _PARSE_RE['none'].match(s, position)
    if match is not None:
        return None, match.end()
    # Integer value
    match = _PARSE_RE['int'].match(s, position)
    if match is not None:
        return int(match.group()), match.end()
    # Alphanumeric symbol (must be checked after integer)
    match = _PARSE_RE['symbol'].match(s, position)
    if match is not None:
        return cls(**{cls.headname: match.group()}), match.end()
    # We don't know how to parse this value.
    raise ValueError('value', position)
# [classmethod]
# Used by GrammarFile
# Used by GrammarFile
def parse_rules(cls, s):
    """
    Parse a L{CFG} line involving C{Categories}.  A line has this form:
    C{lhs -> rhs | rhs | ...}
    where C{lhs} is a Category, and each C{rhs} is a sequence of
    Categories.
    @returns: a list of C{Productions}, one for each C{rhs}.
    @raise ValueError: If the line cannot be parsed; the message points
        at the offending position in C{s}.
    """
    _PARSE_RE = cls._PARSE_RE
    position = 0
    # Parse the left-hand side.  (Python 2 'except X, e' syntax.)
    try:
        lhs, position = cls._parse(s, position)
    except ValueError, e:
        # e.args is (expected-token-name, position); build a caret
        # diagnostic pointing at the failure.
        estr = ('Error parsing field structure\n\n\t' +
                s + '\n\t' + ' '*e.args[1] + '^ ' +
                'Expected %s\n' % e.args[0])
        raise ValueError, estr
    # Freeze so the lhs can be hashed / compared cheaply.
    lhs.freeze()
    match = _PARSE_RE['arrow'].match(s, position)
    if match is None: raise ValueError('arrow', position)
    else: position = match.end()
    rules = []
    # Each '|'-separated alternative becomes its own Production.
    while position < len(s):
        rhs = []
        while position < len(s) and _PARSE_RE['disjunct'].match(s, position) is None:
            try:
                val, position = cls._parseval(s, position, {})
            except ValueError, e:
                estr = ('Error parsing field structure\n\n\t' +
                        s + '\n\t' + ' '*e.args[1] + '^ ' +
                        'Expected %s\n' % e.args[0])
                raise ValueError, estr
            if isinstance(val, Category): val.freeze()
            rhs.append(val)
            position = _PARSE_RE['whitespace'].match(s, position).end()
        rules.append(cfg.Production(lhs, rhs))
        if position < len(s):
            match = _PARSE_RE['disjunct'].match(s, position)
            position = match.end()
    # Special case: if there's nothing after the arrow, it is one rule with
    # an empty RHS, instead of no rules.
    if len(rules) == 0: rules = [cfg.Production(lhs, ())]
    return rules
# Pre-2.4-style classmethod conversion (no decorator syntax available).
_parseval=classmethod(_parseval)
_parse=classmethod(_parse)
parse_rules=classmethod(parse_rules)
class GrammarCategory(Category):
    """
    A class of C{Category} for use in parsing.
    The name of the head feature in a C{GrammarCategory} is C{pos} (for "part
    of speech").  There is one required feature, C{/}, which is intended to
    indicate a type of phrase that is missing from the grammatical structure.
    In addition, GrammarCategories are displayed and parse differently, to be
    consistent with NLP teaching materials: the value of the C{/} feature can
    be written with a slash after the right bracket, so that the string
    representation looks like: C{head[...]/value}.
    An example of a C{GrammarCategory} is C{VP[+fin]/NP}, for a verb phrase
    that is finite and has an omitted noun phrase inside it.
    """
    # Head feature is 'pos'; '/' must be present (None when absent).
    headname = 'pos'
    requiredFeatures = ['/']
    def _repr(self, reentrances, reentrance_ids):
        # Like Category._repr, but the head and the '/' feature are
        # rendered outside the brackets: head[features]/slash.
        segments = []
        items = self.feature_names()
        items.sort() # sorting note: keys are unique strings, so we'll
                     # never fall through to comparing values.
        for fname in items:
            # Head and slash are printed specially below.
            if fname == self.__class__.headname or fname == '/': continue
            fval = self[fname]
            if isinstance(fval, bool):
                if fval: segments.append('+%s' % fname)
                else: segments.append('-%s' % fname)
            elif isinstance(fval, logic.Expression):
                # Semantic expressions are shown via their str() form.
                segments.append('%s=%r' % (fname, fval.__str__()))
            elif not isinstance(fval, Category):
                segments.append('%s=%r' % (fname, fval))
            else:
                fval_repr = fval._repr(reentrances, reentrance_ids)
                segments.append('%s=%s' % (fname, fval_repr))
        head = self._features.get(self.__class__.headname)
        if head is None: head = ''
        if not len(segments): features = ''
        else: features = "[%s]" % ', '.join(segments)
        slash = self._features.get('/')
        if slash is None: slash = ''
        else: slash = '/%r' % slash
        return '%s%s%s' % (head, features, slash)
    # Regular expressions for parsing.
    # Extend the expressions from Category and FeatureStructure.
    # Assumes that Applications in sem always take FeatureVariable arguments
    _PARSE_RE = {'semantics': re.compile(r'<([^>]+)>'),
                 'application': re.compile(r'<(app)\((\?[a-z][a-z]*)\s*,\s*(\?[a-z][a-z]*)\)>'),
                 'slash': re.compile(r'\s*/\s*')}
    # Merge in Category's table; the assert guards against collisions.
    for (k, v) in Category._PARSE_RE.iteritems():
        assert k not in _PARSE_RE
        _PARSE_RE[k] = v
    # These we actually do want to override: names may not contain '/',
    # and a category may be followed by '[' or a slash value.
    _PARSE_RE['name'] = re.compile(r'\s*([^\s\(\)"\'\-=,\[\]/]+)\s*')
    _PARSE_RE['categorystart'] = re.compile(r'\s*([^\s\(\)"\'\-=,\[\]/]*)\s*(\[|/)')
    # [classmethod]
    def _parse(cls, s, position=0, reentrances=None):
        """
        Parse a GrammarCategory starting at C{position} in C{s}.
        Accepts C{head}, C{head[features]}, and an optional trailing
        C{/value} slash feature.
        @return: A tuple (category, pos) of the parsed GrammarCategory
            and the position where it ends.
        """
        # A set of useful regular expressions (precompiled)
        _PARSE_RE = cls._PARSE_RE
        features = {}
        # Find the head, if there is one.
        match = _PARSE_RE['name'].match(s, position)
        if match is not None:
            features[cls.headname] = match.group(1)
            position = match.end()
        # If the name is followed by an open bracket, start looking for
        # features.
        if position < len(s) and s[position] == '[':
            position += 1
            # Build a list of the features defined by the structure.
            # Each feature has one of the three following forms:
            #     name = value
            #     +name
            #     -name
            while True:
                if not position < len(s):
                    raise ValueError('close bracket', position)
                # Use these variables to hold info about the feature:
                # (NOTE(review): 'target' is assigned but never used.)
                name = target = val = None
                # Check for a close bracket at the beginning
                match = _PARSE_RE['bracket'].match(s, position)
                if match is not None:
                    position = match.end()
                    # Get out and check for a slash value.
                    break
                # Is this a shorthand boolean value?
                match = _PARSE_RE['bool'].match(s, position)
                if match is not None:
                    if match.group(1) == '+': val = True
                    else: val = False
                    position = match.end()
                # Find the next feature's name.
                match = _PARSE_RE['name'].match(s, position)
                if match is None: raise ValueError('feature name', position)
                name = match.group(1)
                position = match.end()
                # If it's not a shorthand boolean, it must be an assignment.
                if val is None:
                    match = _PARSE_RE['assign'].match(s, position)
                    if match is None: raise ValueError('equals sign', position)
                    position = match.end()
                    val, position = cls._parseval(s, position, reentrances)
                features[name] = val
                # Check for a close bracket
                match = _PARSE_RE['bracket'].match(s, position)
                if match is not None:
                    position = match.end()
                    # Get out and check for a slash value.
                    break
                # Otherwise, there should be a comma
                match = _PARSE_RE['comma'].match(s, position)
                if match is None: raise ValueError('comma', position)
                position = match.end()
        # Check for a slash value
        match = _PARSE_RE['slash'].match(s, position)
        if match is not None:
            position = match.end()
            # NOTE(review): reentrances is passed as 0 here rather than
            # the dict used elsewhere -- verify this is intentional.
            slash, position = cls._parseval(s, position, 0)
            features['/'] = slash
        return cls(**features), position
    _parse = classmethod(_parse)
class ParserSubstitute(logic.Parser):
    """
    A lambda-calculus expression parser whose application expressions
    support the SubstituteBindingsI interface.
    """
    def make_ApplicationExpression(self, first, second):
        """Build an application node using the bindings-aware subclass."""
        return ApplicationExpressionSubst(first, second)
class ApplicationExpressionSubst(logic.ApplicationExpression, SubstituteBindingsI):
    """
    A lambda application expression, extended to implement the
    SubstituteBindingsI interface.
    """
    def substitute_bindings(self, bindings):
        """Return this expression with every bound feature variable
        replaced by its value from C{bindings}; C{self} is unchanged."""
        result = self
        for semvar in self.variables():
            varstr = str(semvar)
            # Only '?'-prefixed variables are FeatureVariables; others
            # are ignored.
            if not varstr.startswith('?'):
                continue
            fvar = FeatureVariable.parse(varstr)
            if bindings.is_bound(fvar):
                result = result.replace(semvar, bindings.lookup(fvar))
        return result
def demo():
print "Category(pos='n', agr=Category(number='pl', gender='f')):"
print
print Category(pos='n', agr=Category(number='pl', gender='f'))
print
print "GrammarCategory.parse('VP[+fin]/NP[+pl]'):"
print
print GrammarCategory.parse('VP[+fin]/NP[+pl]')
print
# Run the demonstration when this module is executed as a script.
if __name__ == '__main__':
    demo()
| Python |
# Natural Language Toolkit: Parsers
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Steven Bird <sb@csse.unimelb.edu.au>
# Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
#
"""
Classes and interfaces for producing tree structures that represent
the internal organization of a text. This task is known as X{parsing}
the text, and the resulting tree structures are called the text's
X{parses}. Typically, the text is a single sentence, and the tree
structure represents the syntactic structure of the sentence.
However, parsers can also be used in other domains. For example,
parsers can be used to derive the morphological structure of the
morphemes that make up a word, or to derive the discourse structure
for a set of utterances.
Sometimes, a single piece of text can be represented by more than one
tree structure. Texts represented by more than one tree structure are
called X{ambiguous} texts. Note that there are actually two ways in
which a text can be ambiguous:
- The text has multiple correct parses.
- There is not enough information to decide which of several
candidate parses is correct.
However, the parser module does I{not} distinguish these two types of
ambiguity.
The parser module defines C{ParseI}, a standard interface for parsing
texts; and two simple implementations of that interface,
C{ShiftReduce} and C{RecursiveDescent}. It also contains
three sub-modules for specialized kinds of parsing:
- C{nltk.parser.chart} defines chart parsing, which uses dynamic
programming to efficiently parse texts.
- C{nltk.parser.chunk} defines chunk parsing, which identifies
non-overlapping linguistic groups in a text.
- C{nltk.parser.probabilistic} defines probabilistic parsing, which
associates a probability with each parse.
"""
##//////////////////////////////////////////////////////
## Parser Interface
##//////////////////////////////////////////////////////
class ParseI(object):
    """
    A processing class for deriving trees that represent possible
    structures for a sequence of tokens.  These tree structures are
    known as X{parses}.  Typically, parsers are used to derive syntax
    trees for sentences.  But parsers can also be used to derive other
    kinds of tree structure, such as morphological trees and discourse
    structures.
    """
    def parse(self, sent):
        """
        Derive a parse tree that represents the structure of the given
        sentence's words, and return a Tree.  If no parse is found,
        output C{None}; if multiple parses are found, output the best
        one.

        The parsed trees derive a structure for the subtokens, but do
        not modify them.  In particular, the leaves of the subtree
        should be equal to the list of subtokens.

        @param sent: The sentence to be parsed
        @type sent: L{list} of L{string}
        """
        raise NotImplementedError()
    def get_parse(self, sent):
        """
        @return: A parse tree that represents the structure of the
            sentence, or C{None} if no parse is found.
        @rtype: L{Tree}
        @param sent: The sentence to be parsed
        @type sent: L{list} of L{string}
        """
    def get_parse_list(self, sent):
        """
        @return: A list of the parse trees for the sentence, sorted from
            most likely to least likely when possible.
        @rtype: C{list} of L{Tree}
        @param sent: The sentence to be parsed
        @type sent: L{list} of L{string}
        """
    def get_parse_probs(self, sent):
        """
        @return: A probability distribution over the parse trees for the
            sentence.
        @rtype: L{ProbDistI}
        @param sent: The sentence to be parsed
        @type sent: L{list} of L{string}
        """
    def get_parse_dict(self, sent):
        """
        @return: A dictionary mapping from the parse trees for the
            sentence to numeric scores.
        @rtype: C{dict}
        @param sent: The sentence to be parsed
        @type sent: L{list} of L{string}
        """
##//////////////////////////////////////////////////////
## Abstract Base Class for Parsers
##//////////////////////////////////////////////////////
class AbstractParse(ParseI):
    """
    An abstract base class for parsers.  C{AbstractParse} provides
    a default implementation for:
      - L{parse} (based on C{get_parse})
      - L{get_parse_list} (based on C{get_parse})
      - L{get_parse} (based on C{get_parse_list})
    Note that subclasses must override either C{get_parse} or
    C{get_parse_list} (or both), to avoid infinite recursion.
    """
    def __init__(self):
        """
        Construct a new parser.
        @raise AssertionError: If this class is instantiated directly
            rather than through a subclass.
        """
        # Make sure we're not directly instantiated.
        # (Parenthesized raise: valid in both Python 2 and 3, unlike the
        # original 'raise AssertionError, "..."' comma form.)
        if self.__class__ == AbstractParse:
            raise AssertionError("Abstract classes can't be instantiated")
    def parse(self, token):
        """Return the single best parse for C{token} (see L{get_parse})."""
        return self.get_parse(token)
    def grammar(self):
        """@return: The grammar used by this parser."""
        return self._grammar
    def get_parse(self, token):
        """Return the first parse from C{get_parse_list}, or C{None}
        when no parse is found."""
        trees = self.get_parse_list(token)
        if len(trees) == 0: return None
        else: return trees[0]
    def get_parse_list(self, token):
        """Return C{get_parse}'s result as a (possibly empty) list."""
        tree = self.get_parse(token)
        if tree is None: return []
        else: return [tree]
from tree import *
from cfg import *
from pcfg import *
from featurestructure import *
from sr import *
from rd import *
from chunk import *
from chart import *
from viterbi import *
| Python |
# Natural Language Toolkit: Text Trees
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@csse.unimelb.edu.au>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Class for representing hierarchical language structures, such as
syntax trees and morphological trees.
"""
import re, types
from en.parser.nltk_lite import tokenize
from en.parser.nltk_lite.parse import cfg
from en.parser.nltk_lite.probability import ProbabilisticMixIn
######################################################################
## Trees
######################################################################
class Tree(list):
"""
A hierarchical structure.
Each C{Tree} represents a single hierarchical grouping of
leaves and subtrees. For example, each constituent in a syntax
tree is represented by a single C{Tree}.
A tree's children are encoded as a C{list} of leaves and subtrees,
where a X{leaf} is a basic (non-tree) value; and a X{subtree} is a
nested C{Tree}.
Any other properties that a C{Tree} defines are known as
X{node properties}, and are used to add information about
individual hierarchical groupings. For example, syntax trees use a
NODE property to label syntactic constituents with phrase tags,
such as \"NP\" and\"VP\".
Several C{Tree} methods use X{tree positions} to specify
children or descendants of a tree. Tree positions are defined as
follows:
- The tree position M{i} specifies a C{Tree}'s M{i}th child.
- The tree position C{()} specifies the C{Tree} itself.
- If C{M{p}} is the tree position of descendant M{d}, then
C{M{p}+(M{i})} specifies the C{M{i}}th child of M{d}.
I.e., every tree position is either a single index C{M{i}},
specifying C{self[M{i}]}; or a sequence C{(M{i1}, M{i2}, ...,
M{iN})}, specifying
C{self[M{i1}][M{i2}]...[M{iN}]}.
"""
def __init__(self, node, children):
"""
Construct a new tree.
"""
if isinstance(children, (str, unicode)):
raise TypeError, 'children should be a list, not a string'
list.__init__(self, children)
self.node = node
#////////////////////////////////////////////////////////////
# Comparison operators
#////////////////////////////////////////////////////////////
def __cmp__(self, other):
c = cmp(self.node, other.node)
if c != 0: return c
else: return list.__cmp__(self, other)
def __eq__(self, other):
if not isinstance(other, Tree): return False
return self.node == other.node and list.__eq__(self, other)
def __ne__(self, other):
return not (self == other)
def __lt__(self, other):
return cmp(self, other) < 0
def __le__(self, other):
return cmp(self, other) <= 0
def __gt__(self, other):
return cmp(self, other) > 0
def __ge__(self, other):
return cmp(self, other) >= 0
#////////////////////////////////////////////////////////////
# Disabled list operations
#////////////////////////////////////////////////////////////
def __mul__(self, v):
raise TypeError('Tree does not support multiplication')
def __rmul__(self, v):
raise TypeError('Tree does not support multiplication')
def __add__(self, v):
raise TypeError('Tree does not support addition')
def __radd__(self, v):
raise TypeError('Tree does not support addition')
#////////////////////////////////////////////////////////////
# Indexing (with support for tree positions)
#////////////////////////////////////////////////////////////
def __getitem__(self, index):
if isinstance(index, int):
return list.__getitem__(self, index)
else:
if len(index) == 0:
return self
elif len(index) == 1:
return self[int(index[0])]
else:
return self[int(index[0])][index[1:]]
def __setitem__(self, index, value):
if isinstance(index, int):
return list.__setitem__(self, index, value)
else:
if len(index) == 0:
raise IndexError('The tree position () may not be '
'assigned to.')
elif len(index) == 1:
self[index[0]] = value
else:
self[index[0]][index[1:]] = value
def __delitem__(self, index):
if isinstance(index, int):
return list.__delitem__(self, index)
else:
if len(index) == 0:
raise IndexError('The tree position () may not be deleted.')
elif len(index) == 1:
del self[index[0]]
else:
del self[index[0]][index[1:]]
#////////////////////////////////////////////////////////////
# Basic tree operations
#////////////////////////////////////////////////////////////
def leaves(self):
"""
@return: a list containing this tree's leaves. The
order of leaves in the tuple reflects the order of the
leaves in the tree's hierarchical structure.
@rtype: C{list}
"""
leaves = []
for child in self:
if isinstance(child, Tree):
leaves.extend(child.leaves())
else:
leaves.append(child)
return leaves
def flatten(self):
"""
@return: a tree consisting of this tree's root connected directly to
its leaves, omitting all intervening non-terminal nodes.
@rtype: C{Tree}
"""
return Tree(self.node, self.leaves())
def height(self):
"""
@return: The height of this tree. The height of a tree
containing no children is 1; the height of a tree
containing only leaves is 2; and the height of any other
tree is one plus the maximum of its children's
heights.
@rtype: C{int}
"""
max_child_height = 0
for child in self:
if isinstance(child, Tree):
max_child_height = max(max_child_height, child.height())
else:
max_child_height = max(max_child_height, 1)
return 1 + max_child_height
def treepositions(self, order='preorder'):
"""
@param order: One of: C{preorder}, C{postorder}, C{bothorder},
C{leaves}.
"""
positions = []
if order in ('preorder', 'bothorder'): positions.append( () )
for i, child in enumerate(self):
if isinstance(child, Tree):
childpos = child.treepositions(order)
positions.extend([(i,)+p for p in childpos])
else:
positions.append( (i,) )
if order in ('postorder', 'bothorder'): positions.append( () )
return positions
def subtrees(self, filter=None):
"""
Generate all the subtrees of this tree, optionally restricted
to trees matching the filter function.
@type: filter: C{function}
@param: filter: the function to filter all local trees
"""
if not filter or filter(self):
yield self
for child in self:
if isinstance(child, Tree):
for subtree in child.subtrees(filter):
yield subtree
def productions(self):
"""
Generate the productions that correspond to the non-terminal nodes of the tree.
For each subtree of the form (P: C1 C2 ... Cn) this produces a production of the
form P -> C1 C2 ... Cn.
@rtype: list of C{cfg.Production}s
"""
if not isinstance(self.node, str):
raise TypeError, 'Productions can only be generated from trees having node labels that are strings'
prods = [cfg.Production(cfg.Nonterminal(self.node), _child_names(self))]
for child in self:
if isinstance(child, Tree):
prods += child.productions()
return prods
#////////////////////////////////////////////////////////////
# Convert, copy
#////////////////////////////////////////////////////////////
# [classmethod]
def convert(cls, val):
"""
Convert a tree between different subtypes of Tree. C{cls} determines
which class will be used to encode the new tree.
@type val: L{Tree}
@param val: The tree that should be converted.
@return: The new C{Tree}.
"""
if isinstance(val, Tree):
children = [cls.convert(child) for child in val]
return cls(val.node, children)
else:
return val
convert = classmethod(convert)
def copy(self, deep=False):
if not deep: return self.__class__(self.node, self)
else: return self.__class__.convert(self)
def _frozen_class(self): return ImmutableTree
def freeze(self, leaf_freezer=None):
frozen_class = self._frozen_class()
if leaf_freezer is None:
newcopy = frozen_class.convert(self)
else:
newcopy = self.copy(deep=True)
for pos in newcopy.treepositions('leaves'):
newcopy[pos] = leaf_freezer(newcopy[pos])
newcopy = frozen_class.convert(newcopy)
hash(newcopy) # Make sure the leaves are hashable.
return newcopy
#////////////////////////////////////////////////////////////
# Visualization & String Representation
#////////////////////////////////////////////////////////////
def draw(self):
"""
Open a new window containing a graphical diagram of this tree.
"""
from en.parser.nltk_lite.draw.tree import draw_trees
draw_trees(self)
def __repr__(self):
childstr = ' '.join([repr(c) for c in self])
return '(%s: %s)' % (repr(self.node), childstr)
def __str__(self):
return self.pp()
def _ppflat(self, nodesep, parens, quotes):
childstrs = []
for child in self:
if isinstance(child, Tree):
childstrs.append(child._ppflat(nodesep, parens, quotes))
elif isinstance(child, str) and not quotes:
childstrs.append('%s' % child)
else:
childstrs.append('%s' % child.__repr__())
return '%s%s%s %s%s' % (parens[0], self.node, nodesep,
' '.join(childstrs), parens[1])
    def pp(self, margin=70, indent=0, nodesep=':', parens='()', quotes=True):
        """
        @return: A pretty-printed string representation of this tree.
        @rtype: C{string}
        @param margin: The right margin at which to do line-wrapping.
        @type margin: C{int}
        @param indent: The indentation level at which printing
            begins.  This number is used to decide how far to indent
            subsequent lines.
        @type indent: C{int}
        @param nodesep: A string that is used to separate the node
            from the children.  E.g., the default value C{':'} gives
            trees like C{(S: (NP: I) (VP: (V: saw) (NP: it)))}.
        @param parens: A two-element sequence giving the opening and
            closing brackets.
        @param quotes: If false, leaf strings are printed without
            repr() quoting.
        """
        # Try writing it on one line.
        s = self._ppflat(nodesep, parens, quotes)
        if len(s)+indent < margin:
            return s
        # If it doesn't fit on one line, then write it on multi-lines,
        # one child per line, indented two spaces per level.
        s = '%s%s%s' % (parens[0], self.node, nodesep)
        for child in self:
            if isinstance(child, Tree):
                s += '\n'+' '*(indent+2)+child.pp(margin, indent+2,
                                                  nodesep, parens, quotes)
            else:
                s += '\n'+' '*(indent+2)+repr(child)
        return s+parens[1]
def pp_treebank(self, margin=70, indent=0):
return self.pp(margin, indent, nodesep='', quotes=False)
    def pp_latex_qtree(self):
        r"""
        Returns a representation of the tree compatible with the
        LaTeX qtree package. This consists of the string C{\Tree}
        followed by the parse tree represented in bracketed notation.
        For example, the following result was generated from a parse tree of
        the sentence C{The announcement astounded us}::
            \Tree [.I'' [.N'' [.D The ] [.N' [.N announcement ] ] ]
                [.I' [.V'' [.V' [.V astounded ] [.N'' [.N' [.N us ] ] ] ] ] ] ]
        See U{http://www.ling.upenn.edu/advice/latex.html} for the LaTeX
        style file for the qtree package.
        @return: A latex qtree representation of this tree.
        @rtype: C{string}
        """
        # indent=6 aligns wrapped lines under the 6-character '\Tree ' prefix.
        return r'\Tree ' + self.pp(indent=6, nodesep='', parens=('[.', ' ]'))
class ImmutableTree(Tree):
    """
    A C{Tree} that may not be modified after construction, and which is
    therefore hashable.  Every mutating operation raises C{ValueError}.

    Bug fix: the mutating special methods previously took no arguments
    beyond C{self} (e.g. C{__setitem__(self)}), so an expression such as
    C{t[0] = x} raised C{TypeError} (wrong argument count) instead of the
    intended C{ValueError}.  They now have the signatures Python actually
    calls them with.
    """
    def __setitem__(self, index, value):
        raise ValueError('ImmutableTrees may not be modified')
    def __setslice__(self, start, stop, value):
        raise ValueError('ImmutableTrees may not be modified')
    def __delitem__(self, index):
        raise ValueError('ImmutableTrees may not be modified')
    def __delslice__(self, start, stop):
        raise ValueError('ImmutableTrees may not be modified')
    def __iadd__(self, other):
        raise ValueError('ImmutableTrees may not be modified')
    def __imul__(self, other):
        raise ValueError('ImmutableTrees may not be modified')
    def append(self, v):
        raise ValueError('ImmutableTrees may not be modified')
    def extend(self, v):
        raise ValueError('ImmutableTrees may not be modified')
    def pop(self, v=None):
        raise ValueError('ImmutableTrees may not be modified')
    def remove(self, v):
        raise ValueError('ImmutableTrees may not be modified')
    def reverse(self):
        raise ValueError('ImmutableTrees may not be modified')
    def sort(self):
        raise ValueError('ImmutableTrees may not be modified')
    def __hash__(self):
        # Hash on (node, children) so equal trees hash equally.
        return hash((self.node, tuple(self)))
######################################################################
## Probabilistic trees
######################################################################
class ProbabilisticTree(Tree, ProbabilisticMixIn):
    """A C{Tree} that also carries a probability (via ProbabilisticMixIn)."""
    def __init__(self, node, children, **prob_kwargs):
        ProbabilisticMixIn.__init__(self, **prob_kwargs)
        Tree.__init__(self, node, children)
    # The inherited methods below are overridden so that they take the
    # probability into account.
    def _frozen_class(self):
        return ImmutableProbabilisticTree
    def __repr__(self):
        return '%s (p=%s)' % (Tree.__repr__(self), self.prob())
    def __str__(self):
        return '%s (p=%s)' % (self.pp(margin=60), self.prob())
    def __cmp__(self, other):
        result = Tree.__cmp__(self, other)
        if result != 0:
            return result
        # Trees with equal structure are ordered by probability.
        return cmp(self.prob(), other.prob())
    def __eq__(self, other):
        if not isinstance(other, Tree):
            return False
        return Tree.__eq__(self, other) and self.prob() == other.prob()
    def copy(self, deep=False):
        if deep:
            return self.__class__.convert(self)
        return self.__class__(self.node, self, prob=self.prob())
    @classmethod
    def convert(cls, val):
        if not isinstance(val, Tree):
            return val
        converted_children = [cls.convert(child) for child in val]
        if isinstance(val, ProbabilisticMixIn):
            return cls(val.node, converted_children, prob=val.prob())
        # Non-probabilistic trees are given probability 1.0.
        return cls(val.node, converted_children, prob=1.0)
class ImmutableProbabilisticTree(ImmutableTree, ProbabilisticMixIn):
    """An immutable (hashable) C{Tree} that also carries a probability."""
    def __init__(self, node, children, **prob_kwargs):
        ProbabilisticMixIn.__init__(self, **prob_kwargs)
        ImmutableTree.__init__(self, node, children)
    # We have to patch up these methods to make them work right:
    def _frozen_class(self):
        return ImmutableProbabilisticTree
    def __repr__(self):
        return '%s (p=%s)' % (Tree.__repr__(self), self.prob())
    def __str__(self):
        return '%s (p=%s)' % (self.pp(margin=60), self.prob())
    def __cmp__(self, other):
        c = Tree.__cmp__(self, other)
        if c != 0: return c
        return cmp(self.prob(), other.prob())
    def __eq__(self, other):
        if not isinstance(other, Tree): return False
        return Tree.__eq__(self, other) and self.prob()==other.prob()
    def copy(self, deep=False):
        if not deep: return self.__class__(self.node, self, prob=self.prob())
        else: return self.__class__.convert(self)
    def convert(cls, val):
        if isinstance(val, Tree):
            children = [cls.convert(child) for child in val]
            if isinstance(val, ProbabilisticMixIn):
                return cls(val.node, children, prob=val.prob())
            else:
                # Use float 1.0 (was int 1) for consistency with the
                # otherwise-identical ProbabilisticTree.convert.
                return cls(val.node, children, prob=1.0)
        else:
            return val
    convert = classmethod(convert)
def _child_names(tree):
    """Return tree's children as production RHS elements: subtrees become
    C{cfg.Nonterminal}s named after their node; leaves pass through as-is."""
    return [cfg.Nonterminal(child.node) if isinstance(child, Tree) else child
            for child in tree]
######################################################################
## Parsing
######################################################################
def bracket_parse(s):
    """
    Parse a treebank string and return a tree.  Trees are represented
    as nested brackettings, e.g. (S (NP (NNP John)) (VP (V runs))).
    @return: A tree corresponding to the string representation.
    @rtype: C{tree}
    @param s: The string to be converted
    @type s: C{string}
    """
    SPACE = re.compile(r'\s*')
    WORD = re.compile(r'\s*([^\s\(\)]*)\s*')
    # Skip any initial whitespace.
    pos = SPACE.match(s).end()
    stack = []
    while pos < len(s):
        ch = s[pos]
        if ch == '(':
            # Open a new subtree whose node label is the following word.
            match = WORD.match(s, pos+1)
            stack.append(Tree(match.group(1), []))
            pos = match.end()
        elif ch == ')':
            # Close the current subtree.
            pos = SPACE.match(s, pos+1).end()
            if len(stack) == 1:
                if pos != len(s):
                    raise ValueError
                tree = stack[0]
                # Strip a redundant ''-labelled wrapper, e.g.
                # "((S (NP ...) (VP ...)))".
                if tree.node == '':
                    tree = tree[0]
                return tree
            stack[-2].append(stack[-1])
            stack.pop()
        else:
            # Leaf token: attach it to the current subtree.
            match = WORD.match(s, pos)
            stack[-1].append(match.group(1))
            pos = match.end()
    raise ValueError('mismatched parens')
def chunk(s, chunk_node="NP", top_node="S"):
    """
    Divide a string of chunked tagged text into
    chunks and unchunked tokens, and produce a C{Tree}.
    Chunks are marked by square brackets (C{[...]}).  Words are
    delineated by whitespace, and each word should have the form
    C{I{text}/I{tag}}.  Words that do not contain a slash are
    assigned a C{tag} of C{None}.
    @return: A tree corresponding to the string representation.
    @rtype: C{tree}
    @param s: The string to be converted
    @type s: C{string}
    @param chunk_node: The label to use for chunk nodes
    @type chunk_node: C{string}
    @param top_node: The label to use for the root of the tree
    @type top_node: C{string}
    """
    WORD_OR_BRACKET = re.compile(r'\[|\]|[^\[\]\s]+')
    VALID = re.compile(r'^([^\[\]]+|\[[^\[\]]*\])*$')
    # Reject unbalanced or nested brackets up front.
    if not VALID.match(s):
        raise ValueError('Invalid token string (bad brackets)')
    stack = [Tree(top_node, [])]
    for match in WORD_OR_BRACKET.finditer(s):
        text = match.group()
        if text == '[':
            # Open a new chunk under the current node.
            subtree = Tree(chunk_node, [])
            stack[-1].append(subtree)
            stack.append(subtree)
        elif text == ']':
            stack.pop()
        else:
            # A word, optionally tagged as text/tag (split on the last '/').
            slash = text.rfind('/')
            if slash < 0:
                stack[-1].append((text, None))
            else:
                stack[-1].append((text[:slash], text[slash+1:]))
    return stack[0]
### CONLL
# One CONLL 2000 style line: word, part-of-speech tag, and an IOB chunk
# tag such as "B-NP" / "I-NP" / "O" (the type after the hyphen is optional).
_LINE_RE = re.compile('(\S+)\s+(\S+)\s+([IOB])-?(\S+)?')
def conll_chunk(s, chunk_types=("NP",), top_node="S"):
    """
    @return: A chunk structure for a single sentence
        encoded in the given CONLL 2000 style string.
    @rtype: L{Tree}
    @param s: The CONLL string, one "word tag IOB-chunktag" triple per line.
    @param chunk_types: The chunk types to retain; all other chunk types
        are treated as unchunked (C{'O'}) material.  C{None} keeps all.
    @param top_node: The node label for the root of the returned tree.
    """
    # stack[0] is the root; stack[1] (if present) is the chunk being built.
    stack = [Tree(top_node, [])]
    for lineno, line in enumerate(tokenize.line(s)):
        # Decode the line.
        match = _LINE_RE.match(line)
        if match is None:
            raise ValueError, 'Error on line %d' % lineno
        (word, tag, state, chunk_type) = match.groups()
        # If it's a chunk type we don't care about, treat it as O.
        if (chunk_types is not None and
            chunk_type not in chunk_types):
            state = 'O'
        # For "Begin"/"Outside", finish any completed chunks -
        # also do so for "Inside" which don't match the previous token.
        mismatch_I = state == 'I' and chunk_type != stack[-1].node
        if state in 'BO' or mismatch_I:
            if len(stack) == 2: stack.pop()
        # For "Begin", start a new chunk.
        if state == 'B' or mismatch_I:
            chunk = Tree(chunk_type, [])
            stack[-1].append(chunk)
            stack.append(chunk)
        # Add the new word token.
        stack[-1].append((word, tag))
    return stack[0]
### IEER
# Matches one <DOC>...</DOC> record.  Named groups capture the optional
# DOCNO/DOCTYPE/DATE_TIME/HEADLINE header fields and the required TEXT body.
_IEER_DOC_RE = re.compile(r'<DOC>\s*'
                          r'(<DOCNO>\s*(?P<docno>.+?)\s*</DOCNO>\s*)?'
                          r'(<DOCTYPE>\s*(?P<doctype>.+?)\s*</DOCTYPE>\s*)?'
                          r'(<DATE_TIME>\s*(?P<date_time>.+?)\s*</DATE_TIME>\s*)?'
                          r'<BODY>\s*'
                          r'(<HEADLINE>\s*(?P<headline>.+?)\s*</HEADLINE>\s*)?'
                          r'<TEXT>(?P<text>.*?)</TEXT>\s*'
                          r'</BODY>\s*</DOC>\s*', re.DOTALL)
# Extracts the type="..." attribute from an IEER <b_...> open tag.
_IEER_TYPE_RE = re.compile('<b_\w+\s+[^>]*?type="(?P<type>\w+)"')
def _ieer_read_text(s, top_node):
    """
    Build a chunk C{Tree} from IEER-markup text: each <b_...>...<e_...>
    span becomes a subtree labelled with the tag's type attribute, and
    plain tokens are attached to the current node.
    """
    stack = [Tree(top_node, [])]
    # Each piece is either an SGML-style tag or a whitespace-separated token.
    for piece_m in re.finditer('<[^>]+>|[^\s<]+', s):
        piece = piece_m.group()
        try:
            if piece.startswith('<b_'):
                m = _IEER_TYPE_RE.match(piece)
                # NOTE(review): if the open tag has no type attribute then m
                # is None; the diagnostic below prints, and the following
                # m.group() raises AttributeError, which the except clause
                # does NOT convert to ValueError -- confirm this is intended.
                if m is None: print 'XXXX', piece
                chunk = Tree(m.group('type'), [])
                stack[-1].append(chunk)
                stack.append(chunk)
            elif piece.startswith('<e_'):
                stack.pop()
#           elif piece.startswith('<'):
#               print "ERROR:", piece
#               raise ValueError # Unexpected HTML
            else:
                stack[-1].append(piece)
        except (IndexError, ValueError):
            # An unmatched <e_...> pops the empty stack (IndexError).
            raise ValueError('Bad IEER string (error at character %d)' %
                             piece_m.start())
    if len(stack) != 1:
        raise ValueError('Bad IEER string')
    return stack[0]
def ieer_chunk(s, chunk_types=('LOCATION', 'ORGANIZATION', 'PERSON', 'DURATION',
               'DATE', 'CARDINAL', 'PERCENT', 'MONEY', 'MEASURE'), top_node="S"):
    """
    Convert a string of chunked tagged text in the IEER named
    entity format into a chunk structure.  Chunks are of several
    types, LOCATION, ORGANIZATION, PERSON, DURATION, DATE, CARDINAL,
    PERCENT, MONEY, and MEASURE.

    @param chunk_types: The entity types to recognize.  NOTE: this
        parameter is currently not consulted by the implementation;
        it is kept for interface compatibility.  (Its default is now
        a tuple rather than a mutable list, avoiding the shared
        mutable-default hazard.)
    @param top_node: The node label for the root of the returned tree.
    @return: A chunk structure containing the chunked tagged text that is
        encoded in the given IEER style string, or, for a full <DOC>
        record, a dict of the document's fields with the parsed 'text'.
    @rtype: L{Tree} or C{dict}
    """
    # Try looking for a single document.  If that doesn't work, then just
    # treat everything as if it was within the <TEXT>...</TEXT>.
    m = _IEER_DOC_RE.match(s)
    if m:
        return {
            'text': _ieer_read_text(m.group('text'), top_node),
            'docno': m.group('docno'),
            'doctype': m.group('doctype'),
            'date_time': m.group('date_time'),
            'headline': m.group('headline')
            }
    else:
        return _ieer_read_text(s, top_node)
######################################################################
## Demonstration
######################################################################
def demo():
    """
    A demonstration showing how C{Tree}s and C{ProbabilisticTree}s can
    be used.  This demonstration parses C{Tree}s from bracketed strings,
    and shows the results of calling several of their methods.
    """
    from en.parser.nltk_lite.parse import tree
    # Demonstrate tree parsing.
    s = '(S (NP (DT the) (NN cat)) (VP (VBD ate) (NP (DT a) (NN cookie))))'
    t = tree.bracket_parse(s)
    print "Convert bracketed string into tree:"
    print t
    print "Display tree properties:"
    print t.node # tree's constituent type
    print t[0] # tree's first child
    print t[1] # tree's second child
    print t.height()
    print t.leaves()
    print t[1]
    print t[1,1]     # nested index: second child of the second child
    print t[1,1,0]
    # Demonstrate tree modification.
    the_cat = t[0]
    the_cat.insert(1, tree.bracket_parse('(JJ big)'))
    print "Tree modification:"
    print t
    t[1,1,1] = tree.bracket_parse('(NN cake)')
    print t
    print
    # Demonstrate probabilistic trees.
    pt = tree.ProbabilisticTree('x', ['y', 'z'], prob=0.5)
    print "Probabilistic Tree:"
    print pt
    print
    # Demonstrate parsing of treebank output format.
    t = tree.bracket_parse(t.pp_treebank())[0]
    print "Convert tree to bracketed string and back again:"
    print t.pp_treebank()
    print t
    print
    # Demonstrate LaTeX output
    print "LaTeX output:"
    print t.pp_latex_qtree()
    print
    # Demonstrate Productions
    print "Production output:"
    print t.productions()
    print
    # Demonstrate chunk parsing
    s = "[ Pierre/NNP Vinken/NNP ] ,/, [ 61/CD years/NNS ] old/JJ ,/, will/MD join/VB [ the/DT board/NN ] ./."
    from tree import chunk
    print "Chunk Parsing:"
    print chunk(s, chunk_node='NP').pp()
    print
    # CONLL-format chunked text (word, POS tag, IOB chunk tag per line).
    s = """
These DT B-NP
research NN I-NP
protocols NNS I-NP
offer VBP B-VP
to TO B-PP
the DT B-NP
patient NN I-NP
not RB O
only RB O
the DT B-NP
very RB I-NP
best JJS I-NP
therapy NN I-NP
which WDT B-NP
we PRP B-NP
have VBP B-VP
established VBN I-VP
today NN B-NP
but CC B-NP
also RB I-NP
the DT B-NP
hope NN I-NP
of IN B-PP
something NN B-NP
still RB B-ADJP
better JJR I-ADJP
. . O
"""
    print conll_chunk(s, chunk_types=('NP', 'PP', 'VP')).pp()
    # Demonstrate tree nodes containing objects other than strings
    t.node = ('test', 3)
    print t
# Run the demonstration when this module is executed as a script.
if __name__ == '__main__':
    demo()
| Python |
# Natural Language Toolkit: Shift-Reduce Parser
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@csse.unimelb.edu.au>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
from tree import *
from en.parser.nltk_lite import tokenize
from en.parser.nltk_lite.parse import AbstractParse, cfg
from types import *
#
##//////////////////////////////////////////////////////
## Shift/Reduce Parser
##//////////////////////////////////////////////////////
class ShiftReduce(AbstractParse):
"""
A simple bottom-up CFG parser that uses two operations, "shift"
and "reduce", to find a single parse for a text.
C{ShiftReduce} maintains a stack, which records the
structure of a portion of the text. This stack is a list of
C{String}s and C{Tree}s that collectively cover a portion of
the text. For example, while parsing the sentence "the dog saw
the man" with a typical grammar, C{ShiftReduce} will produce
the following stack, which covers "the dog saw"::
[(NP: (Det: 'the') (N: 'dog')), (V: 'saw')]
C{ShiftReduce} attempts to extend the stack to cover the
entire text, and to combine the stack elements into a single tree,
producing a complete parse for the sentence.
Initially, the stack is empty. It is extended to cover the text,
from left to right, by repeatedly applying two operations:
- X{shift} moves a token from the beginning of the text to the
end of the stack.
- X{reduce} uses a CFG production to combine the rightmost stack
elements into a single C{Tree}.
Often, more than one operation can be performed on a given stack.
In this case, C{ShiftReduce} uses the following heuristics
to decide which operation to perform:
- Only shift if no reductions are available.
- If multiple reductions are available, then apply the reduction
whose CFG production is listed earliest in the grammar.
Note that these heuristics are not guaranteed to choose an
operation that leads to a parse of the text. Also, if multiple
parses exists, C{ShiftReduce} will return at most one of
them.
@see: C{nltk.cfg}
"""
def __init__(self, grammar, trace=0):
"""
Create a new C{ShiftReduce}, that uses C{grammar} to
parse texts.
@type grammar: C{Grammar}
@param grammar: The grammar used to parse texts.
@type trace: C{int}
@param trace: The level of tracing that should be used when
parsing a text. C{0} will generate no tracing output;
and higher numbers will produce more verbose tracing
output.
"""
self._grammar = grammar
self._trace = trace
AbstractParse.__init__(self)
self._check_grammar()
def get_parse(self, tokens):
# initialize the stack.
stack = []
remaining_text = tokens
# Trace output.
if self._trace:
print 'Parsing %r' % ' '.join(tokens)
self._trace_stack(stack, remaining_text)
# iterate through the text, pushing the token onto
# the stack, then reducing the stack.
while len(remaining_text) > 0:
self._shift(stack, remaining_text)
while self._reduce(stack, remaining_text): pass
# Did we reduce everything?
if len(stack) != 1: return None
# Did we end up with the right category?
if stack[0].node != self._grammar.start().symbol():
return None
# We parsed successfully!
return stack[0]
def _shift(self, stack, remaining_text):
"""
Move a token from the beginning of C{remaining_text} to the
end of C{stack}.
@type stack: C{list} of C{String} and C{Tree}
@param stack: A list of C{String}s and C{Tree}s, encoding
the structure of the text that has been parsed so far.
@type remaining_text: C{list} of C{String}
@param remaining_text: The portion of the text that is not yet
covered by C{stack}.
@rtype: C{None}
"""
stack.append(remaining_text[0])
remaining_text.remove(remaining_text[0])
if self._trace: self._trace_shift(stack, remaining_text)
def _match_rhs(self, rhs, rightmost_stack):
"""
@rtype: C{boolean}
@return: true if the right hand side of a CFG production
matches the rightmost elements of the stack. C{rhs}
matches C{rightmost_stack} if they are the same length,
and each element of C{rhs} matches the corresponding
element of C{rightmost_stack}. A nonterminal element of
C{rhs} matches any C{Tree} whose node value is equal
to the nonterminal's symbol. A terminal element of C{rhs}
matches any C{String} whose type is equal to the terminal.
@type rhs: C{list} of (terminal and C{Nonterminal})
@param rhs: The right hand side of a CFG production.
@type rightmost_stack: C{list} of (C{String} and C{Tree})
@param rightmost_stack: The rightmost elements of the parser's
stack.
"""
if len(rightmost_stack) != len(rhs): return 0
for i in range(len(rightmost_stack)):
if isinstance(rightmost_stack[i], Tree):
if not isinstance(rhs[i], cfg.Nonterminal): return 0
if rightmost_stack[i].node != rhs[i].symbol(): return 0
else:
if isinstance(rhs[i], cfg.Nonterminal): return 0
if rightmost_stack[i] != rhs[i]: return 0
return 1
def _reduce(self, stack, remaining_text, production=None):
"""
Find a CFG production whose right hand side matches the
rightmost stack elements; and combine those stack elements
into a single C{Tree}, with the node specified by the
production's left-hand side. If more than one CFG production
matches the stack, then use the production that is listed
earliest in the grammar. The new C{Tree} replaces the
elements in the stack.
@rtype: C{Production} or C{None}
@return: If a reduction is performed, then return the CFG
production that the reduction is based on; otherwise,
return false.
@type stack: C{list} of C{String} and C{Tree}
@param stack: A list of C{String}s and C{Tree}s, encoding
the structure of the text that has been parsed so far.
@type remaining_text: C{list} of C{String}
@param remaining_text: The portion of the text that is not yet
covered by C{stack}.
"""
if production is None: productions = self._grammar.productions()
else: productions = [production]
# Try each production, in order.
for production in productions:
rhslen = len(production.rhs())
# check if the RHS of a production matches the top of the stack
if self._match_rhs(production.rhs(), stack[-rhslen:]):
# combine the tree to reflect the reduction
tree = Tree(production.lhs().symbol(), stack[-rhslen:])
stack[-rhslen:] = [tree]
# We reduced something
if self._trace:
self._trace_reduce(stack, production, remaining_text)
return production
# We didn't reduce anything
return None
def trace(self, trace=2):
"""
Set the level of tracing output that should be generated when
parsing a text.
@type trace: C{int}
@param trace: The trace level. A trace level of C{0} will
generate no tracing output; and higher trace levels will
produce more verbose tracing output.
@rtype: C{None}
"""
# 1: just show shifts.
# 2: show shifts & reduces
# 3: display which tokens & productions are shifed/reduced
self._trace = trace
def _trace_stack(self, stack, remaining_text, marker=' '):
"""
Print trace output displaying the given stack and text.
@rtype: C{None}
@param marker: A character that is printed to the left of the
stack. This is used with trace level 2 to print 'S'
before shifted stacks and 'R' before reduced stacks.
"""
str = ' '+marker+' [ '
for elt in stack:
if isinstance(elt, Tree):
str += `cfg.Nonterminal(elt.node)` + ' '
else:
str += `elt` + ' '
str += '* ' + ' '.join(remaining_text) + ']'
print str
def _trace_shift(self, stack, remaining_text):
"""
Print trace output displaying that a token has been shifted.
@rtype: C{None}
"""
if self._trace > 2: print 'Shift %r:' % stack[-1]
if self._trace == 2: self._trace_stack(stack, remaining_text, 'S')
elif self._trace > 0: self._trace_stack(stack, remaining_text)
def _trace_reduce(self, stack, production, remaining_text):
"""
Print trace output displaying that C{production} was used to
reduce C{stack}.
@rtype: C{None}
"""
if self._trace > 2:
rhs = ' '.join(production.rhs())
print 'Reduce %r <- %s' % (production.lhs(), rhs)
if self._trace == 2: self._trace_stack(stack, remaining_text, 'R')
elif self._trace > 1: self._trace_stack(stack, remaining_text)
def _check_grammar(self):
"""
Check to make sure that all of the CFG productions are
potentially useful. If any productions can never be used,
then print a warning.
@rtype: C{None}
"""
productions = self._grammar.productions()
# Any production whose RHS is an extension of another production's RHS
# will never be used.
for i in range(len(productions)):
for j in range(i+1, len(productions)):
rhs1 = productions[i].rhs()
rhs2 = productions[j].rhs()
if rhs1[:len(rhs2)] == rhs2:
print 'Warning: %r will never be used' % productions[i]
##//////////////////////////////////////////////////////
## Stepping Shift/Reduce Parser
##//////////////////////////////////////////////////////
class SteppingShiftReduce(ShiftReduce):
    """
    A C{ShiftReduce} that allows you to step through the parsing
    process, performing a single operation at a time.  It also allows
    you to change the parser's grammar midway through parsing a text.

    The C{initialize} method is used to start parsing a text.
    C{shift} performs a single shift operation, and C{reduce} performs
    a single reduce operation.  C{step} will perform a single reduce
    operation if possible; otherwise, it will perform a single shift
    operation.  C{parses} returns the set of parses that have been
    found by the parser.

    @ivar _history: A list of C{(stack, remaining_text)} pairs,
        containing all of the previous states of the parser.  This
        history is used to implement the C{undo} operation.
    @see: C{nltk.cfg}
    """
    def __init__(self, grammar, trace=0):
        self._grammar = grammar
        self._trace = trace
        self._stack = None
        self._remaining_text = None
        self._history = []
        AbstractParse.__init__(self)
    def get_parse_list(self, token):
        # Run the parser to completion, one step at a time.
        self.initialize(token)
        while self.step(): pass
        return self.parses()
    def stack(self):
        """
        @return: The parser's stack.
        @rtype: C{list} of C{String} and C{Tree}
        """
        return self._stack
    def remaining_text(self):
        """
        @return: The portion of the text that is not yet covered by the
            stack.
        @rtype: C{list} of C{String}
        """
        return self._remaining_text
    def initialize(self, token):
        """
        Start parsing a given text.  This sets the parser's stack to
        C{[]} and sets its remaining text to C{token}.
        """
        self._stack = []
        self._remaining_text = token
        self._history = []
    def step(self):
        """
        Perform a single parsing operation.  If a reduction is
        possible, then perform that reduction, and return the
        production that it is based on.  Otherwise, if a shift is
        possible, then perform it, and return 1.  Otherwise,
        return 0.

        @return: 0 if no operation was performed; 1 if a shift was
            performed; and the CFG production used to reduce if a
            reduction was performed.
        @rtype: C{Production} or C{boolean}
        """
        return self.reduce() or self.shift()
    def shift(self):
        """
        Move a token from the beginning of the remaining text to the
        end of the stack.  If there are no more tokens in the
        remaining text, then do nothing.

        @return: True if the shift operation was successful.
        @rtype: C{boolean}
        """
        if len(self._remaining_text) == 0: return 0
        # Snapshot the state (copies, not aliases) so undo() can restore it.
        self._history.append( (self._stack[:], self._remaining_text[:]) )
        self._shift(self._stack, self._remaining_text)
        return 1
    def reduce(self, production=None):
        """
        Use C{production} to combine the rightmost stack elements into
        a single C{Tree}.  If C{production} does not match the
        rightmost stack elements, then do nothing.

        @return: The production used to reduce the stack, if a
            reduction was performed.  If no reduction was performed,
            return C{None}.
        @rtype: C{Production} or C{None}
        """
        self._history.append( (self._stack[:], self._remaining_text[:]) )
        return_val = self._reduce(self._stack, self._remaining_text,
                                  production)
        # A failed reduction should not leave an undo entry behind.
        if not return_val: self._history.pop()
        return return_val
    def undo(self):
        """
        Return the parser to its state before the most recent
        shift or reduce operation.  Calling C{undo} repeatedly returns
        the parser to successively earlier states.  If no shift or
        reduce operations have been performed, C{undo} will make no
        changes.

        @return: true if an operation was successfully undone.
        @rtype: C{boolean}
        """
        if len(self._history) == 0: return 0
        (self._stack, self._remaining_text) = self._history.pop()
        return 1
    def reducible_productions(self):
        """
        @return: A list of the productions for which reductions are
            available for the current parser state.
        @rtype: C{list} of C{Production}
        """
        productions = []
        for production in self._grammar.productions():
            rhslen = len(production.rhs())
            # NOTE(review): for an epsilon production rhslen is 0, so
            # self._stack[-0:] is the *whole* stack -- confirm intended.
            if self._match_rhs(production.rhs(), self._stack[-rhslen:]):
                productions.append(production)
        return productions
    def parses(self):
        """
        @return: A list of the parses that have been found by this
            parser so far.
        @rtype: C{list} of C{Tree}
        """
        if len(self._remaining_text) != 0: return []
        if len(self._stack) != 1: return []
        if self._stack[0].node != self._grammar.start().symbol():
            return []
        return self._stack
    # copied from nltk.parser
    def set_grammar(self, grammar):
        """
        Change the grammar used to parse texts.

        @param grammar: The new grammar.
        @type grammar: C{CFG}
        """
        self._grammar = grammar
##//////////////////////////////////////////////////////
## Demonstration Code
##//////////////////////////////////////////////////////
def demo():
    """
    A demonstration of the shift-reduce parser.  Builds a small toy
    grammar, parses one sentence with full tracing, and prints each
    parse found.
    """
    from en.parser.nltk_lite.parse import cfg
    # Define some nonterminals
    S, VP, NP, PP = cfg.nonterminals('S, VP, NP, PP')
    V, N, P, Name, Det = cfg.nonterminals('V, N, P, Name, Det')
    # Define a grammar.
    productions = (
        # Syntactic Productions
        cfg.Production(S, [NP, 'saw', NP]),
        cfg.Production(S, [NP, VP]),
        cfg.Production(NP, [Det, N]),
        cfg.Production(VP, [V, NP, PP]),
        cfg.Production(NP, [Det, N, PP]),
        cfg.Production(PP, [P, NP]),
        # Lexical Productions
        cfg.Production(NP, ['I']), cfg.Production(Det, ['the']),
        cfg.Production(Det, ['a']), cfg.Production(N, ['man']),
        cfg.Production(V, ['saw']), cfg.Production(P, ['in']),
        cfg.Production(P, ['with']), cfg.Production(N, ['park']),
        cfg.Production(N, ['dog']), cfg.Production(N, ['telescope'])
        )
    grammar = cfg.Grammar(S, productions)
    # Tokenize a sample sentence.
    sent = list(tokenize.whitespace('I saw a man in the park'))
    # Parse with tracing enabled and print every parse found.
    parser = ShiftReduce(grammar)
    parser.trace()
    for p in parser.get_parse_list(sent):
        print p
# Run the demonstration when this module is executed as a script.
if __name__ == '__main__': demo()
| Python |
# Natural Language Toolkit: Chart Parser Demo
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Jean Mark Gawron <gawron@mail.sdsu.edu>
# Steven Bird <sb@csse.unimelb.edu.au>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
#
# $Id: chart.py 3460 2006-10-06 10:39:03Z stevenbird $
"""
A graphical tool for exploring chart parsing.
Chart parsing is a flexible parsing algorithm that uses a data
structure called a "chart" to record hypotheses about syntactic
constituents. Each hypothesis is represented by a single "edge" on
the chart. A set of "chart rules" determine when new edges can be
added to the chart. This set of rules controls the overall behavior
of the parser (e.g., whether it parses top-down or bottom-up).
The chart parsing tool demonstrates the process of parsing a single
sentence, with a given grammar and lexicon. Its display is divided
into three sections: the bottom section displays the chart; the middle
section displays the sentence; and the top section displays the
partial syntax tree corresponding to the selected edge. Buttons along
the bottom of the window are used to control the execution of the
algorithm.
The chart parsing tool allows for flexible control of the parsing
algorithm. At each step of the algorithm, you can select which rule
or strategy you wish to apply. This allows you to experiment with
mixing different strategies (e.g., top-down and bottom-up). You can
exercise fine-grained control over the algorithm by selecting which
edge you wish to apply a rule to.
"""
# At some point, we should rewrite this tool to use the new canvas
# widget system.
import pickle
from tkFileDialog import asksaveasfilename, askopenfilename
import Tkinter, tkFont, tkMessageBox
import math
import os.path
from en.parser.nltk_lite.parse.chart import *
from en.parser.nltk_lite.parse import cfg
from en.parser.nltk_lite import tokenize
from en.parser.nltk_lite.parse.tree import Tree
from en.parser.nltk_lite.draw import ShowText, EntryDialog, in_idle
from en.parser.nltk_lite.draw import MutableOptionMenu
from en.parser.nltk_lite.draw import ColorizedList, SymbolWidget, CanvasFrame
from en.parser.nltk_lite.draw.cfg import CFGEditor
from en.parser.nltk_lite.draw.tree import tree_to_treesegment, TreeSegmentWidget
# Known bug: ChartView doesn't handle edges generated by epsilon
# productions (e.g., [Production: PP -> ]) very well.
#######################################################################
# Edge List
#######################################################################
class EdgeList(ColorizedList):
    """A C{ColorizedList} specialised for displaying chart edges."""
    ARROW = SymbolWidget.SYMBOLS['rightarrow']
    def _init_colortags(self, textwidget, options):
        # One text tag per syntactic role in the rendered edge string.
        textwidget.tag_config('terminal', foreground='#006000')
        textwidget.tag_config('arrow', font='symbol', underline='0')
        textwidget.tag_config('dot', foreground='#000000')
        textwidget.tag_config('nonterminal', foreground='blue',
                              font=('helvetica', -12, 'bold'))
    def _item_repr(self, item):
        # Render the edge as (text, colortag) pairs, marking the dot
        # position in the RHS with ' *'.
        pieces = [('%s\t' % item.lhs(), 'nonterminal'),
                  (self.ARROW, 'arrow')]
        for index, elt in enumerate(item.rhs()):
            if index == item.dot():
                pieces.append((' *', 'dot'))
            if isinstance(elt, cfg.Nonterminal):
                pieces.append((' %s' % elt.symbol(), 'nonterminal'))
            else:
                pieces.append((' %r' % elt, 'terminal'))
        if item.is_complete():
            pieces.append((' *', 'dot'))
        return pieces
#######################################################################
# Chart Matrix View
#######################################################################
class ChartMatrixView(object):
"""
A view of a chart that displays the contents of the corresponding matrix.
"""
    def __init__(self, parent, chart, toplevel=True, title='Chart Matrix',
                 show_numedges=False):
        """
        Create a matrix view of C{chart}.

        @param parent: The Tkinter widget to build the view in.
        @param toplevel: If true, wrap the view in its own Toplevel
            window (with a Quit button and a Control-q binding);
            otherwise embed it in a Frame inside C{parent}.
        @param show_numedges: If true, display a running edge count.
        """
        self._chart = chart
        self._cells = []
        self._marks = []
        # Currently selected (start, end) cell, or None.
        self._selected_cell = None
        if toplevel:
            self._root = Tkinter.Toplevel(parent)
            self._root.title(title)
            self._root.bind('<Control-q>', self.destroy)
            self._init_quit(self._root)
        else:
            self._root = Tkinter.Frame(parent)
        self._init_matrix(self._root)
        self._init_list(self._root)
        if show_numedges:
            self._init_numedges(self._root)
        else:
            self._numedges_label = None
        # Registered event callbacks: {event: {callback_func: 1}}.
        self._callbacks = {}
        self._num_edges = 0
        self.draw()
def _init_quit(self, root):
quit = Tkinter.Button(root, text='Quit', command=self.destroy)
quit.pack(side='bottom', expand=0, fill='none')
def _init_matrix(self, root):
cframe = Tkinter.Frame(root, border=2, relief='sunken')
cframe.pack(expand=0, fill='none', padx=1, pady=3, side='top')
self._canvas = Tkinter.Canvas(cframe, width=200, height=200,
background='white')
self._canvas.pack(expand=0, fill='none')
def _init_numedges(self, root):
self._numedges_label = Tkinter.Label(root, text='0 edges')
self._numedges_label.pack(expand=0, fill='none', side='top')
def _init_list(self, root):
self._list = EdgeList(root, [], width=20, height=5)
self._list.pack(side='top', expand=1, fill='both', pady=3)
def cb(edge, self=self): self._fire_callbacks('select', edge)
self._list.add_callback('select', cb)
self._list.focus()
def destroy(self, *e):
if self._root is None: return
try: self._root.destroy()
except: pass
self._root = None
def set_chart(self, chart):
if chart is not self._chart:
self._chart = chart
self._num_edges = 0
self.draw()
def update(self):
if self._root is None: return
# Count the edges in each cell
N = len(self._cells)
cell_edges = [[0 for i in range(N)] for j in range(N)]
for edge in self._chart:
cell_edges[edge.start()][edge.end()] += 1
# Color the cells correspondingly.
for i in range(N):
for j in range(i, N):
if cell_edges[i][j] == 0:
color = 'gray20'
else:
color = ('#00%02x%02x' %
(min(255, 50+128*cell_edges[i][j]/10),
max(0, 128-128*cell_edges[i][j]/10)))
cell_tag = self._cells[i][j]
self._canvas.itemconfig(cell_tag, fill=color)
if (i,j) == self._selected_cell:
self._canvas.itemconfig(cell_tag, outline='#00ffff',
width=3)
self._canvas.tag_raise(cell_tag)
else:
self._canvas.itemconfig(cell_tag, outline='black',
width=1)
# Update the edge list.
edges = list(self._chart.select(span=self._selected_cell))
self._list.set(edges)
# Update our edge count.
self._num_edges = self._chart.num_edges()
if self._numedges_label is not None:
self._numedges_label['text'] = '%d edges' % self._num_edges
def activate(self):
self._canvas.itemconfig('inactivebox', state='hidden')
self.update()
def inactivate(self):
self._canvas.itemconfig('inactivebox', state='normal')
self.update()
def add_callback(self, event, func):
self._callbacks.setdefault(event,{})[func] = 1
def remove_callback(self, event, func=None):
if func is None: del self._callbacks[event]
else:
try: del self._callbacks[event][func]
except: pass
def _fire_callbacks(self, event, *args):
if not self._callbacks.has_key(event): return
for cb_func in self._callbacks[event].keys(): cb_func(*args)
def select_cell(self, i, j):
if self._root is None: return
# If the cell is already selected (and the chart contents
# haven't changed), then do nothing.
if ((i,j) == self._selected_cell and
self._chart.num_edges() == self._num_edges): return
self._selected_cell = (i,j)
self.update()
# Fire the callback.
self._fire_callbacks('select_cell', i, j)
def deselect_cell(self):
if self._root is None: return
self._selected_cell = None
self._list.set([])
self.update()
def _click_cell(self, i, j):
if self._selected_cell == (i,j):
self.deselect_cell()
else:
self.select_cell(i, j)
def view_edge(self, edge):
self.select_cell(*edge.span())
self._list.view(edge)
def mark_edge(self, edge):
if self._root is None: return
self.select_cell(*edge.span())
self._list.mark(edge)
def unmark_edge(self, edge=None):
if self._root is None: return
self._list.unmark(edge)
def markonly_edge(self, edge):
if self._root is None: return
self.select_cell(*edge.span())
self._list.markonly(edge)
def draw(self):
if self._root is None: return
LEFT_MARGIN = BOT_MARGIN = 15
TOP_MARGIN = 5
c = self._canvas
c.delete('all')
N = self._chart.num_leaves()+1
dx = (int(c['width'])-LEFT_MARGIN)/N
dy = (int(c['height'])-TOP_MARGIN-BOT_MARGIN)/N
c.delete('all')
# Labels and dotted lines
for i in range(N):
c.create_text(LEFT_MARGIN-2, i*dy+dy/2+TOP_MARGIN,
text=`i`, anchor='e')
c.create_text(i*dx+dx/2+LEFT_MARGIN, N*dy+TOP_MARGIN+1,
text=`i`, anchor='n')
c.create_line(LEFT_MARGIN, dy*(i+1)+TOP_MARGIN,
dx*N+LEFT_MARGIN, dy*(i+1)+TOP_MARGIN, dash='.')
c.create_line(dx*i+LEFT_MARGIN, TOP_MARGIN,
dx*i+LEFT_MARGIN, dy*N+TOP_MARGIN, dash='.')
# A box around the whole thing
c.create_rectangle(LEFT_MARGIN, TOP_MARGIN,
LEFT_MARGIN+dx*N, dy*N+TOP_MARGIN,
width=2)
# Cells
self._cells = [[None for i in range(N)] for j in range(N)]
for i in range(N):
for j in range(i, N):
t = c.create_rectangle(j*dx+LEFT_MARGIN, i*dy+TOP_MARGIN,
(j+1)*dx+LEFT_MARGIN,
(i+1)*dy+TOP_MARGIN,
fill='gray20')
self._cells[i][j] = t
def cb(event, self=self, i=i, j=j): self._click_cell(i,j)
c.tag_bind(t, '<Button-1>', cb)
# Inactive box
xmax, ymax = int(c['width']), int(c['height'])
t = c.create_rectangle(-100, -100, xmax+100, ymax+100,
fill='gray50', state='hidden',
tag='inactivebox')
c.tag_lower(t)
# Update the cells.
self.update()
def pack(self, *args, **kwargs):
self._root.pack(*args, **kwargs)
#######################################################################
# Chart Results View
#######################################################################
class ChartResultsView(object):
    """
    A view that displays the complete parses extracted from a chart,
    stacked vertically in a scrollable canvas frame.  One parse can be
    selected (click) and printed.
    """
    def __init__(self, parent, chart, grammar, toplevel=True):
        """
        @param chart: The chart whose parses should be shown.
        @param grammar: The grammar whose start symbol identifies
            complete parses.
        @param toplevel: If true, build a standalone window with
            Quit/Print buttons.
        """
        self._chart = chart
        self._grammar = grammar
        # Parse trees found so far (parallel to self._treewidgets).
        self._trees = []
        # Vertical position for the next tree widget.
        self._y = 10
        self._treewidgets = []
        # The selected tree widget and its highlight rectangle.
        self._selection = None
        self._selectbox = None
        if toplevel:
            self._root = Tkinter.Toplevel(parent)
            self._root.title('Chart Parsing Demo: Results')
            self._root.bind('<Control-q>', self.destroy)
        else:
            self._root = Tkinter.Frame(parent)
        # Buttons
        if toplevel:
            buttons = Tkinter.Frame(self._root)
            buttons.pack(side='bottom', expand=0, fill='x')
            Tkinter.Button(buttons, text='Quit',
                           command=self.destroy).pack(side='right')
            Tkinter.Button(buttons, text='Print All',
                           command=self.print_all).pack(side='left')
            Tkinter.Button(buttons, text='Print Selection',
                           command=self.print_selection).pack(side='left')
        # Canvas frame.
        self._cframe = CanvasFrame(self._root, closeenough=20)
        self._cframe.pack(side='top', expand=1, fill='both')
        # Initial update
        self.update()
    def update(self, edge=None):
        """
        Add any new complete parses to the display.  If C{edge} is
        given, skip the (potentially expensive) parse extraction
        unless that edge could actually complete a parse.
        """
        if self._root is None: return
        # If the edge isn't a parse edge, do nothing.
        if edge is not None:
            if edge.lhs() != self._grammar.start(): return
            if edge.span() != (0, self._chart.num_leaves()): return
        for parse in self._chart.parses(self._grammar.start()):
            if parse not in self._trees:
                self._add(parse)
    def _add(self, parse):
        # Append one parse tree to the canvas, below the previous ones.
        # Add it to self._trees.
        self._trees.append(parse)
        # Create a widget for it.
        c = self._cframe.canvas()
        treewidget = tree_to_treesegment(c, parse)
        # Add it to the canvas frame.
        self._treewidgets.append(treewidget)
        self._cframe.add_widget(treewidget, 10, self._y)
        # Register callbacks.
        treewidget.bind_click(self._click)
        # Update y.
        self._y = treewidget.bbox()[3] + 10
    def _click(self, widget):
        # Select the clicked tree, moving the highlight box onto it.
        c = self._cframe.canvas()
        if self._selection is not None:
            c.delete(self._selectbox)
        self._selection = widget
        (x1, y1, x2, y2) = widget.bbox()
        self._selectbox = c.create_rectangle(x1, y1, x2, y2,
                                             width=2, outline='#088')
    def _color(self, treewidget, color):
        # Recursively recolor a tree widget and all its descendants.
        treewidget.node()['color'] = color
        for child in treewidget.subtrees():
            if isinstance(child, TreeSegmentWidget):
                self._color(child, color)
            else:
                child['color'] = color
    def print_all(self, *e):
        """Print (or save to postscript) the entire canvas."""
        if self._root is None: return
        self._cframe.print_to_file()
    def print_selection(self, *e):
        """Print only the selected tree, then rebuild the display."""
        if self._root is None: return
        if self._selection is None:
            tkMessageBox.showerror('Print Error', 'No tree selected')
        else:
            c = self._cframe.canvas()
            # Temporarily strip everything except the selection.
            for widget in self._treewidgets:
                if widget is not self._selection:
                    self._cframe.destroy_widget(widget)
            c.delete(self._selectbox)
            (x1,y1,x2,y2) = self._selection.bbox()
            self._selection.move(10-x1,10-y1)
            c['scrollregion'] = '0 0 %s %s' % (x2-x1+20, y2-y1+20)
            self._cframe.print_to_file()
            # Restore our state.
            self._treewidgets = [self._selection]
            self.clear()
            self.update()
    def clear(self):
        """Remove all trees and reset the display state."""
        if self._root is None: return
        for treewidget in self._treewidgets:
            self._cframe.destroy_widget(treewidget)
        self._trees = []
        self._treewidgets = []
        if self._selection is not None:
            self._cframe.canvas().delete(self._selectbox)
            self._selection = None
        self._y = 10
    def set_chart(self, chart):
        """Show the parses of a different chart."""
        self.clear()
        self._chart = chart
        self.update()
    def set_grammar(self, grammar):
        """Use a different grammar (and hence start symbol)."""
        self.clear()
        self._grammar = grammar
        self.update()
    def destroy(self, *e):
        """Close the view's window (idempotent)."""
        if self._root is None: return
        try: self._root.destroy()
        except: pass
        self._root = None
    def pack(self, *args, **kwargs):
        """Pack the view's top-level frame into its parent."""
        self._root.pack(*args, **kwargs)
#######################################################################
# Chart Comparer
#######################################################################
class ChartComparer(object):
    """
    @ivar _root: The root window
    @ivar _charts: A dictionary mapping names to charts.  When
        charts are loaded, they are added to this dictionary.
    @ivar _left_chart: The left L{Chart}.
    @ivar _left_name: The name C{_left_chart} (derived from filename)
    @ivar _left_matrix: The L{ChartMatrixView} for C{_left_chart}
    @ivar _left_selector: The drop-down L{MutableOptionsMenu} used
          to select C{_left_chart}.
    @ivar _right_chart: The right L{Chart}.
    @ivar _right_name: The name C{_right_chart} (derived from filename)
    @ivar _right_matrix: The L{ChartMatrixView} for C{_right_chart}
    @ivar _right_selector: The drop-down L{MutableOptionsMenu} used
          to select C{_right_chart}.
    @ivar _out_chart: The out L{Chart}.
    @ivar _out_name: The name C{_out_chart} (derived from filename)
    @ivar _out_matrix: The L{ChartMatrixView} for C{_out_chart}
    @ivar _out_label: The label for C{_out_chart}.
    @ivar _op_label: A Label containing the most recent operation.
    """
    # Symbol displayed between the two input matrices per operation.
    _OPSYMBOL = {'-': '-',
                 'and': SymbolWidget.SYMBOLS['intersection'],
                 'or': SymbolWidget.SYMBOLS['union']}
    def __init__(self, *chart_filenames):
        """Build the comparison window and load any given chart files."""
        # This chart is displayed when we don't have a value (eg
        # before any chart is loaded).
        faketok = [''] * 8
        self._emptychart = Chart(faketok)
        # The left & right charts start out empty.
        self._left_name = 'None'
        self._right_name = 'None'
        self._left_chart = self._emptychart
        self._right_chart = self._emptychart
        # The charts that have been loaded.
        self._charts = {'None': self._emptychart}
        # The output chart.
        self._out_chart = self._emptychart
        # The most recent operation
        self._operator = None
        # Set up the root window.
        self._root = Tkinter.Tk()
        self._root.title('Chart Comparison')
        self._root.bind('<Control-q>', self.destroy)
        self._root.bind('<Control-x>', self.destroy)
        # Initialize all widgets, etc.
        self._init_menubar(self._root)
        self._init_chartviews(self._root)
        self._init_divider(self._root)
        self._init_buttons(self._root)
        self._init_bindings(self._root)
        # Load any specified charts.
        for filename in chart_filenames:
            self.load_chart(filename)
    def destroy(self, *e):
        """Close the window (safe to call more than once)."""
        if self._root is None: return
        try: self._root.destroy()
        except: pass
        self._root = None
    def mainloop(self, *args, **kwargs):
        """Enter the Tkinter main event loop."""
        # NOTE(review): this unconditional return makes the call below
        # unreachable, so mainloop() is currently a no-op.  It looks
        # like leftover debugging code -- confirm before changing.
        return
        self._root.mainloop(*args, **kwargs)
    #////////////////////////////////////////////////////////////
    # Initialization
    #////////////////////////////////////////////////////////////
    def _init_menubar(self, root):
        # Build the File and Compare menus.
        menubar = Tkinter.Menu(root)
        # File menu
        filemenu = Tkinter.Menu(menubar, tearoff=0)
        filemenu.add_command(label='Load Chart', accelerator='Ctrl-o',
                             underline=0, command=self.load_chart_dialog)
        filemenu.add_command(label='Save Output', accelerator='Ctrl-s',
                             underline=0, command=self.save_chart_dialog)
        filemenu.add_separator()
        filemenu.add_command(label='Exit', underline=1,
                             command=self.destroy, accelerator='Ctrl-x')
        menubar.add_cascade(label='File', underline=0, menu=filemenu)
        # Compare menu
        opmenu = Tkinter.Menu(menubar, tearoff=0)
        opmenu.add_command(label='Intersection',
                           command=self._intersection,
                           accelerator='+')
        opmenu.add_command(label='Union',
                           command=self._union,
                           accelerator='*')
        opmenu.add_command(label='Difference',
                           command=self._difference,
                           accelerator='-')
        opmenu.add_separator()
        opmenu.add_command(label='Swap Charts',
                           command=self._swapcharts)
        menubar.add_cascade(label='Compare', underline=0, menu=opmenu)
        # Add the menu
        self._root.config(menu=menubar)
    def _init_divider(self, root):
        # Thin sunken divider between the menu and the chart views.
        divider = Tkinter.Frame(root, border=2, relief='sunken')
        divider.pack(side='top', fill='x', ipady=2)
    def _init_chartviews(self, root):
        # Layout: left matrix, operator, right matrix, '=', output matrix.
        opfont=('symbol', -36) # Font for operator.
        eqfont=('helvetica', -36) # Font for equals sign.
        frame = Tkinter.Frame(root, background='#c0c0c0')
        frame.pack(side='top', expand=1, fill='both')
        # The left matrix.
        cv1_frame = Tkinter.Frame(frame, border=3, relief='groove')
        cv1_frame.pack(side='left', padx=8, pady=7, expand=1, fill='both')
        self._left_selector = MutableOptionMenu(
            cv1_frame, self._charts.keys(), command=self._select_left)
        self._left_selector.pack(side='top', pady=5, fill='x')
        self._left_matrix = ChartMatrixView(cv1_frame, self._emptychart,
                                            toplevel=False,
                                            show_numedges=True)
        self._left_matrix.pack(side='bottom', padx=5, pady=5,
                               expand=1, fill='both')
        self._left_matrix.add_callback('select', self.select_edge)
        self._left_matrix.add_callback('select_cell', self.select_cell)
        self._left_matrix.inactivate()
        # The operator.
        self._op_label = Tkinter.Label(frame, text=' ', width=3,
                                       background='#c0c0c0', font=opfont)
        self._op_label.pack(side='left', padx=5, pady=5)
        # The right matrix.
        cv2_frame = Tkinter.Frame(frame, border=3, relief='groove')
        cv2_frame.pack(side='left', padx=8, pady=7, expand=1, fill='both')
        self._right_selector = MutableOptionMenu(
            cv2_frame, self._charts.keys(), command=self._select_right)
        self._right_selector.pack(side='top', pady=5, fill='x')
        self._right_matrix = ChartMatrixView(cv2_frame, self._emptychart,
                                             toplevel=False,
                                             show_numedges=True)
        self._right_matrix.pack(side='bottom', padx=5, pady=5,
                                expand=1, fill='both')
        self._right_matrix.add_callback('select', self.select_edge)
        self._right_matrix.add_callback('select_cell', self.select_cell)
        self._right_matrix.inactivate()
        # The equals sign
        Tkinter.Label(frame, text='=', width=3, background='#c0c0c0',
                      font=eqfont).pack(side='left', padx=5, pady=5)
        # The output matrix.
        out_frame = Tkinter.Frame(frame, border=3, relief='groove')
        out_frame.pack(side='left', padx=8, pady=7, expand=1, fill='both')
        self._out_label = Tkinter.Label(out_frame, text='Output')
        self._out_label.pack(side='top', pady=9)
        self._out_matrix = ChartMatrixView(out_frame, self._emptychart,
                                           toplevel=False,
                                           show_numedges=True)
        self._out_matrix.pack(side='bottom', padx=5, pady=5,
                              expand=1, fill='both')
        self._out_matrix.add_callback('select', self.select_edge)
        self._out_matrix.add_callback('select_cell', self.select_cell)
        self._out_matrix.inactivate()
    def _init_buttons(self, root):
        # Operation buttons along the bottom of the window.
        buttons = Tkinter.Frame(root)
        buttons.pack(side='bottom', pady=5, fill='x', expand=0)
        Tkinter.Button(buttons, text='Intersection',
                       command=self._intersection).pack(side='left')
        Tkinter.Button(buttons, text='Union',
                       command=self._union).pack(side='left')
        Tkinter.Button(buttons, text='Difference',
                       command=self._difference).pack(side='left')
        Tkinter.Frame(buttons, width=20).pack(side='left')
        Tkinter.Button(buttons, text='Swap Charts',
                       command=self._swapcharts).pack(side='left')
        # NOTE(review): 'Detatch' is a typo ('Detach') in this UI label;
        # left as-is to preserve behavior.
        Tkinter.Button(buttons, text='Detatch Output',
                       command=self._detatch_out).pack(side='right')
    def _init_bindings(self, root):
        # Keyboard shortcuts (some are disabled).
        #root.bind('<Control-s>', self.save_chart)
        root.bind('<Control-o>', self.load_chart_dialog)
        #root.bind('<Control-r>', self.reset)
    #////////////////////////////////////////////////////////////
    # Input Handling
    #////////////////////////////////////////////////////////////
    def _select_left(self, name):
        # Drop-down callback: show the chosen chart on the left side.
        self._left_name = name
        self._left_chart = self._charts[name]
        self._left_matrix.set_chart(self._left_chart)
        if name == 'None': self._left_matrix.inactivate()
        self._apply_op()
    def _select_right(self, name):
        # Drop-down callback: show the chosen chart on the right side.
        self._right_name = name
        self._right_chart = self._charts[name]
        self._right_matrix.set_chart(self._right_chart)
        if name == 'None': self._right_matrix.inactivate()
        self._apply_op()
    def _apply_op(self):
        # Re-run the most recent operation on the current input charts.
        if self._operator == '-': self._difference()
        elif self._operator == 'or': self._union()
        elif self._operator == 'and': self._intersection()
    #////////////////////////////////////////////////////////////
    # File
    #////////////////////////////////////////////////////////////
    CHART_FILE_TYPES = [('Pickle file', '.pickle'),
                        ('All files', '*')]
    def save_chart_dialog(self, *args):
        """Prompt for a filename and pickle the output chart to it."""
        filename = asksaveasfilename(filetypes=self.CHART_FILE_TYPES,
                                     defaultextension='.pickle')
        if not filename: return
        # NOTE(review): the file handle opened here is never explicitly
        # closed -- confirm whether this relies on refcounting.
        try: pickle.dump((self._out_chart), open(filename, 'w'))
        except Exception, e:
            tkMessageBox.showerror('Error Saving Chart',
                                   'Unable to open file: %r\n%s' %
                                   (filename, e))
    def load_chart_dialog(self, *args):
        """Prompt for a filename and load the pickled chart from it."""
        filename = askopenfilename(filetypes=self.CHART_FILE_TYPES,
                                   defaultextension='.pickle')
        if not filename: return
        try: self.load_chart(filename)
        except Exception, e:
            tkMessageBox.showerror('Error Loading Chart',
                                   'Unable to open file: %r\n%s' %
                                   (filename, e))
    def load_chart(self, filename):
        """Unpickle a chart, register it, and show it in an empty slot."""
        chart = pickle.load(open(filename, 'r'))
        # Use the basename, minus known extensions, as the chart's name.
        name = os.path.basename(filename)
        if name.endswith('.pickle'): name = name[:-7]
        if name.endswith('.chart'): name = name[:-6]
        self._charts[name] = chart
        self._left_selector.add(name)
        self._right_selector.add(name)
        # If either left_matrix or right_matrix is empty, then
        # display the new chart.
        if self._left_chart is self._emptychart:
            self._left_selector.set(name)
        elif self._right_chart is self._emptychart:
            self._right_selector.set(name)
    def _update_chartviews(self):
        # Refresh all three matrix views.
        self._left_matrix.update()
        self._right_matrix.update()
        self._out_matrix.update()
    #////////////////////////////////////////////////////////////
    # Selection
    #////////////////////////////////////////////////////////////
    def select_edge(self, edge):
        """Mark C{edge} in every view whose chart contains it."""
        if edge in self._left_chart:
            self._left_matrix.markonly_edge(edge)
        else:
            self._left_matrix.unmark_edge()
        if edge in self._right_chart:
            self._right_matrix.markonly_edge(edge)
        else:
            self._right_matrix.unmark_edge()
        if edge in self._out_chart:
            self._out_matrix.markonly_edge(edge)
        else:
            self._out_matrix.unmark_edge()
    def select_cell(self, i, j):
        """Select cell (i,j) in all three views simultaneously."""
        self._left_matrix.select_cell(i, j)
        self._right_matrix.select_cell(i, j)
        self._out_matrix.select_cell(i, j)
    #////////////////////////////////////////////////////////////
    # Operations
    #////////////////////////////////////////////////////////////
    def _difference(self):
        """Output the edges in the left chart but not the right."""
        if not self._checkcompat(): return
        out_chart = Chart(self._left_chart.tokens())
        for edge in self._left_chart:
            if edge not in self._right_chart:
                out_chart.insert(edge, [])
        self._update('-', out_chart)
    def _intersection(self):
        """Output the edges present in both charts."""
        if not self._checkcompat(): return
        out_chart = Chart(self._left_chart.tokens())
        for edge in self._left_chart:
            if edge in self._right_chart:
                out_chart.insert(edge, [])
        self._update('and', out_chart)
    def _union(self):
        """Output the edges present in either chart."""
        if not self._checkcompat(): return
        out_chart = Chart(self._left_chart.tokens())
        for edge in self._left_chart:
            out_chart.insert(edge, [])
        for edge in self._right_chart:
            out_chart.insert(edge, [])
        self._update('or', out_chart)
    def _swapcharts(self):
        """Exchange the left and right charts."""
        left, right = self._left_name, self._right_name
        self._left_selector.set(right)
        self._right_selector.set(left)
    def _checkcompat(self):
        # Charts are only comparable when they cover the same tokens and
        # properties, and neither is the empty placeholder chart.  On
        # failure the output view is cleared and False is returned.
        if (self._left_chart.tokens() != self._right_chart.tokens() or
            self._left_chart.property_names() !=
            self._right_chart.property_names() or
            self._left_chart == self._emptychart or
            self._right_chart == self._emptychart):
            # Clear & inactivate the output chart.
            self._out_chart = self._emptychart
            self._out_matrix.set_chart(self._out_chart)
            self._out_matrix.inactivate()
            self._out_label['text'] = 'Output'
            # Issue some other warning?
            return False
        else:
            return True
    def _update(self, operator, out_chart):
        # Record the operation and display its result chart.
        self._operator = operator
        self._op_label['text'] = self._OPSYMBOL[operator]
        self._out_chart = out_chart
        self._out_matrix.set_chart(out_chart)
        self._out_label['text'] = '%s %s %s' % (self._left_name,
                                                self._operator,
                                                self._right_name)
    def _clear_out_chart(self):
        # Reset the output chart to the empty placeholder.
        self._out_chart = self._emptychart
        self._out_matrix.set_chart(self._out_chart)
        self._op_label['text'] = ' '
        self._out_matrix.inactivate()
    def _detatch_out(self):
        """Open the current output chart in its own window."""
        ChartMatrixView(self._root, self._out_chart,
                        title=self._out_label['text'])
#######################################################################
# Chart View
#######################################################################
class ChartView(object):
"""
A component for viewing charts. This is used by C{ChartDemo} to
allow students to interactively experiment with various chart
parsing techniques. It is also used by C{Chart.draw()}.
@ivar _chart: The chart that we are giving a view of. This chart
may be modified; after it is modified, you should call
C{update}.
@ivar _sentence: The list of tokens that the chart spans.
@ivar _root: The root window.
@ivar _chart_canvas: The canvas we're using to display the chart
itself.
@ivar _tree_canvas: The canvas we're using to display the tree
that each edge spans. May be None, if we're not displaying
trees.
@ivar _sentence_canvas: The canvas we're using to display the sentence
text. May be None, if we're not displaying the sentence text.
@ivar _edgetags: A dictionary mapping from edges to the tags of
the canvas elements (lines, etc) used to display that edge.
The values of this dictionary have the form
C{(linetag, rhstag1, dottag, rhstag2, lhstag)}.
@ivar _treetags: A list of all the tags that make up the tree;
used to erase the tree (without erasing the loclines).
@ivar _chart_height: The height of the chart canvas.
@ivar _sentence_height: The height of the sentence canvas.
@ivar _tree_height: The height of the tree
@ivar _text_height: The height of a text string (in the normal
font).
@ivar _edgelevels: A list of edges at each level of the chart (the
top level is the 0th element). This list is used to remember
where edges should be drawn; and to make sure that no edges
are overlapping on the chart view.
@ivar _unitsize: Pixel size of one unit (from the location). This
is determined by the span of the chart's location, and the
width of the chart display canvas.
@ivar _fontsize: The current font size
@ivar _marks: A dictionary from edges to marks. Marks are
strings, specifying colors (e.g. 'green').
"""
_LEAF_SPACING = 10
_MARGIN = 10
_TREE_LEVEL_SIZE = 12
_CHART_LEVEL_SIZE = 40
def __init__(self, chart, root=None, **kw):
"""
Construct a new C{Chart} display.
"""
# Process keyword args.
draw_tree = kw.get('draw_tree', 0)
draw_sentence = kw.get('draw_sentence', 1)
self._fontsize = kw.get('fontsize', -12)
# The chart!
self._chart = chart
# Callback functions
self._callbacks = {}
# Keep track of drawn edges
self._edgelevels = []
self._edgetags = {}
# Keep track of which edges are marked.
self._marks = {}
# These are used to keep track of the set of tree tokens
# currently displayed in the tree canvas.
self._treetoks = []
self._treetoks_edge = None
self._treetoks_index = 0
# Keep track of the tags used to draw the tree
self._tree_tags = []
# Put multiple edges on each level?
self._compact = 0
# If they didn't provide a main window, then set one up.
if root is None:
top = Tkinter.Tk()
top.title('Chart View')
def destroy1(e, top=top): top.destroy()
def destroy2(top=top): top.destroy()
top.bind('q', destroy1)
b = Tkinter.Button(top, text='Done', command=destroy2)
b.pack(side='bottom')
self._root = top
else:
self._root = root
# Create some fonts.
self._init_fonts(root)
# Create the chart canvas.
(self._chart_sb, self._chart_canvas) = self._sb_canvas(self._root)
self._chart_canvas['height'] = 400
self._chart_canvas['closeenough'] = 15
# Create the sentence canvas.
if draw_sentence:
cframe = Tkinter.Frame(self._root, relief='sunk', border=2)
cframe.pack(fill='both', side='bottom')
self._sentence_canvas = Tkinter.Canvas(cframe, height=50)
self._sentence_canvas['background'] = '#e0e0e0'
self._sentence_canvas.pack(fill='both')
#self._sentence_canvas['height'] = self._sentence_height
else:
self._sentence_canvas = None
# Create the tree canvas.
if draw_tree:
(sb, canvas) = self._sb_canvas(self._root, 'n', 'x')
(self._tree_sb, self._tree_canvas) = (sb, canvas)
self._tree_canvas['height'] = 200
else:
self._tree_canvas = None
# Do some analysis to figure out how big the window should be
self._analyze()
self.draw()
self._resize()
self._grow()
# Set up the configure callback, which will be called whenever
# the window is resized.
self._chart_canvas.bind('<Configure>', self._configure)
    def _init_fonts(self, root):
        # Create the normal/bold fonts used to draw the chart.
        # Negative sizes are pixel units.
        self._boldfont = tkFont.Font(family='helvetica', weight='bold',
                                     size=self._fontsize)
        self._font = tkFont.Font(family='helvetica',
                                 size=self._fontsize)
        # See: <http://www.astro.washington.edu/owen/ROTKFolklore.html>
        self._sysfont = tkFont.Font(font=Tkinter.Button()["font"])
        root.option_add("*Font", self._sysfont)
def _sb_canvas(self, root, expand='y',
fill='both', side='bottom'):
"""
Helper for __init__: construct a canvas with a scrollbar.
"""
cframe =Tkinter.Frame(root, relief='sunk', border=2)
cframe.pack(fill=fill, expand=expand, side=side)
canvas = Tkinter.Canvas(cframe, background='#e0e0e0')
# Give the canvas a scrollbar.
sb = Tkinter.Scrollbar(cframe, orient='vertical')
sb.pack(side='right', fill='y')
canvas.pack(side='left', fill=fill, expand='yes')
# Connect the scrollbars to the canvas.
sb['command']= canvas.yview
canvas['yscrollcommand'] = sb.set
return (sb, canvas)
    def scroll_up(self, *e):
        """Scroll the chart canvas up by one scroll unit."""
        self._chart_canvas.yview('scroll', -1, 'units')
    def scroll_down(self, *e):
        """Scroll the chart canvas down by one scroll unit."""
        self._chart_canvas.yview('scroll', 1, 'units')
    def page_up(self, *e):
        """Scroll the chart canvas up by one page."""
        self._chart_canvas.yview('scroll', -1, 'pages')
    def page_down(self, *e):
        """Scroll the chart canvas down by one page."""
        self._chart_canvas.yview('scroll', 1, 'pages')
    def _grow(self):
        """
        Grow the window, if necessary
        """
        # Grow, if need-be
        N = self._chart.num_leaves()
        width = max(int(self._chart_canvas['width']),
                    N * self._unitsize + ChartView._MARGIN * 2 )
        # It won't resize without the second (height) line, but I
        # don't understand why not.
        self._chart_canvas.configure(width=width)
        self._chart_canvas.configure(height=self._chart_canvas['height'])
        # Recompute the unit size so the leaves exactly fill the width.
        self._unitsize = (width - 2*ChartView._MARGIN) / N
        # Reset the height for the sentence window.
        if self._sentence_canvas is not None:
            self._sentence_canvas['height'] = self._sentence_height
def set_font_size(self, size):
self._font.configure(size=-abs(size))
self._boldfont.configure(size=-abs(size))
self._sysfont.configure(size=-abs(size))
self._analyze()
self._grow()
self.draw()
    def get_font_size(self):
        """Return the current font size, as a positive number of pixels."""
        return abs(self._fontsize)
    def _configure(self, e):
        """
        The configure callback.  This is called whenever the window is
        resized.  It is also called when the window is first mapped.
        It figures out the unit size, and redraws the contents of each
        canvas.
        """
        N = self._chart.num_leaves()
        # Divide the new width evenly among the leaves.
        self._unitsize = (e.width - 2*ChartView._MARGIN) / N
        self.draw()
    def update(self, chart=None):
        """
        Draw any edges that have not been drawn.  This is typically
        called after something modifies the chart that this
        C{ChartView} is displaying; C{update} will cause any edges
        that have been added to the chart to be drawn.

        If update is given a C{chart} argument, then it will replace
        the current chart with the given chart.
        """
        if chart is not None:
            # Replace the chart wholesale and redraw from scratch.
            self._chart = chart
            self._edgelevels = []
            self._marks = {}
            self._analyze()
            self._grow()
            self.draw()
            self.erase_tree()
            self._resize()
        else:
            # Draw only the edges that are new since the last update.
            for edge in self._chart:
                if not self._edgetags.has_key(edge):
                    self._add_edge(edge)
            self._resize()
def _edge_conflict(self, edge, lvl):
"""
Return 1 if the given edge overlaps with any edge on the given
level. This is used by _add_edge to figure out what level a
new edge should be added to.
"""
(s1, e1) = edge.span()
for otheredge in self._edgelevels[lvl]:
(s2, e2) = otheredge.span()
if (s1 <= s2 < e1) or (s2 <= s1 < e2) or (s1==s2==e1==e2):
return 1
return 0
    def _analyze_edge(self, edge):
        """
        Given a new edge, recalculate:
            - _text_height
            - _unitsize (if the edge text is too big for the current
              _unitsize, then increase _unitsize)
        """
        c = self._chart_canvas
        if isinstance(edge, TreeEdge):
            lhs = edge.lhs()
            # Render the right-hand side as a space-separated string.
            rhselts = []
            for elt in edge.rhs():
                if isinstance(elt, cfg.Nonterminal):
                    rhselts.append(str(elt.symbol()))
                else:
                    rhselts.append(repr(elt))
            rhs = ' '.join(rhselts)
        else:
            lhs = edge.lhs()
            rhs = ''
        for s in (lhs, rhs):
            # Measure the string by drawing a throwaway canvas item
            # and taking its bounding box.
            tag = c.create_text(0,0, text=s,
                                font=self._boldfont,
                                anchor='nw', justify='left')
            bbox = c.bbox(tag)
            c.delete(tag)
            width = bbox[2] #+ ChartView._LEAF_SPACING
            edgelen = max(edge.length(), 1)
            self._unitsize = max(self._unitsize, width/edgelen)
            self._text_height = max(self._text_height, bbox[3] - bbox[1])
    def _add_edge(self, edge, minlvl=0):
        """
        Add a single edge to the ChartView:
            - Call analyze_edge to recalculate display parameters
            - Find an available level
            - Call _draw_edge

        @param minlvl: The lowest level that may be used (compact
            mode only).
        """
        # Be careful not to draw the same edge twice.
        if self._edgetags.has_key(edge): return
        self._analyze_edge(edge)
        self._grow()
        if not self._compact:
            # Non-compact mode: every edge gets its own level.
            self._edgelevels.append([edge])
            lvl = len(self._edgelevels)-1
            self._draw_edge(edge, lvl)
            self._resize()
            return
        # Figure out what level to draw the edge on.
        lvl = 0
        while 1:
            # If this level doesn't exist yet, create it.
            while lvl >= len(self._edgelevels):
                self._edgelevels.append([])
                self._resize()
            # Check if we can fit the edge in this level.
            if lvl>=minlvl and not self._edge_conflict(edge, lvl):
                # Go ahead and draw it.
                self._edgelevels[lvl].append(edge)
                break
            # Try the next level.
            lvl += 1
        self._draw_edge(edge, lvl)
def view_edge(self, edge):
level = None
for i in range(len(self._edgelevels)):
if edge in self._edgelevels[i]:
level = i
break
if level == None: return
# Try to view the new edge..
y = (level+1) * self._chart_level_size
dy = self._text_height + 10
self._chart_canvas.yview('moveto', 1.0)
if self._chart_height != 0:
self._chart_canvas.yview('moveto',
float(y-dy)/self._chart_height)
    def _draw_edge(self, edge, lvl):
        """
        Draw a single edge on the ChartView, at the given level.
        """
        c = self._chart_canvas
        # Draw the arrow.
        x1 = (edge.start() * self._unitsize + ChartView._MARGIN)
        x2 = (edge.end() * self._unitsize + ChartView._MARGIN)
        # Zero-length edges still get a short, visible arrow.
        if x2 == x1: x2 += max(4, self._unitsize/5)
        y = (lvl+1) * self._chart_level_size
        linetag = c.create_line(x1, y, x2, y, arrow='last', width=3)
        # Draw a label for the edge.
        if isinstance(edge, TreeEdge):
            rhs = []
            for elt in edge.rhs():
                if isinstance(elt, cfg.Nonterminal):
                    rhs.append(str(elt.symbol()))
                else:
                    rhs.append(repr(elt))
            pos = edge.dot()
        else:
            rhs = []
            pos = 0
        # Split the right-hand side text at the dot position.
        rhs1 = ' '.join(rhs[:pos])
        rhs2 = ' '.join(rhs[pos:])
        rhstag1 = c.create_text(x1+3, y, text=rhs1,
                                font=self._font,
                                anchor='nw')
        dotx = c.bbox(rhstag1)[2] + 6
        doty = (c.bbox(rhstag1)[1]+c.bbox(rhstag1)[3])/2
        dottag = c.create_oval(dotx-2, doty-2, dotx+2, doty+2)
        rhstag2 = c.create_text(dotx+6, y, text=rhs2,
                                font=self._font,
                                anchor='nw')
        lhstag = c.create_text((x1+x2)/2, y, text=str(edge.lhs()),
                               anchor='s',
                               font=self._boldfont)
        # Keep track of the edge's tags.
        self._edgetags[edge] = (linetag, rhstag1,
                                dottag, rhstag2, lhstag)
        # Register a callback for clicking on the edge.
        def cb(event, self=self, edge=edge):
            self._fire_callbacks('select', edge)
        c.tag_bind(rhstag1, '<Button-1>', cb)
        c.tag_bind(rhstag2, '<Button-1>', cb)
        c.tag_bind(linetag, '<Button-1>', cb)
        c.tag_bind(dottag, '<Button-1>', cb)
        c.tag_bind(lhstag, '<Button-1>', cb)
        self._color_edge(edge)
def _color_edge(self, edge, linecolor=None, textcolor=None):
"""
Color in an edge with the given colors.
If no colors are specified, use intelligent defaults
(dependant on selection, etc.)
"""
if not self._edgetags.has_key(edge): return
c = self._chart_canvas
if linecolor is not None and textcolor is not None:
if self._marks.has_key(edge):
linecolor = self._marks[edge]
tags = self._edgetags[edge]
c.itemconfig(tags[0], fill=linecolor)
c.itemconfig(tags[1], fill=textcolor)
c.itemconfig(tags[2], fill=textcolor,
outline=textcolor)
c.itemconfig(tags[3], fill=textcolor)
c.itemconfig(tags[4], fill=textcolor)
return
else:
N = self._chart.num_leaves()
if self._marks.has_key(edge):
self._color_edge(self._marks[edge])
if (edge.is_complete() and edge.span() == (0, N)):
self._color_edge(edge, '#084', '#042')
elif isinstance(edge, LeafEdge):
self._color_edge(edge, '#48c', '#246')
else:
self._color_edge(edge, '#00f', '#008')
    def mark_edge(self, edge, mark='#0df'):
        """
        Mark an edge

        @param mark: The color to use for the marked edge's line.
        """
        self._marks[edge] = mark
        self._color_edge(edge)
def unmark_edge(self, edge=None):
"""
Unmark an edge (or all edges)
"""
if edge == None:
old_marked_edges = self._marks.keys()
self._marks = {}
for edge in old_marked_edges:
self._color_edge(edge)
else:
del self._marks[edge]
self._color_edge(edge)
    def markonly_edge(self, edge, mark='#0df'):
        """Unmark all edges, then mark only C{edge}."""
        self.unmark_edge()
        self.mark_edge(edge, mark)
def _analyze(self):
    """
    Analyze the sentence string, to figure out how big a unit needs
    to be, how big the tree should be, etc.
    """
    # Figure out the text height and the unit size.
    unitsize = 70 # min unitsize
    text_height = 0
    c = self._chart_canvas
    # Check against all tokens: render each leaf once just to measure
    # its bounding box, then delete the temporary canvas item.
    for leaf in self._chart.leaves():
        tag = c.create_text(0,0, text=repr(leaf),
                            font=self._font,
                            anchor='nw', justify='left')
        bbox = c.bbox(tag)
        c.delete(tag)
        # A unit must be wide enough for the widest token plus spacing.
        width = bbox[2] + ChartView._LEAF_SPACING
        unitsize = max(width, unitsize)
        text_height = max(text_height, bbox[3] - bbox[1])
    self._unitsize = unitsize
    self._text_height = text_height
    self._sentence_height = (self._text_height +
                             2*ChartView._MARGIN)
    # Check against edges.
    for edge in self._chart.edges():
        self._analyze_edge(edge)
    # Size of chart levels
    self._chart_level_size = self._text_height * 2.5
    # Default tree size..
    self._tree_height = (3 * (ChartView._TREE_LEVEL_SIZE +
                              self._text_height))
    # Resize the scrollregions.
    self._resize()
def _resize(self):
    """
    Update the scroll-regions for each canvas so that everything that
    has been drawn is reachable with the scrollbars.  This does I{not}
    resize the window itself.
    """
    canvas = self._chart_canvas
    # Reset the chart scroll region.
    width = (self._chart.num_leaves() * self._unitsize +
             ChartView._MARGIN * 2)
    level_count = len(self._edgelevels)
    self._chart_height = (level_count + 2) * self._chart_level_size
    canvas['scrollregion'] = (0, 0, width, self._chart_height)
    # Reset the tree scroll region, if a tree canvas exists.
    if self._tree_canvas:
        self._tree_canvas['scrollregion'] = (0, 0, width,
                                             self._tree_height)
def _draw_loclines(self):
    """
    Draw location lines: vertical gridlines that show where each
    location unit (word boundary) falls, on all three canvases.
    """
    BOTTOM = 50000
    c1 = self._tree_canvas
    c2 = self._sentence_canvas
    c3 = self._chart_canvas
    margin = ChartView._MARGIN
    self._loclines = []
    for i in range(0, self._chart.num_leaves()+1):
        x = i*self._unitsize + margin
        # The tree and sentence canvases are optional.
        if c1:
            t1 = c1.create_line(x, 0, x, BOTTOM)
            c1.tag_lower(t1)
        if c2:
            t2 = c2.create_line(x, 0, x, self._sentence_height)
            c2.tag_lower(t2)
        t3 = c3.create_line(x, 0, x, BOTTOM)
        c3.tag_lower(t3)
        # Label each gridline with its location index.  (str(i) replaces
        # the deprecated backquote-repr syntax `i`.)
        t4 = c3.create_text(x+2, 0, text=str(i), anchor='nw',
                            font=self._font)
        c3.tag_lower(t4)
        # Alternate gray shades so locations are easier to count.
        if i % 2 == 0:
            if c1: c1.itemconfig(t1, fill='gray60')
            if c2: c2.itemconfig(t2, fill='gray60')
            c3.itemconfig(t3, fill='gray60')
        else:
            if c1: c1.itemconfig(t1, fill='gray80')
            if c2: c2.itemconfig(t2, fill='gray80')
            c3.itemconfig(t3, fill='gray80')
def _draw_sentence(self):
    """Draw the sentence string."""
    if self._chart.num_leaves() == 0: return
    c = self._sentence_canvas
    margin = ChartView._MARGIN
    y = ChartView._MARGIN
    for i, leaf in enumerate(self._chart.leaves()):
        # Each leaf is centered within its own unit-sized column.
        x1 = i * self._unitsize + margin
        x2 = x1 + self._unitsize
        x = (x1+x2)/2
        tag = c.create_text(x, y, text=repr(leaf),
                            font=self._font,
                            anchor='n', justify='left')
        bbox = c.bbox(tag)
        # Draw a light background box behind the token, lowered beneath
        # the text so the token remains readable.
        rt = c.create_rectangle(x1+2, bbox[1]-(ChartView._LEAF_SPACING/2),
                                x2-2, bbox[3]+(ChartView._LEAF_SPACING/2),
                                fill='#f0f0f0', outline='#f0f0f0')
        c.tag_lower(rt)
def erase_tree(self):
    """Erase the currently displayed tree and forget its tree tokens."""
    # Delete every canvas item that belongs to the tree display.
    for item in self._tree_tags:
        self._tree_canvas.delete(item)
    # Reset the tree-token bookkeeping.
    self._treetoks = []
    self._treetoks_edge = None
    self._treetoks_index = 0
def draw_tree(self, edge=None):
    """
    Draw the parse tree(s) licensed by *edge*; with no argument,
    redraw the tree for the most recently drawn edge.
    """
    if edge is None and self._treetoks_edge is None: return
    if edge is None: edge = self._treetoks_edge
    # If it's a new edge, then get a new list of treetoks.
    if self._treetoks_edge != edge:
        self._treetoks = [t for t in self._chart.trees(edge)
                          if isinstance(t, Tree)]
        self._treetoks_edge = edge
        self._treetoks_index = 0
    # Make sure there's something to draw.
    if len(self._treetoks) == 0: return
    # Erase the old tree.
    for tag in self._tree_tags: self._tree_canvas.delete(tag)
    # Draw the new tree.
    tree = self._treetoks[self._treetoks_index]
    self._draw_treetok(tree, edge.start())
    # Show how many trees are available for the edge.
    self._draw_treecycle()
    # Update the scroll region.
    w = self._chart.num_leaves()*self._unitsize+2*ChartView._MARGIN
    h = tree.height() * (ChartView._TREE_LEVEL_SIZE+self._text_height)
    self._tree_canvas['scrollregion'] = (0, 0, w, h)
def cycle_tree(self):
    """Advance to the next available tree for the current edge."""
    # Guard: an edge with no trees previously raised ZeroDivisionError
    # (len(self._treetoks) == 0 in the modulus below).
    if not self._treetoks:
        return
    self._treetoks_index = (self._treetoks_index+1) % len(self._treetoks)
    self.draw_tree(self._treetoks_edge)
def _draw_treecycle(self):
    """
    Draw the '<n> Trees' label and one clickable triangle per
    available tree, highlighting the tree currently shown.
    """
    if len(self._treetoks) <= 1: return
    # Draw the label.
    label = '%d Trees' % len(self._treetoks)
    c = self._tree_canvas
    margin = ChartView._MARGIN
    right = self._chart.num_leaves()*self._unitsize+margin-2
    tag = c.create_text(right, 2, anchor='ne', text=label,
                        font=self._boldfont)
    self._tree_tags.append(tag)
    _, _, _, y = c.bbox(tag)
    # Draw the triangles.
    for i in range(len(self._treetoks)):
        x = right - 20*(len(self._treetoks)-i-1)
        # Fill only the triangle for the currently displayed tree.
        if i == self._treetoks_index: fill = '#084'
        else: fill = '#fff'
        tag = c.create_polygon(x, y+10, x-5, y, x-10, y+10,
                               fill=fill, outline='black')
        self._tree_tags.append(tag)
        # Set up a callback: show the tree if they click on its
        # triangle.  (i is bound as a default argument to freeze the
        # loop variable's current value in the closure.)
        def cb(event, self=self, i=i):
            self._treetoks_index = i
            self.draw_tree()
        c.tag_bind(tag, '<Button-1>', cb)
def _draw_treetok(self, treetok, index, depth=0):
    """
    Recursively draw a tree token on the tree canvas.

    @param index: The index of the first leaf in the tree.
    @return: A tuple (nodex, index): the x coordinate of this node,
        and the index of the first leaf after the tree.
    """
    c = self._tree_canvas
    margin = ChartView._MARGIN
    # Draw the children
    child_xs = []
    for child in treetok:
        if isinstance(child, Tree):
            child_x, index = self._draw_treetok(child, index, depth+1)
            child_xs.append(child_x)
        else:
            # A leaf: center it in its unit-sized column.
            child_xs.append((2*index+1)*self._unitsize/2 + margin)
            index += 1
    # If we have children, then get the node's x by averaging their
    # node x's. Otherwise, make room for ourselves.
    if child_xs:
        nodex = sum(child_xs)/len(child_xs)
    else:
        # [XX] breaks for null productions.
        nodex = (2*index+1)*self._unitsize/2 + margin
        index += 1
    # Draw the node
    nodey = depth * (ChartView._TREE_LEVEL_SIZE + self._text_height)
    tag = c.create_text(nodex, nodey, anchor='n', justify='center',
                        text=str(treetok.node), fill='#042',
                        font=self._boldfont)
    self._tree_tags.append(tag)
    # Draw lines to the children.
    childy = nodey + ChartView._TREE_LEVEL_SIZE + self._text_height
    for childx, child in zip(child_xs, treetok):
        if isinstance(child, Tree) and child:
            # A "real" tree token: solid line down to the child node.
            tag = c.create_line(nodex, nodey + self._text_height,
                                childx, childy, width=2, fill='#084')
            self._tree_tags.append(tag)
        if isinstance(child, Tree) and not child:
            # An unexpanded tree token: dashed line.
            tag = c.create_line(nodex, nodey + self._text_height,
                                childx, childy, width=2,
                                fill='#048', dash='2 3')
            self._tree_tags.append(tag)
        if not isinstance(child, Tree):
            # A leaf: line runs all the way down toward the sentence row.
            tag = c.create_line(nodex, nodey + self._text_height,
                                childx, 10000, width=2, fill='#084')
            self._tree_tags.append(tag)
    return nodex, index
def draw(self):
    """
    Draw everything (from scratch).
    """
    # Redraw the (optional) tree and sentence canvases first.
    if self._tree_canvas:
        self._tree_canvas.delete('all')
        self.draw_tree()
    if self._sentence_canvas:
        self._sentence_canvas.delete('all')
        self._draw_sentence()
    self._chart_canvas.delete('all')
    self._edgetags = {}
    # Redraw any edges we erased.
    for lvl in range(len(self._edgelevels)):
        for edge in self._edgelevels[lvl]:
            self._draw_edge(edge, lvl)
    for edge in self._chart:
        self._add_edge(edge)
    # Gridlines are drawn last (they lower themselves beneath the edges).
    self._draw_loclines()
def add_callback(self, event, func):
self._callbacks.setdefault(event,{})[func] = 1
def remove_callback(self, event, func=None):
if func is None: del self._callbacks[event]
else:
try: del self._callbacks[event][func]
except: pass
def _fire_callbacks(self, event, *args):
if not self._callbacks.has_key(event): return
for cb_func in self._callbacks[event].keys(): cb_func(*args)
#######################################################################
# Pseudo Earley Rule
#######################################################################
# This isn't *true* Early, since it doesn't use the separate lexicon
# dictionary. (I.e., it uses TopDownMatchRule instead of ScannerRule)
# But it's close enough for demonstration purposes.
class PseudoEarleyRule(AbstractChartRule):
    """
    A demonstration rule that applies the predictor, scanner, and
    completer sub-rules in sequence, remembering which sub-rule
    produced the most recent edge so that __str__ can report it.
    """
    NUM_EDGES = 1
    # NOTE: these rule instances are class attributes, shared by every
    # PseudoEarleyRule instance.
    _completer = CompleterRule()
    _scanner = TopDownMatchRule()
    _predictor = PredictorRule()

    def __init__(self):
        self._most_recent_rule = None

    def apply_iter(self, chart, grammar, edge):
        # Try each sub-rule in Earley order, recording which one fired.
        for rule in (self._predictor, self._scanner, self._completer):
            for new_edge in rule.apply_iter(chart, grammar, edge):
                self._most_recent_rule = rule
                yield new_edge

    def __str__(self):
        for rule, name in (
            (self._completer, 'Completer Rule (aka Fundamental Rule)'),
            (self._scanner, 'Scanner Rule (aka Top Down Match Rule)'),
            (self._predictor, 'Predictor Rule (aka Top Down Expand Rule)')):
            if self._most_recent_rule is rule:
                return name
        return 'Pseudo Earley Rule'
class PseudoEarleyInitRule(TopDownInitRule):
    # Identical to TopDownInitRule, but renamed for the Earley display.
    def __str__(self):
        return 'Predictor Rule (aka Top Down Expand Rule)'
#######################################################################
# Edge Rules
#######################################################################
# These version of the chart rules only apply to a specific edge.
# This lets the user select an edge, and then apply a rule.
class EdgeRule(object):
    """
    Mixin that restricts a chart rule so it only applies to one
    specific edge, chosen when the rule is constructed.

    To create an edge rule, make an empty class that uses EdgeRule as
    the first base class, and the basic rule as the second base class.
    (Order matters!)
    """
    def __init__(self, edge):
        # The "real" rule is the subclass's second base class.  (Renamed
        # from `super`, which shadowed the builtin of the same name.)
        base_rule = self.__class__.__bases__[1]
        self._edge = edge
        # One edge is pre-supplied, so one fewer is required at apply time.
        self.NUM_EDGES = base_rule.NUM_EDGES-1

    def apply_iter(self, chart, grammar, *edges):
        base_rule = self.__class__.__bases__[1]
        # Append the pre-selected edge and delegate to the base rule.
        edges += (self._edge,)
        for e in base_rule.apply_iter(self, chart, grammar, *edges): yield e

    def __str__(self):
        base_rule = self.__class__.__bases__[1]
        return base_rule.__str__(self)
# Edge-restricted variants of the standard chart rules (see EdgeRule;
# base-class order matters).
class TopDownExpandEdgeRule(EdgeRule, TopDownExpandRule): pass
class TopDownMatchEdgeRule(EdgeRule, TopDownMatchRule): pass
class BottomUpEdgeRule(EdgeRule, BottomUpPredictRule): pass
class BottomUpInitEdgeRule(EdgeRule, BottomUpInitRule): pass
class FundamentalEdgeRule(EdgeRule, SingleEdgeFundamentalRule): pass
class PseudoEarleyEdgeRule(EdgeRule, PseudoEarleyRule): pass
#######################################################################
# Chart Demo
#######################################################################
class ChartDemo(object):
    """Graphical demonstration of chart-parsing strategies."""
    def __init__(self, grammar, tokens, title='Chart Parsing Demo'):
        # Initialize the parser
        self._init_parser(grammar, tokens)
        self._root = None
        try:
            # Create the root window.
            self._root = Tkinter.Tk()
            self._root.title(title)
            self._root.bind('<Control-q>', self.destroy)
            # Set up some frames.
            frame3 = Tkinter.Frame(self._root)
            frame2 = Tkinter.Frame(self._root)
            frame1 = Tkinter.Frame(self._root)
            frame3.pack(side='bottom', fill='none')
            frame2.pack(side='bottom', fill='x')
            frame1.pack(side='bottom', fill='both', expand=1)
            self._init_fonts(self._root)
            self._init_animation()
            self._init_chartview(frame1)
            self._init_rulelabel(frame2)
            self._init_buttons(frame3)
            self._init_menubar()
            self._matrix = None
            self._results = None
            # Set up keyboard bindings.
            self._init_bindings()
        except:
            # Tear the window down on any failure, then re-raise so the
            # caller still sees the original error.
            print 'Error creating Tree View'
            self.destroy()
            raise
def destroy(self, *args):
    """Close the demo window; safe to call more than once."""
    if self._root is None:
        return
    self._root.destroy()
    self._root = None
def mainloop(self, *args, **kwargs):
    """
    Enter the Tkinter mainloop.  This function must be called if
    this demo is created from a non-interactive program (e.g.
    from a script); otherwise, the demo will close as soon as
    the script completes.
    """
    # Under IDLE a Tk mainloop is already running; don't start another.
    if in_idle(): return
    self._root.mainloop(*args, **kwargs)
#////////////////////////////////////////////////////////////
# Initialization Helpers
#////////////////////////////////////////////////////////////
def _init_parser(self, grammar, tokens):
    """Create the stepping chart parser for *grammar* and *tokens*."""
    self._grammar = grammar
    self._tokens = tokens
    self._cp = SteppingChartParse(self._grammar)
    self._cp.initialize(self._tokens)
    self._chart = self._cp.chart()
    # The step iterator -- use this to generate new edges
    self._cpstep = self._cp.step()
    # The currently selected edge
    self._selection = None
def _init_fonts(self, root):
    """Create the fonts, sized from the system default."""
    # See: <http://www.astro.washington.edu/owen/ROTKFolklore.html>
    self._sysfont = tkFont.Font(font=Tkinter.Button()["font"])
    root.option_add("*Font", self._sysfont)
    # What's our font size (default=same as sysfont)
    self._size = Tkinter.IntVar(root)
    self._size.set(self._sysfont.cget('size'))
    self._boldfont = tkFont.Font(family='helvetica', weight='bold',
                                 size=self._size.get())
    self._font = tkFont.Font(family='helvetica',
                             size=self._size.get())
def _init_animation(self):
    """Create the Tk variables controlling stepping and animation speed."""
    # Are we stepping? (default=yes)
    self._step = Tkinter.IntVar(self._root)
    self._step.set(1)
    # What's our animation speed (default=fast)
    self._animate = Tkinter.IntVar(self._root)
    self._animate.set(3) # Default speed = fast
    # Are we currently animating?
    self._animating = 0
def _init_chartview(self, parent):
    """Create the main chart view and hook up edge selection."""
    self._cv = ChartView(self._chart, parent,
                         draw_tree=1, draw_sentence=1)
    self._cv.add_callback('select', self._click_cv_edge)

def _init_rulelabel(self, parent):
    """Create the 'last rule' display and the Step checkbox."""
    ruletxt = 'Last edge generated by:'
    self._rulelabel1 = Tkinter.Label(parent,text=ruletxt,
                                     font=self._boldfont)
    self._rulelabel2 = Tkinter.Label(parent, width=40,
                                     relief='groove', anchor='w',
                                     font=self._boldfont)
    self._rulelabel1.pack(side='left')
    self._rulelabel2.pack(side='left')
    step = Tkinter.Checkbutton(parent, variable=self._step,
                               text='Step')
    step.pack(side='right')
def _init_buttons(self, parent):
    """Create the strategy buttons (row 1) and rule buttons (row 2)."""
    frame1 = Tkinter.Frame(parent)
    frame2 = Tkinter.Frame(parent)
    frame1.pack(side='bottom', fill='x')
    frame2.pack(side='top', fill='none')

    def button(frame, label, color, command, side):
        # All demo buttons share the same foreground and pack style.
        Tkinter.Button(frame, text=label, background=color,
                       foreground='black', command=command).pack(side=side)

    def spacer():
        # Small gap between groups of rule buttons.
        Tkinter.Frame(frame2, width=20).pack(side='left')

    STRATEGY, RULE = '#90c0d0', '#90f090'
    button(frame1, 'Reset\nParser', STRATEGY, self.reset, 'right')
    button(frame1, 'Top Down\nStrategy', STRATEGY,
           self.top_down_strategy, 'left')
    button(frame1, 'Bottom Up\nStrategy', STRATEGY,
           self.bottom_up_strategy, 'left')
    button(frame1, 'Earley\nAlgorithm', STRATEGY,
           self.earley_algorithm, 'left')
    button(frame2, 'Top Down Init\nRule', RULE, self.top_down_init, 'left')
    button(frame2, 'Top Down Expand\nRule', RULE,
           self.top_down_expand, 'left')
    button(frame2, 'Top Down Match\nRule', RULE,
           self.top_down_match, 'left')
    spacer()
    button(frame2, 'Bottom Up Init\nRule', RULE,
           self.bottom_up_init, 'left')
    button(frame2, 'Bottom Up Predict\nRule', RULE,
           self.bottom_up, 'left')
    spacer()
    button(frame2, 'Fundamental\nRule', RULE, self.fundamental, 'left')
def _init_bindings(self):
    """Install all keyboard shortcuts on the root window."""
    root = self._root
    cv = self._cv
    # Scrolling.
    root.bind('<Up>', cv.scroll_up)
    root.bind('<Down>', cv.scroll_down)
    root.bind('<Prior>', cv.page_up)
    root.bind('<Next>', cv.page_down)
    # Window and file management.
    root.bind('<Control-q>', self.destroy)
    root.bind('<Control-x>', self.destroy)
    root.bind('<F1>', self.help)
    root.bind('<Control-s>', self.save_chart)
    root.bind('<Control-o>', self.load_chart)
    root.bind('<Control-r>', self.reset)
    # Parsing strategies and editing.
    root.bind('t', self.top_down_strategy)
    root.bind('b', self.bottom_up_strategy)
    root.bind('e', self.earley_algorithm)
    root.bind('<space>', self._stop_animation)
    root.bind('<Control-g>', self.edit_grammar)
    root.bind('<Control-t>', self.edit_sentence)
    # Animation speed control (speed is frozen via default argument).
    for key, speed in (('-', 1), ('=', 2), ('+', 3)):
        root.bind(key, lambda e, a=self._animate, v=speed: a.set(v))
    # Step control (toggle).
    root.bind('s', lambda e, s=self._step: s.set(not s.get()))
def _init_menubar(self):
    """Build the menu bar: File/Edit/View/Apply/Animate/Zoom/Help."""
    menubar = Tkinter.Menu(self._root)

    filemenu = Tkinter.Menu(menubar, tearoff=0)
    filemenu.add_command(label='Save Chart', underline=0,
                         command=self.save_chart, accelerator='Ctrl-s')
    filemenu.add_command(label='Load Chart', underline=0,
                         command=self.load_chart, accelerator='Ctrl-o')
    filemenu.add_command(label='Reset Chart', underline=0,
                         command=self.reset, accelerator='Ctrl-r')
    filemenu.add_separator()
    filemenu.add_command(label='Save Grammar',
                         command=self.save_grammar)
    filemenu.add_command(label='Load Grammar',
                         command=self.load_grammar)
    filemenu.add_separator()
    filemenu.add_command(label='Exit', underline=1,
                         command=self.destroy, accelerator='Ctrl-x')
    menubar.add_cascade(label='File', underline=0, menu=filemenu)

    editmenu = Tkinter.Menu(menubar, tearoff=0)
    editmenu.add_command(label='Edit Grammar', underline=5,
                         command=self.edit_grammar,
                         accelerator='Ctrl-g')
    editmenu.add_command(label='Edit Text', underline=5,
                         command=self.edit_sentence,
                         accelerator='Ctrl-t')
    menubar.add_cascade(label='Edit', underline=0, menu=editmenu)

    viewmenu = Tkinter.Menu(menubar, tearoff=0)
    viewmenu.add_command(label='Chart Matrix', underline=6,
                         command=self.view_matrix)
    viewmenu.add_command(label='Results', underline=0,
                         command=self.view_results)
    menubar.add_cascade(label='View', underline=0, menu=viewmenu)

    rulemenu = Tkinter.Menu(menubar, tearoff=0)
    rulemenu.add_command(label='Top Down Strategy', underline=0,
                         command=self.top_down_strategy,
                         accelerator='t')
    rulemenu.add_command(label='Bottom Up Strategy', underline=0,
                         command=self.bottom_up_strategy,
                         accelerator='b')
    # BUG FIX: this item was wired to self.bottom_up_strategy; bind it
    # to the Earley algorithm, matching the 'e' key and the button.
    rulemenu.add_command(label='Earley Algorithm', underline=0,
                         command=self.earley_algorithm,
                         accelerator='e')
    rulemenu.add_separator()
    rulemenu.add_command(label='Bottom Up Init Rule',
                         command=self.bottom_up_init)
    rulemenu.add_command(label='Bottom Up Rule',
                         command=self.bottom_up)
    rulemenu.add_command(label='Top Down Init Rule',
                         command=self.top_down_init)
    rulemenu.add_command(label='Top Down Expand Rule',
                         command=self.top_down_expand)
    rulemenu.add_command(label='Top Down Match Rule',
                         command=self.top_down_match)
    rulemenu.add_command(label='Fundamental Rule',
                         command=self.fundamental)
    menubar.add_cascade(label='Apply', underline=0, menu=rulemenu)

    animatemenu = Tkinter.Menu(menubar, tearoff=0)
    animatemenu.add_checkbutton(label="Step", underline=0,
                                variable=self._step,
                                accelerator='s')
    animatemenu.add_separator()
    animatemenu.add_radiobutton(label="No Animation", underline=0,
                                variable=self._animate, value=0)
    animatemenu.add_radiobutton(label="Slow Animation", underline=0,
                                variable=self._animate, value=1,
                                accelerator='-')
    animatemenu.add_radiobutton(label="Normal Animation", underline=0,
                                variable=self._animate, value=2,
                                accelerator='=')
    animatemenu.add_radiobutton(label="Fast Animation", underline=0,
                                variable=self._animate, value=3,
                                accelerator='+')
    menubar.add_cascade(label="Animate", underline=1, menu=animatemenu)

    zoommenu = Tkinter.Menu(menubar, tearoff=0)
    zoommenu.add_radiobutton(label='Tiny', variable=self._size,
                             underline=0, value=10, command=self.resize)
    zoommenu.add_radiobutton(label='Small', variable=self._size,
                             underline=0, value=12, command=self.resize)
    zoommenu.add_radiobutton(label='Medium', variable=self._size,
                             underline=0, value=14, command=self.resize)
    zoommenu.add_radiobutton(label='Large', variable=self._size,
                             underline=0, value=18, command=self.resize)
    zoommenu.add_radiobutton(label='Huge', variable=self._size,
                             underline=0, value=24, command=self.resize)
    menubar.add_cascade(label='Zoom', underline=0, menu=zoommenu)

    helpmenu = Tkinter.Menu(menubar, tearoff=0)
    helpmenu.add_command(label='About', underline=0,
                         command=self.about)
    helpmenu.add_command(label='Instructions', underline=0,
                         command=self.help, accelerator='F1')
    menubar.add_cascade(label='Help', underline=0, menu=helpmenu)

    self._root.config(menu=menubar)
#////////////////////////////////////////////////////////////
# Selection Handling
#////////////////////////////////////////////////////////////
def _click_cv_edge(self, edge):
    """Callback for a click on an edge in the chart view."""
    if edge != self._selection:
        # Clicking on a new edge selects it.
        self._select_edge(edge)
    else:
        # Repeated clicks on one edge cycle its trees.
        self._cv.cycle_tree()
    # [XX] this can get confused if animation is running
    # faster than the callbacks...

def _select_matrix_edge(self, edge):
    """Callback for edge selection in the matrix view."""
    self._select_edge(edge)
    self._cv.view_edge(edge)

def _select_edge(self, edge):
    """Make *edge* the current selection and highlight it in all views."""
    self._selection = edge
    # Update the chart view.
    self._cv.markonly_edge(edge, '#f00')
    self._cv.draw_tree(edge)
    # Update the matrix view.
    if self._matrix: self._matrix.markonly_edge(edge)
    if self._matrix: self._matrix.view_edge(edge)

def _deselect_edge(self):
    """Clear the current selection in all views."""
    self._selection = None
    # Update the chart view.
    self._cv.unmark_edge()
    self._cv.erase_tree()
    # Update the matrix view
    if self._matrix: self._matrix.unmark_edge()

def _show_new_edge(self, edge):
    """Display a freshly generated edge in every open view."""
    self._display_rule(self._cp.current_chartrule())
    # Update the chart view.
    self._cv.update()
    self._cv.draw_tree(edge)
    self._cv.markonly_edge(edge, '#0df')
    self._cv.view_edge(edge)
    # Update the matrix view.
    if self._matrix: self._matrix.update()
    if self._matrix: self._matrix.markonly_edge(edge)
    if self._matrix: self._matrix.view_edge(edge)
    # Update the results view.
    if self._results: self._results.update(edge)
#////////////////////////////////////////////////////////////
# Help/usage
#////////////////////////////////////////////////////////////
def help(self, *e):
    """Show the module docstring in a help window."""
    self._animating = 0
    # The default font's not very legible; try using 'fixed' instead.
    try:
        ShowText(self._root, 'Help: Chart Parser Demo',
                 (__doc__).strip(), width=75, font='fixed')
    except Exception:
        # Narrowed from a bare except: fall back to the default font if
        # 'fixed' is unavailable, but let KeyboardInterrupt etc. escape.
        ShowText(self._root, 'Help: Chart Parser Demo',
                 (__doc__).strip(), width=75)
def about(self, *e):
    """Show the 'About' dialog."""
    tkMessageBox.showinfo('About: Chart Parser Demo',
                          'NLTK Chart Parser Demo\nWritten by Edward Loper')
#////////////////////////////////////////////////////////////
# File Menu
#////////////////////////////////////////////////////////////
# File-type filters for the load/save file dialogs.
CHART_FILE_TYPES = [('Pickle file', '.pickle'),
                    ('All files', '*')]
GRAMMAR_FILE_TYPES = [('Plaintext grammar file', '.cfg'),
                      ('Pickle file', '.pickle'),
                      ('All files', '*')]
def load_chart(self, *args):
    "Load a chart from a pickle file"
    filename = askopenfilename(filetypes=self.CHART_FILE_TYPES,
                               defaultextension='.pickle')
    if not filename: return
    try:
        # Open in binary mode: pickles are binary data.
        f = open(filename, 'rb')
        try:
            chart = pickle.load(f)
        finally:
            f.close()
        self._chart = chart
        self._cv.update(chart)
        if self._matrix: self._matrix.set_chart(chart)
        if self._matrix: self._matrix.deselect_cell()
        if self._results: self._results.set_chart(chart)
        self._cp.set_chart(chart)
    except Exception:
        # BUG FIX: a leftover `raise` previously made this dialog
        # unreachable; report the failure to the user instead,
        # matching load_grammar.
        tkMessageBox.showerror('Error Loading Chart',
                               'Unable to open file: %r' % filename)
def save_chart(self, *args):
    "Save a chart to a pickle file"
    filename = asksaveasfilename(filetypes=self.CHART_FILE_TYPES,
                                 defaultextension='.pickle')
    if not filename: return
    try:
        # Binary mode for pickle output; close the file even on error.
        f = open(filename, 'wb')
        try:
            pickle.dump(self._chart, f)
        finally:
            f.close()
    except Exception:
        # BUG FIX: a leftover `raise` previously made this dialog
        # unreachable; report the failure to the user instead.
        tkMessageBox.showerror('Error Saving Chart',
                               'Unable to open file: %r' % filename)
def load_grammar(self, *args):
    "Load a grammar from a pickle file"
    filename = askopenfilename(filetypes=self.GRAMMAR_FILE_TYPES,
                               defaultextension='.cfg')
    if not filename: return
    try:
        if filename.endswith('.pickle'):
            # Binary mode for pickled grammars; always close the file.
            f = open(filename, 'rb')
            try:
                grammar = pickle.load(f)
            finally:
                f.close()
        else:
            f = open(filename, 'r')
            try:
                grammar = cfg.parse_grammar(f.read())
            finally:
                f.close()
        self.set_grammar(grammar)
    except Exception:
        tkMessageBox.showerror('Error Loading Grammar',
                               'Unable to open file: %r' % filename)
def save_grammar(self, *args):
    "Save the current grammar to a pickle or plaintext .cfg file"
    filename = asksaveasfilename(filetypes=self.GRAMMAR_FILE_TYPES,
                                 defaultextension='.cfg')
    if not filename: return
    try:
        if filename.endswith('.pickle'):
            # BUG FIX: previously pickled (self._chart, self._tokens),
            # which load_grammar could not read back as a grammar;
            # pickle the grammar itself.
            f = open(filename, 'wb')
            try:
                pickle.dump(self._grammar, f)
            finally:
                f.close()
        else:
            # Write the start symbol's productions first.  (Renamed the
            # handle from `file`, which shadowed the builtin.)
            out = open(filename, 'w')
            try:
                prods = self._grammar.productions()
                start = [p for p in prods if p.lhs() == self._grammar.start()]
                rest = [p for p in prods if p.lhs() != self._grammar.start()]
                for prod in start: out.write('%s\n' % prod)
                for prod in rest: out.write('%s\n' % prod)
            finally:
                out.close()
    except Exception:
        tkMessageBox.showerror('Error Saving Grammar',
                               'Unable to open file: %r' % filename)
def reset(self, *args):
    """Discard all edges and restart the parser on the current input."""
    self._animating = 0
    self._cp = SteppingChartParse(self._grammar)
    self._cp.initialize(self._tokens)
    self._chart = self._cp.chart()
    self._cv.update(self._chart)
    if self._matrix: self._matrix.set_chart(self._chart)
    if self._matrix: self._matrix.deselect_cell()
    if self._results: self._results.set_chart(self._chart)
    # Fresh step iterator for the new chart.
    self._cpstep = self._cp.step()
#////////////////////////////////////////////////////////////
# Edit
#////////////////////////////////////////////////////////////
def edit_grammar(self, *e):
    """Open the CFG editor on the current grammar."""
    CFGEditor(self._root, self._grammar, self.set_grammar)

def set_grammar(self, grammar):
    """Install a new grammar (callback for the CFG editor)."""
    self._grammar = grammar
    self._cp.set_grammar(grammar)
    if self._results: self._results.set_grammar(grammar)

def edit_sentence(self, *e):
    """Prompt the user for a new sentence to parse."""
    sentence = ' '.join(self._tokens)
    title = 'Edit Text'
    instr = 'Enter a new sentence to parse.'
    EntryDialog(self._root, sentence, instr, self.set_sentence, title)

def set_sentence(self, sentence):
    """Tokenize a new sentence and restart the parser on it."""
    self._tokens = list(tokenize.whitespace(sentence))
    self.reset()
#////////////////////////////////////////////////////////////
# View Menu
#////////////////////////////////////////////////////////////
def view_matrix(self, *e):
    """Open (or re-open) the chart matrix view."""
    if self._matrix is not None: self._matrix.destroy()
    self._matrix = ChartMatrixView(self._root, self._chart)
    self._matrix.add_callback('select', self._select_matrix_edge)

def view_results(self, *e):
    """Open (or re-open) the parse results view."""
    if self._results is not None: self._results.destroy()
    self._results = ChartResultsView(self._root, self._chart,
                                     self._grammar)
#////////////////////////////////////////////////////////////
# Zoom Menu
#////////////////////////////////////////////////////////////
def resize(self):
    """Apply the font size currently selected in the Zoom menu."""
    self._animating = 0
    self.set_font_size(self._size.get())

def set_font_size(self, size):
    """Set the chart view and all fonts to the given point size."""
    self._cv.set_font_size(size)
    # A negative size is a pixel size in Tk; always pass a negative value.
    for font in (self._font, self._boldfont, self._sysfont):
        font.configure(size=-abs(size))

def get_font_size(self):
    """Return the current font size as a positive number."""
    return abs(self._size.get())
#////////////////////////////////////////////////////////////
# Parsing
#////////////////////////////////////////////////////////////
def apply_strategy(self, strategy, edge_strategy=None):
    """
    Apply a parsing strategy.  In step mode one edge is generated
    (restricted to the selected edge when edge_strategy is given);
    otherwise the strategy runs to completion, animated if requested.
    """
    # If we're animating, then stop.
    if self._animating:
        self._animating = 0
        return
    # Clear the rule display & mark.
    self._display_rule(None)
    #self._cv.unmark_edge()
    if self._step.get():
        selection = self._selection
        if (selection is not None) and (edge_strategy is not None):
            # Apply the given strategy to the selected edge.
            self._cp.set_strategy([edge_strategy(selection)])
            newedge = self._apply_strategy()
            # If it failed, then clear the selection.
            if newedge is None:
                self._cv.unmark_edge()
                self._selection = None
        else:
            self._cp.set_strategy(strategy)
            self._apply_strategy()
    else:
        self._cp.set_strategy(strategy)
        if self._animate.get():
            self._animating = 1
            self._animate_strategy()
        else:
            # Run to completion without animation, then refresh views.
            for edge in self._cpstep:
                if edge is None: break
            self._cv.update()
            if self._matrix: self._matrix.update()
            if self._results: self._results.update()
def _stop_animation(self, *e):
self._animating = 0
def _animate_strategy(self, speed=1):
if self._animating == 0: return
if self._apply_strategy() is not None:
if self._animate.get() == 0 or self._step.get() == 1:
return
if self._animate.get() == 1:
self._root.after(3000, self._animate_strategy)
elif self._animate.get() == 2:
self._root.after(1000, self._animate_strategy)
else:
self._root.after(20, self._animate_strategy)
def _apply_strategy(self):
new_edge = self._cpstep.next()
if new_edge is not None:
self._show_new_edge(new_edge)
return new_edge
def _display_rule(self, rule):
if rule == None:
self._rulelabel2['text'] = ''
else:
name = str(rule)
self._rulelabel2['text'] = name
size = self._cv.get_font_size()
#////////////////////////////////////////////////////////////
# Parsing Strategies
#////////////////////////////////////////////////////////////
# Basic rules:
# Basic rules: each is a one-element list of chart-rule instances,
# shared by all ChartDemo instances (class attributes).
_TD_INIT = [TopDownInitRule()]
_TD_EXPAND = [TopDownExpandRule()]
_TD_MATCH = [TopDownMatchRule()]
_BU_INIT = [BottomUpInitRule()]
_BU_RULE = [BottomUpPredictRule()]
_FUNDAMENTAL = [SingleEdgeFundamentalRule()]
_EARLEY = [PseudoEarleyRule()]
_EARLEY_INIT = [PseudoEarleyInitRule()]
# Complete strategies:
_TD_STRATEGY = _TD_INIT + _TD_EXPAND + _TD_MATCH + _FUNDAMENTAL
_BU_STRATEGY = _BU_INIT + _BU_RULE + _FUNDAMENTAL
# NOTE: deliberately rebinds _EARLEY so the full strategy starts with
# the init rule followed by the pseudo-Earley rule.
_EARLEY = _EARLEY_INIT + _EARLEY
# Button callback functions: each applies one rule or strategy, passing
# the edge-restricted variant used when an edge is selected in step mode.
def top_down_init(self, *e):
    self.apply_strategy(self._TD_INIT, None)
def top_down_expand(self, *e):
    self.apply_strategy(self._TD_EXPAND, TopDownExpandEdgeRule)
def top_down_match(self, *e):
    self.apply_strategy(self._TD_MATCH, TopDownMatchEdgeRule)
def bottom_up_init(self, *e):
    self.apply_strategy(self._BU_INIT, BottomUpInitEdgeRule)
def bottom_up(self, *e):
    self.apply_strategy(self._BU_RULE, BottomUpEdgeRule)
def fundamental(self, *e):
    self.apply_strategy(self._FUNDAMENTAL, FundamentalEdgeRule)
def bottom_up_strategy(self, *e):
    self.apply_strategy(self._BU_STRATEGY, BottomUpEdgeRule)
def top_down_strategy(self, *e):
    self.apply_strategy(self._TD_STRATEGY, TopDownExpandEdgeRule)
def earley_algorithm(self, *e):
    self.apply_strategy(self._EARLEY, PseudoEarleyEdgeRule)
def demo():
    """Create and run a chart-parser demo over a toy CFG."""
    grammar = cfg.parse_grammar("""
# Grammatical productions.
S -> NP VP
VP -> VP PP | V NP | V
NP -> Det N | NP PP
PP -> P NP
# Lexical productions.
NP -> 'John' | 'I'
Det -> 'the' | 'my' | 'a'
N -> 'dog' | 'cookie' | 'table' | 'cake' | 'fork'
V -> 'ate' | 'saw'
P -> 'on' | 'under' | 'with'
""")
    # Two candidate sentences; the second assignment deliberately
    # overrides the first (longer) one.
    sent = 'John ate the cake on the table with a fork'
    sent = 'John ate the cake on the table'
    tokens = list(tokenize.whitespace(sent))
    print 'grammar= ('
    for rule in grammar.productions():
        print ' ', repr(rule)+','
    print ')'
    print 'tokens = %r' % tokens
    print 'Calling "ChartDemo(grammar, tokens)"...'
    ChartDemo(grammar, tokens).mainloop()
# Run the chart-parser demo when executed as a script.
if __name__ == '__main__':
    demo()
# Chart comparer:
#charts = ['/tmp/earley.pickle',
# '/tmp/topdown.pickle',
# '/tmp/bottomup.pickle']
#ChartComparer(*charts).mainloop()
#import profile
#profile.run('demo2()', '/tmp/profile.out')
#import pstats
#p = pstats.Stats('/tmp/profile.out')
#p.strip_dirs().sort_stats('time', 'cum').print_stats(60)
#p.strip_dirs().sort_stats('cum', 'time').print_stats(60)
| Python |
# Natural Language Toolkit: Dispersion Plots
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Steven Bird <sb@csse.unimelb.edu.au>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
A utility for displaying lexical dispersion.
"""
from Tkinter import Canvas
def plot(text, words, rowheight=15, rowwidth=800):
    """
    Generate a lexical dispersion plot.

    @param text: The source text
    @type text: C{list} or C{enum} of C{str}
    @param words: The target words
    @type words: C{list} of C{str}
    @param rowheight: Pixel height of a row
    @type rowheight: C{int}
    @param rowwidth: Pixel width of a row
    @type rowwidth: C{int}
    @raise ValueError: If the text is empty.
    """
    text = list(text)
    # Guard: an empty text previously crashed with ZeroDivisionError
    # when computing the scale below.
    if not text:
        raise ValueError('plot() requires a non-empty text')
    canvas = Canvas(width=rowwidth, height=rowheight*len(words))
    scale = float(rowwidth)/len(text)
    # Precompute each target word's row indices (a word may appear more
    # than once in `words`), so the main loop is a single dict lookup
    # per token instead of a scan of the whole word list.
    rows = {}
    for i, word in enumerate(words):
        rows.setdefault(word, []).append(i)
    for position, word in enumerate(text):
        # One vertical tick per occurrence, in the matching word's row.
        for i in rows.get(word, ()):
            x = position * scale
            y = i * rowheight
            canvas.create_line(x, y, x, y+rowheight-1)
    canvas.pack()
    canvas.mainloop()
# Demo: plot the main characters of Austen's "Sense and Sensibility".
if __name__ == '__main__':
    from en.parser.nltk_lite.corpora import gutenberg
    from en.parser.nltk_lite.draw import dispersion
    words = ['Elinor', 'Marianne', 'Edward', 'Willoughby']
    dispersion.plot(gutenberg.raw('austen-sense'), words)
| Python |
# Natural Language Toolkit: Shift/Reduce Parser Demo
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
#
# $Id: srparser.py 3460 2006-10-06 10:39:03Z stevenbird $
"""
A graphical tool for exploring the shift/reduce parser.
The shift/reduce parser maintains a stack, which records the structure
of the portion of the text that has been parsed. The stack is
initially empty. Its contents are shown on the left side of the main
canvas.
On the right side of the main canvas is the remaining text. This is
the portion of the text which has not yet been considered by the
parser.
The parser builds up a tree structure for the text using two
operations:
- "shift" moves the first token from the remaining text to the top
of the stack. In the demo, the top of the stack is its right-hand
side.
- "reduce" uses a grammar production to combine the rightmost stack
elements into a single tree token.
You can control the parser's operation by using the "shift" and
"reduce" buttons; or you can use the "step" button to let the parser
automatically decide which operation to apply. The parser uses the
following rules to decide which operation to apply:
- Only shift if no reductions are available.
- If multiple reductions are available, then apply the reduction
whose CFG production is listed earliest in the grammar.
The "reduce" button applies the reduction whose CFG production is
listed earliest in the grammar. There are two ways to manually choose
which reduction to apply:
- Click on a CFG production from the list of available reductions,
on the left side of the main window. The reduction based on that
production will be applied to the top of the stack.
- Click on one of the stack elements. A popup window will appear,
containing all available reductions. Select one, and it will be
applied to the top of the stack.
Note that reductions can only be applied to the top of the stack.
Keyboard Shortcuts::
[Space]\t Perform the next shift or reduce operation
[s]\t Perform a shift operation
[r]\t Perform a reduction operation
[Ctrl-z]\t Undo most recent operation
[Delete]\t Reset the parser
[g]\t Show/hide available production list
[Ctrl-a]\t Toggle animations
[h]\t Help
[Ctrl-p]\t Print
[q]\t Quit
"""
"""
Possible future improvements:
- button/window to change and/or select text. Just pop up a window
with an entry, and let them modify the text; and then retokenize
it? Maybe give a warning if it contains tokens whose types are
not in the grammar.
- button/window to change and/or select grammar. Select from
several alternative grammars? Or actually change the grammar? If
the latter, then I'd want to define nltk.draw.cfg, which would be
responsible for that.
"""
from en.parser.nltk_lite.draw.tree import *
from en.parser.nltk_lite.draw import *
from en.parser.nltk_lite import parse
from en.parser.nltk_lite.draw.cfg import CFGEditor
from en.parser.nltk_lite import tokenize
from Tkinter import *
import tkFont
class ShiftReduceDemo(object):
    """
    A graphical tool for exploring the shift/reduce parser.  The tool
    displays the parser's stack and the remaining text, and allows the
    user to control the parser's operation.  In particular, the user
    can shift tokens onto the stack, and can perform reductions on the
    top elements of the stack.  A "step" button simply steps through
    the parsing process, performing the operations that
    C{parse.ShiftReduce} would use.
    """
    def __init__(self, grammar, sent, trace=0):
        """
        Create the demo window for parsing C{sent} with C{grammar}.

        @param grammar: The grammar used by the stepping parser.
        @param sent: The list of tokens to parse.
        @param trace: Trace level passed to the underlying parser.
        """
        self._sent = sent
        self._parser = parse.SteppingShiftReduce(grammar, trace)

        # Set up the main window.
        self._top = Tk()
        self._top.title('Shift Reduce Parser Demo')

        # Animations.  animating_lock is a lock to prevent the demo
        # from performing new operations while it's animating.
        self._animating_lock = 0
        self._animate = IntVar(self._top)
        self._animate.set(10) # = medium

        # The user can hide the grammar.
        self._show_grammar = IntVar(self._top)
        self._show_grammar.set(1)

        # Initialize fonts.
        self._init_fonts(self._top)

        # Set up key bindings.
        self._init_bindings()

        # Create the basic frames.
        self._init_menubar(self._top)
        self._init_buttons(self._top)
        self._init_feedback(self._top)
        self._init_grammar(self._top)
        self._init_canvas(self._top)

        # A popup menu for reducing.
        self._reduce_menu = Menu(self._canvas, tearoff=0)

        # Reset the demo, and set the feedback frame to empty.
        self.reset()
        self._lastoper1['text'] = ''
    #########################################
    ##  Initialization Helpers
    #########################################

    def _init_fonts(self, root):
        """Create the demo's fonts, sized relative to the system font."""
        # See: <http://www.astro.washington.edu/owen/ROTKFolklore.html>
        self._sysfont = tkFont.Font(font=Button()["font"])
        root.option_add("*Font", self._sysfont)

        # What's our font size (default=same as sysfont)
        self._size = IntVar(root)
        self._size.set(self._sysfont.cget('size'))

        self._boldfont = tkFont.Font(family='helvetica', weight='bold',
                                     size=self._size.get())
        self._font = tkFont.Font(family='helvetica',
                                 size=self._size.get())
    def _init_grammar(self, parent):
        """Create the listbox that displays the grammar's productions."""
        # Grammar view.
        self._prodframe = listframe = Frame(parent)
        self._prodframe.pack(fill='both', side='left', padx=2)
        self._prodlist_label = Label(self._prodframe,
                                     font=self._boldfont,
                                     text='Available Reductions')
        self._prodlist_label.pack()
        self._prodlist = Listbox(self._prodframe, selectmode='single',
                                 relief='groove', background='white',
                                 foreground='#909090',
                                 font=self._font,
                                 selectforeground='#004040',
                                 selectbackground='#c0f0c0')
        self._prodlist.pack(side='right', fill='both', expand=1)

        self._productions = list(self._parser.grammar().productions())
        for production in self._productions:
            self._prodlist.insert('end', (' %s' % production))
        self._prodlist.config(height=min(len(self._productions), 25))

        # Add a scrollbar if there are more than 25 productions.
        if 1:#len(self._productions) > 25:
            listscroll = Scrollbar(self._prodframe,
                                   orient='vertical')
            self._prodlist.config(yscrollcommand = listscroll.set)
            listscroll.config(command=self._prodlist.yview)
            listscroll.pack(side='left', fill='y')

        # If they select a production, apply it.
        self._prodlist.bind('<<ListboxSelect>>', self._prodlist_select)

        # When they hover over a production, highlight it.
        self._hover = -1
        self._prodlist.bind('<Motion>', self._highlight_hover)
        self._prodlist.bind('<Leave>', self._clear_hover)
    def _init_bindings(self):
        """Bind keyboard shortcuts for quitting, parser ops, and misc UI."""
        # Quit
        self._top.bind('<Control-q>', self.destroy)
        self._top.bind('<Control-x>', self.destroy)
        self._top.bind('<Alt-q>', self.destroy)
        self._top.bind('<Alt-x>', self.destroy)

        # Ops (step, shift, reduce, undo)
        self._top.bind('<space>', self.step)
        self._top.bind('<s>', self.shift)
        self._top.bind('<Alt-s>', self.shift)
        self._top.bind('<Control-s>', self.shift)
        self._top.bind('<r>', self.reduce)
        self._top.bind('<Alt-r>', self.reduce)
        self._top.bind('<Control-r>', self.reduce)
        self._top.bind('<Delete>', self.reset)
        self._top.bind('<u>', self.undo)
        self._top.bind('<Alt-u>', self.undo)
        self._top.bind('<Control-u>', self.undo)
        self._top.bind('<Control-z>', self.undo)
        self._top.bind('<BackSpace>', self.undo)

        # Misc
        self._top.bind('<Control-p>', self.postscript)
        self._top.bind('<Control-h>', self.help)
        self._top.bind('<F1>', self.help)
        self._top.bind('<Control-g>', self.edit_grammar)
        self._top.bind('<Control-t>', self.edit_sentence)

        # Animation speed control
        self._top.bind('-', lambda e,a=self._animate:a.set(20))
        self._top.bind('=', lambda e,a=self._animate:a.set(10))
        self._top.bind('+', lambda e,a=self._animate:a.set(4))
    def _init_buttons(self, parent):
        """Create the Step/Shift/Reduce/Undo button row."""
        # Set up the frames.
        self._buttonframe = buttonframe = Frame(parent)
        buttonframe.pack(fill='none', side='bottom')
        Button(buttonframe, text='Step',
               background='#90c0d0', foreground='black',
               command=self.step,).pack(side='left')
        Button(buttonframe, text='Shift', underline=0,
               background='#90f090', foreground='black',
               command=self.shift).pack(side='left')
        Button(buttonframe, text='Reduce', underline=0,
               background='#90f090', foreground='black',
               command=self.reduce).pack(side='left')
        Button(buttonframe, text='Undo', underline=0,
               background='#f0a0a0', foreground='black',
               command=self.undo).pack(side='left')
    def _init_menubar(self, parent):
        """Create the File/Edit/Apply/View/Animate/Help menu bar."""
        menubar = Menu(parent)

        filemenu = Menu(menubar, tearoff=0)
        filemenu.add_command(label='Reset Parser', underline=0,
                             command=self.reset, accelerator='Del')
        filemenu.add_command(label='Print to Postscript', underline=0,
                             command=self.postscript, accelerator='Ctrl-p')
        filemenu.add_command(label='Exit', underline=1,
                             command=self.destroy, accelerator='Ctrl-x')
        menubar.add_cascade(label='File', underline=0, menu=filemenu)

        editmenu = Menu(menubar, tearoff=0)
        editmenu.add_command(label='Edit Grammar', underline=5,
                             command=self.edit_grammar,
                             accelerator='Ctrl-g')
        editmenu.add_command(label='Edit Text', underline=5,
                             command=self.edit_sentence,
                             accelerator='Ctrl-t')
        menubar.add_cascade(label='Edit', underline=0, menu=editmenu)

        rulemenu = Menu(menubar, tearoff=0)
        rulemenu.add_command(label='Step', underline=1,
                             command=self.step, accelerator='Space')
        rulemenu.add_separator()
        rulemenu.add_command(label='Shift', underline=0,
                             command=self.shift, accelerator='Ctrl-s')
        rulemenu.add_command(label='Reduce', underline=0,
                             command=self.reduce, accelerator='Ctrl-r')
        rulemenu.add_separator()
        rulemenu.add_command(label='Undo', underline=0,
                             command=self.undo, accelerator='Ctrl-u')
        menubar.add_cascade(label='Apply', underline=0, menu=rulemenu)

        viewmenu = Menu(menubar, tearoff=0)
        viewmenu.add_checkbutton(label="Show Grammar", underline=0,
                                 variable=self._show_grammar,
                                 command=self._toggle_grammar)
        viewmenu.add_separator()
        # Font-size radio buttons share the self._size IntVar.
        viewmenu.add_radiobutton(label='Tiny', variable=self._size,
                                 underline=0, value=10, command=self.resize)
        viewmenu.add_radiobutton(label='Small', variable=self._size,
                                 underline=0, value=12, command=self.resize)
        viewmenu.add_radiobutton(label='Medium', variable=self._size,
                                 underline=0, value=14, command=self.resize)
        viewmenu.add_radiobutton(label='Large', variable=self._size,
                                 underline=0, value=18, command=self.resize)
        viewmenu.add_radiobutton(label='Huge', variable=self._size,
                                 underline=0, value=24, command=self.resize)
        menubar.add_cascade(label='View', underline=0, menu=viewmenu)

        # Animation-speed radio buttons share the self._animate IntVar;
        # the value is the number of animation frames (0 = disabled).
        animatemenu = Menu(menubar, tearoff=0)
        animatemenu.add_radiobutton(label="No Animation", underline=0,
                                    variable=self._animate, value=0)
        animatemenu.add_radiobutton(label="Slow Animation", underline=0,
                                    variable=self._animate, value=20,
                                    accelerator='-')
        animatemenu.add_radiobutton(label="Normal Animation", underline=0,
                                    variable=self._animate, value=10,
                                    accelerator='=')
        animatemenu.add_radiobutton(label="Fast Animation", underline=0,
                                    variable=self._animate, value=4,
                                    accelerator='+')
        menubar.add_cascade(label="Animate", underline=1, menu=animatemenu)

        helpmenu = Menu(menubar, tearoff=0)
        helpmenu.add_command(label='About', underline=0,
                             command=self.about)
        helpmenu.add_command(label='Instructions', underline=0,
                             command=self.help, accelerator='F1')
        menubar.add_cascade(label='Help', underline=0, menu=helpmenu)

        parent.config(menu=menubar)
    def _init_feedback(self, parent):
        """Create the 'Last Operation' feedback area."""
        self._feedbackframe = feedbackframe = Frame(parent)
        feedbackframe.pack(fill='x', side='bottom', padx=3, pady=3)
        self._lastoper_label = Label(feedbackframe, text='Last Operation:',
                                     font=self._font)
        self._lastoper_label.pack(side='left')
        lastoperframe = Frame(feedbackframe, relief='sunken', border=1)
        lastoperframe.pack(fill='x', side='right', expand=1, padx=5)
        # _lastoper1 shows the operation name; _lastoper2 its argument.
        self._lastoper1 = Label(lastoperframe, foreground='#007070',
                                background='#f0f0f0', font=self._font)
        self._lastoper2 = Label(lastoperframe, anchor='w', width=30,
                                foreground='#004040', background='#f0f0f0',
                                font=self._font)
        self._lastoper1.pack(side='left')
        self._lastoper2.pack(side='left', fill='x', expand=1)
    def _init_canvas(self, parent):
        """Create the main canvas showing the stack and remaining text."""
        self._cframe = CanvasFrame(parent, background='white',
                                   width=525, closeenough=10,
                                   border=2, relief='sunken')
        self._cframe.pack(expand=1, fill='both', side='top', pady=2)
        canvas = self._canvas = self._cframe.canvas()

        self._stackwidgets = []
        self._rtextwidgets = []
        self._titlebar = canvas.create_rectangle(0,0,0,0, fill='#c0f0f0',
                                                 outline='black')
        self._exprline = canvas.create_line(0,0,0,0, dash='.')
        self._stacktop = canvas.create_line(0,0,0,0, fill='#408080')
        size = self._size.get()+4  # NOTE(review): computed but never used
        self._stacklabel = TextWidget(canvas, 'Stack', color='#004040',
                                      font=self._boldfont)
        self._rtextlabel = TextWidget(canvas, 'Remaining Text',
                                      color='#004040', font=self._boldfont)
        self._cframe.add_widget(self._stacklabel)
        self._cframe.add_widget(self._rtextlabel)
    #########################################
    ##  Main draw procedure
    #########################################

    def _redraw(self):
        """Redraw the stack and remaining text from the parser's state."""
        scrollregion = self._canvas['scrollregion'].split()
        (cx1, cy1, cx2, cy2) = [int(c) for c in scrollregion]

        # Delete the old stack & rtext widgets.
        for stackwidget in self._stackwidgets:
            self._cframe.destroy_widget(stackwidget)
        self._stackwidgets = []
        for rtextwidget in self._rtextwidgets:
            self._cframe.destroy_widget(rtextwidget)
        self._rtextwidgets = []

        # Position the titlebar & exprline
        (x1, y1, x2, y2) = self._stacklabel.bbox()
        y = y2-y1+10
        self._canvas.coords(self._titlebar, -5000, 0, 5000, y-4)
        self._canvas.coords(self._exprline, 0, y*2-10, 5000, y*2-10)

        # Position the titlebar labels..
        (x1, y1, x2, y2) = self._stacklabel.bbox()
        self._stacklabel.move(5-x1, 3-y1)
        (x1, y1, x2, y2) = self._rtextlabel.bbox()
        self._rtextlabel.move(cx2-x2-5, 3-y1)

        # Draw the stack: trees get tree segments, plain tokens get text.
        stackx = 5
        for tok in self._parser.stack():
            if isinstance(tok, parse.Tree):
                attribs = {'tree_color': '#4080a0', 'tree_width': 2,
                           'node_font': self._boldfont,
                           'node_color': '#006060',
                           'leaf_color': '#006060', 'leaf_font':self._font}
                widget = tree_to_treesegment(self._canvas, tok,
                                             **attribs)
                widget.node()['color'] = '#000000'
            else:
                widget = TextWidget(self._canvas, tok,
                                    color='#000000', font=self._font)
            widget.bind_click(self._popup_reduce)
            self._stackwidgets.append(widget)
            self._cframe.add_widget(widget, stackx, y)
            stackx = widget.bbox()[2] + 10

        # Draw the remaining text.
        rtextwidth = 0
        for tok in self._parser.remaining_text():
            widget = TextWidget(self._canvas, tok,
                                color='#000000', font=self._font)
            self._rtextwidgets.append(widget)
            self._cframe.add_widget(widget, rtextwidth, y)
            rtextwidth = widget.bbox()[2] + 4

        # Allow enough room to shift the next token (for animations)
        if len(self._rtextwidgets) > 0:
            stackx += self._rtextwidgets[0].width()

        # Move the remaining text to the correct location (keep it
        # right-justified, when possible); and move the remaining text
        # label, if necessary.
        stackx = max(stackx, self._stacklabel.width()+25)
        rlabelwidth = self._rtextlabel.width()+10
        if stackx >= cx2-max(rtextwidth, rlabelwidth):
            cx2 = stackx + max(rtextwidth, rlabelwidth)
        for rtextwidget in self._rtextwidgets:
            rtextwidget.move(4+cx2-rtextwidth, 0)
        self._rtextlabel.move(cx2-self._rtextlabel.bbox()[2]-5, 0)

        # Draw the vertical line that divides stack from remaining text.
        midx = (stackx + cx2-max(rtextwidth, rlabelwidth))/2
        self._canvas.coords(self._stacktop, midx, 0, midx, 5000)
        (x1, y1, x2, y2) = self._stacklabel.bbox()

        # Set up binding to allow them to shift a token by dragging it.
        if len(self._rtextwidgets) > 0:
            def drag_shift(widget, midx=midx, self=self):
                # Dragging past the divider performs a shift; otherwise
                # snap the token back by redrawing.
                if widget.bbox()[0] < midx: self.shift()
                else: self._redraw()
            self._rtextwidgets[0].bind_drag(drag_shift)
            self._rtextwidgets[0].bind_click(self.shift)

        # Draw the stack top.
        self._highlight_productions()
    def _draw_stack_top(self, widget):
        """Move the stack/remaining-text divider just right of C{widget}."""
        # hack..
        midx = widget.bbox()[2]+50
        self._canvas.coords(self._stacktop, midx, 0, midx, 5000)
    def _highlight_productions(self):
        """Select, in the production list, the currently-reducible rules."""
        # Highlight the productions that can be reduced.
        self._prodlist.selection_clear(0, 'end')
        for prod in self._parser.reducible_productions():
            index = self._productions.index(prod)
            self._prodlist.selection_set(index)
    #########################################
    ##  Button Callbacks
    #########################################

    def destroy(self, *e):
        """Close the demo window.  Safe to call more than once."""
        if self._top is None: return
        self._top.destroy()
        self._top = None
    def reset(self, *e):
        """Re-initialize the parser with the current sentence and redraw."""
        self._parser.initialize(self._sent)
        self._lastoper1['text'] = 'Reset Demo'
        self._lastoper2['text'] = ''
        self._redraw()
    def step(self, *e):
        """
        Perform one parser operation: reduce if possible, else shift.
        When neither applies, report success or failure of the parse.
        """
        if self.reduce(): return 1
        elif self.shift(): return 1
        else:
            if len(self._parser.parses()) > 0:
                self._lastoper1['text'] = 'Finished:'
                self._lastoper2['text'] = 'Success'
            else:
                self._lastoper1['text'] = 'Finished:'
                self._lastoper2['text'] = 'Failure'
    def shift(self, *e):
        """Shift the next token onto the stack; return 1 on success, 0 if
        no token remained to shift (no-op while animating)."""
        if self._animating_lock: return
        if self._parser.shift():
            tok = self._parser.stack()[-1]
            self._lastoper1['text'] = 'Shift:'
            self._lastoper2['text'] = '%r' % tok
            if self._animate.get():
                self._animate_shift()
            else:
                self._redraw()
            return 1
        return 0
    def reduce(self, *e):
        """Apply the earliest available reduction; return the production
        used, or a false value if none applied (no-op while animating)."""
        if self._animating_lock: return
        production = self._parser.reduce()
        if production:
            self._lastoper1['text'] = 'Reduce:'
            self._lastoper2['text'] = '%s' % production
            if self._animate.get():
                self._animate_reduce()
            else:
                self._redraw()
        return production
    def undo(self, *e):
        """Undo the most recent parser operation and redraw."""
        if self._animating_lock: return
        if self._parser.undo():
            self._redraw()
    def postscript(self, *e):
        """Print the current canvas contents to a PostScript file."""
        self._cframe.print_to_file()
    def mainloop(self, *args, **kwargs):
        """
        Enter the Tkinter mainloop.  This function must be called if
        this demo is created from a non-interactive program (e.g.
        from a script); otherwise, the demo will close as soon as
        the script completes.
        """
        if in_idle(): return
        self._top.mainloop(*args, **kwargs)
    #########################################
    ##  Menubar callbacks
    #########################################

    def resize(self, size=None):
        """Change the display font size and redraw (negative = pixels)."""
        if size is not None: self._size.set(size)
        size = self._size.get()
        self._font.configure(size=-(abs(size)))
        self._boldfont.configure(size=-(abs(size)))
        self._sysfont.configure(size=-(abs(size)))
        #self._stacklabel['font'] = ('helvetica', -size-4, 'bold')
        #self._rtextlabel['font'] = ('helvetica', -size-4, 'bold')
        #self._lastoper_label['font'] = ('helvetica', -size)
        #self._lastoper1['font'] = ('helvetica', -size)
        #self._lastoper2['font'] = ('helvetica', -size)
        #self._prodlist['font'] = ('helvetica', -size)
        #self._prodlist_label['font'] = ('helvetica', -size-2, 'bold')
        self._redraw()
    def help(self, *e):
        """Display the module docstring in a help window."""
        # NOTE(review): window title says 'Chart Parser Demo' -- looks
        # copied from the chart-parser demo; confirm intended title.
        # The default font's not very legible; try using 'fixed' instead.
        try:
            ShowText(self._top, 'Help: Chart Parser Demo',
                     (__doc__).strip(), width=75, font='fixed')
        except:
            ShowText(self._top, 'Help: Chart Parser Demo',
                     (__doc__).strip(), width=75)
    def about(self, *e):
        """Show an 'about' dialog (falls back to ShowText without tk dialogs)."""
        ABOUT = ("NLTK Shift-Reduce Parser Demo\n"+
                 "Written by Edward Loper")
        TITLE = 'About: Shift-Reduce Parser Demo'
        try:
            from tkMessageBox import Message
            Message(message=ABOUT, title=TITLE).show()
        except:
            ShowText(self._top, TITLE, ABOUT)
    def edit_grammar(self, *e):
        """Open a CFG editor on the parser's grammar."""
        CFGEditor(self._top, self._parser.grammar(), self.set_grammar)
    def set_grammar(self, grammar):
        """Install a new grammar and refresh the production list."""
        self._parser.set_grammar(grammar)
        self._productions = list(grammar.productions())
        self._prodlist.delete(0, 'end')
        for production in self._productions:
            self._prodlist.insert('end', (' %s' % production))
    def edit_sentence(self, *e):
        """Prompt for a new sentence to parse."""
        sentence = ' '.join(self._sent)
        title = 'Edit Text'
        instr = 'Enter a new sentence to parse.'
        EntryDialog(self._top, sentence, instr, self.set_sentence, title)
    def set_sentence(self, sentence):
        """Tokenize C{sentence} on whitespace and restart the parser."""
        self._sent = list(tokenize.whitespace(sentence)) #[XX] use tagged?
        self.reset()
    #########################################
    ##  Reduce Production Selection
    #########################################

    def _toggle_grammar(self, *e):
        """Show or hide the grammar list, per the Show Grammar checkbox."""
        if self._show_grammar.get():
            self._prodframe.pack(fill='both', side='left', padx=2,
                                 after=self._feedbackframe)
            self._lastoper1['text'] = 'Show Grammar'
        else:
            self._prodframe.pack_forget()
            self._lastoper1['text'] = 'Hide Grammar'
        self._lastoper2['text'] = ''
    def _prodlist_select(self, event):
        """Apply the reduction the user selected in the production list."""
        selection = self._prodlist.curselection()
        if len(selection) != 1: return
        index = int(selection[0])
        production = self._parser.reduce(self._productions[index])
        if production:
            self._lastoper1['text'] = 'Reduce:'
            self._lastoper2['text'] = '%s' % production
            if self._animate.get():
                self._animate_reduce()
            else:
                self._redraw()
        else:
            # The selected production was not applicable; restore the
            # selection to the set of reducible productions.
            # Reset the production selections.
            self._prodlist.selection_clear(0, 'end')
            for prod in self._parser.reducible_productions():
                index = self._productions.index(prod)
                self._prodlist.selection_set(index)
def _popup_reduce(self, widget):
# Remove old commands.
productions = self._parser.reducible_productions()
if len(productions) == 0: return
self._reduce_menu.delete(0, 'end')
for production in productions:
self._reduce_menu.add_command(label=str(production),
command=self.reduce)
self._reduce_menu.post(self._canvas.winfo_pointerx(),
self._canvas.winfo_pointery())
    #########################################
    ##  Animations
    #########################################

    def _animate_shift(self):
        """Animate sliding the next token from the remaining text onto
        the stack."""
        # What widget are we shifting?
        widget = self._rtextwidgets[0]

        # Where are we shifting from & to?
        right = widget.bbox()[0]
        if len(self._stackwidgets) == 0: left = 5
        else: left = self._stackwidgets[-1].bbox()[2]+10

        # Start animating.
        dt = self._animate.get()  # number of frames
        dx = (left-right)*1.0/dt
        self._animate_shift_frame(dt, widget, dx)
    def _animate_shift_frame(self, frame, widget, dx):
        """Draw one frame of the shift animation; reschedules itself
        via Tk's after() until C{frame} reaches 0."""
        if frame > 0:
            self._animating_lock = 1
            widget.move(dx, 0)
            self._top.after(10, self._animate_shift_frame,
                            frame-1, widget, dx)
        else:
            # but: stacktop??

            # Shift the widget to the stack.
            del self._rtextwidgets[0]
            self._stackwidgets.append(widget)
            self._animating_lock = 0

            # Display the available productions.
            self._draw_stack_top(widget)
            self._highlight_productions()
    def _animate_reduce(self):
        """Animate lifting the reduced stack elements into a new tree."""
        # What widgets are we shifting?
        numwidgets = len(self._parser.stack()[-1]) # number of children
        widgets = self._stackwidgets[-numwidgets:]

        # How far are we moving?
        if isinstance(widgets[0], TreeSegmentWidget):
            ydist = 15 + widgets[0].node().height()
        else:
            ydist = 15 + widgets[0].height()

        # Start animating.
        dt = self._animate.get()  # number of frames
        dy = ydist*2.0/dt
        self._animate_reduce_frame(dt/2, widgets, dy)
    def _animate_reduce_frame(self, frame, widgets, dy):
        """Draw one frame of the reduce animation; when done, replace the
        moved widgets with a single tree-segment widget."""
        if frame > 0:
            self._animating_lock = 1
            for widget in widgets: widget.move(0, dy)
            self._top.after(10, self._animate_reduce_frame,
                            frame-1, widgets, dy)
        else:
            del self._stackwidgets[-len(widgets):]
            for widget in widgets:
                self._cframe.remove_widget(widget)
            tok = self._parser.stack()[-1]
            if not isinstance(tok, parse.Tree): raise ValueError()
            label = TextWidget(self._canvas, str(tok.node), color='#006060',
                               font=self._boldfont)
            widget = TreeSegmentWidget(self._canvas, label, widgets,
                                       width=2)
            (x1, y1, x2, y2) = self._stacklabel.bbox()
            y = y2-y1+10
            if not self._stackwidgets: x = 5
            else: x = self._stackwidgets[-1].bbox()[2] + 10
            self._cframe.add_widget(widget, x, y)
            self._stackwidgets.append(widget)

            # Display the available productions.
            self._draw_stack_top(widget)
            self._highlight_productions()

#             # Delete the old widgets..
#             del self._stackwidgets[-len(widgets):]
#             for widget in widgets:
#                 self._cframe.destroy_widget(widget)
#
#             # Make a new one.
#             tok = self._parser.stack()[-1]
#             if isinstance(tok, parse.Tree):
#                 attribs = {'tree_color': '#4080a0', 'tree_width': 2,
#                            'node_font': bold, 'node_color': '#006060',
#                            'leaf_color': '#006060', 'leaf_font':self._font}
#                 widget = tree_to_treesegment(self._canvas, tok.type(),
#                                              **attribs)
#                 widget.node()['color'] = '#000000'
#             else:
#                 widget = TextWidget(self._canvas, tok.type(),
#                                     color='#000000', font=self._font)
#             widget.bind_click(self._popup_reduce)
#             (x1, y1, x2, y2) = self._stacklabel.bbox()
#             y = y2-y1+10
#             if not self._stackwidgets: x = 5
#             else: x = self._stackwidgets[-1].bbox()[2] + 10
#             self._cframe.add_widget(widget, x, y)
#             self._stackwidgets.append(widget)

            #self._redraw()
            self._animating_lock = 0
    #########################################
    ##  Hovering.
    #########################################

    def _highlight_hover(self, event):
        """When hovering over an applicable production, highlight the
        stack elements it would reduce."""
        # What production are we hovering over?
        index = self._prodlist.nearest(event.y)
        if self._hover == index: return

        # Clear any previous hover highlighting.
        self._clear_hover()

        # If the production corresponds to an available reduction,
        # highlight the stack.
        selection = [int(s) for s in self._prodlist.curselection()]
        if index in selection:
            rhslen = len(self._productions[index].rhs())
            for stackwidget in self._stackwidgets[-rhslen:]:
                if isinstance(stackwidget, TreeSegmentWidget):
                    stackwidget.node()['color'] = '#00a000'
                else:
                    stackwidget['color'] = '#00a000'

        # Remember what production we're hovering over.
        self._hover = index
    def _clear_hover(self, *event):
        """Restore the stack widgets' normal color after a hover."""
        # Clear any previous hover highlighting.
        if self._hover == -1: return
        self._hover = -1
        for stackwidget in self._stackwidgets:
            if isinstance(stackwidget, TreeSegmentWidget):
                stackwidget.node()['color'] = 'black'
            else:
                stackwidget['color'] = 'black'
def demo():
    """
    Create a shift reduce parser demo, using a simple grammar and
    text.
    """
    from en.parser.nltk_lite.parse import cfg

    # Create one nonterminal per symbol name.
    symbols = 'S VP NP PP P N Name V Det'
    (S, VP, NP, PP, P, N, Name, V, Det) = [cfg.Nonterminal(sym)
                                           for sym in symbols.split()]

    # Syntactic productions (order matters: the parser's default
    # reduction is the earliest applicable production).
    syntax = [
        cfg.Production(S, [NP, VP]),
        cfg.Production(NP, [Det, N]),
        cfg.Production(NP, [NP, PP]),
        cfg.Production(VP, [VP, PP]),
        cfg.Production(VP, [V, NP, PP]),
        cfg.Production(VP, [V, NP]),
        cfg.Production(PP, [P, NP]),
    ]

    # Lexical productions.
    lexicon = [
        cfg.Production(NP, ['I']), cfg.Production(Det, ['the']),
        cfg.Production(Det, ['a']), cfg.Production(N, ['man']),
        cfg.Production(V, ['saw']), cfg.Production(P, ['in']),
        cfg.Production(P, ['with']), cfg.Production(N, ['park']),
        cfg.Production(N, ['dog']), cfg.Production(N, ['statue']),
        cfg.Production(Det, ['my']),
    ]

    grammar = cfg.Grammar(S, syntax + lexicon)

    # tokenize the sentence
    sentence = 'my dog saw a man in the park with a statue'
    sent = list(tokenize.whitespace(sentence))

    ShiftReduceDemo(grammar, sent).mainloop()
# Launch the shift/reduce demo when run as a script.
if __name__ == '__main__': demo()
| Python |
# Natural Language Toolkit: Simple Plotting
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
#
# $Id: plot.py 3460 2006-10-06 10:39:03Z stevenbird $
"""
A simple tool for plotting functions. Each new C{Plot} object opens a
new window, containing the plot for a sinlge function. See the
documentation for L{Plot} for information about creating new plots.
Example plots
=============
Plot sin(x) from -10 to 10, with a step of 0.1:
>>> Plot(math.sin)
Plot cos(x) from 0 to 2*pi, with a step of 0.01:
>>> Plot(math.cos, slice(0, 2*math.pi, 0.01))
Plot a list of points (connected by lines).
>>> points = ([1,1], [3,8], [5,3], [6,12], [1,24])
>>> Plot(points)
Plot a list of y-values (connected by lines). Each value[i] is
plotted at x=i.
>>> values = [x**2 for x in range(200)]
>>> Plot(values)
Plot a function with logarithmic axes.
>>> def f(x): return 5*x**2+2*x+8
>>> Plot(f, slice(1,10,.1), scale='log')
Plot the same function with semi-logarithmic axes.
>>> Plot(f, slice(1,10,.1),
scale='log-linear') # logarithmic x; linear y
>>> Plot(f, slice(1,10,.1),
scale='linear-log') # linear x; logarithmic y
BLT
===
If U{BLT<http://incrtcl.sourceforge.net/blt/>} and
U{PMW<http://pmw.sourceforge.net/>} are both installed, then BLT is
used to plot graphs. Otherwise, a simple Tkinter-based implementation
is used. The Tkinter-based implementation does I{not} display axis
values.
@group Plot Frame Implementations: PlotFrameI, CanvasPlotFrame,
BLTPlotFrame
"""
# This is used by "from en.parser.nltk_lite.draw.plot import *". to decide what to
# import. It also declares to nltk that only the Plot class is public.
__all__ = ['Plot']
# Implementation note:
# For variable names, I use x,y for pixel coordinates; and i,j for
# plot coordinates.
# Delegate to BLT?
#
#
from types import *
from math import log, log10, ceil, floor
import Tkinter, sys, time
from en.parser.nltk_lite.draw import ShowText, in_idle
class PlotFrameI(object):
    """
    An interface for frames that plot graphs.  If BLT is present, the
    nicer C{BLTPlotFrame} implementation is used; otherwise we fall
    back on C{CanvasPlotFrame}.  Every method here is abstract: calling
    it on the interface itself raises an C{AssertionError}.
    """

    def postscript(self, filename):
        """Print the contents of the plot to the given file."""
        raise AssertionError('PlotFrameI is an interface')

    def config_axes(self, xlog, ylog):
        """Set the scale for the axes (linear/logarithmic)."""
        raise AssertionError('PlotFrameI is an interface')

    def invtransform(self, x, y):
        """Transform pixel coordinates to plot coordinates."""
        raise AssertionError('PlotFrameI is an interface')

    def zoom(self, i1, j1, i2, j2):
        """Zoom to the given range."""
        raise AssertionError('PlotFrameI is an interface')

    def visible_area(self):
        """Return the visible area rect (in plot coordinates)."""
        raise AssertionError('PlotFrameI is an interface')

    def create_zoom_marker(self):
        """Mark the zoom region, for drag-zooming."""
        raise AssertionError('PlotFrameI is an interface')

    def adjust_zoom_marker(self, x0, y0, x1, y1):
        """Adjust the zoom region marker, for drag-zooming."""
        raise AssertionError('PlotFrameI is an interface')

    def delete_zoom_marker(self):
        """Delete the zoom region marker (for drag-zooming)."""
        raise AssertionError('PlotFrameI is an interface')

    def bind(self, *args):
        """Bind an event to a function."""
        raise AssertionError('PlotFrameI is an interface')

    def unbind(self, *args):
        """Unbind an event."""
        raise AssertionError('PlotFrameI is an interface')
class CanvasPlotFrame(PlotFrameI):
    """
    A pure-Tkinter implementation of C{PlotFrameI}, used when BLT is
    not available.  Draws axes, grid, and the data line directly on a
    scrollable C{Canvas}.
    """
    def __init__(self, root, vals, rng):
        """
        @param root: The Tk window to put the plot frame in.
        @param vals: The y-values to plot.
        @param rng: The x-values (range) the y-values correspond to.
        """
        self._root = root
        self._original_rng = rng
        self._original_vals = vals

        self._frame = Tkinter.Frame(root)
        self._frame.pack(expand=1, fill='both')

        # Set up the canvas
        self._canvas = Tkinter.Canvas(self._frame, background='white')
        self._canvas['scrollregion'] = (0,0,200,200)

        # Give the canvas scrollbars.
        sb1 = Tkinter.Scrollbar(self._frame, orient='vertical')
        sb1.pack(side='right', fill='y')
        sb2 = Tkinter.Scrollbar(self._frame, orient='horizontal')
        sb2.pack(side='bottom', fill='x')
        self._canvas.pack(side='left', fill='both', expand=1)

        # Connect the scrollbars to the canvas.
        sb1.config(command=self._canvas.yview)
        sb2['command']=self._canvas.xview
        self._canvas['yscrollcommand'] = sb1.set
        self._canvas['xscrollcommand'] = sb2.set

        # Size is unknown until the first <Configure> event fires.
        self._width = self._height = -1
        self._canvas.bind('<Configure>', self._configure)

        # Start out with linear coordinates.
        self.config_axes(0, 0)
    def _configure(self, event):
        """On resize, record the new size and re-zoom to the same area."""
        if self._width != event.width or self._height != event.height:
            self._width = event.width
            self._height = event.height
            (i1, j1, i2, j2) = self.visible_area()
            self.zoom(i1, j1, i2, j2)
    def postscript(self, filename):
        """Write the full scrollregion (not just the view) to a PostScript file."""
        (x0, y0, w, h) = self._canvas['scrollregion'].split()
        self._canvas.postscript(file=filename, x=float(x0), y=float(y0),
                                width=float(w)+2, height=float(h)+2)
    def _plot(self, *args):
        """Redraw everything: axes, x/y grids, and the data line."""
        self._canvas.delete('all')
        (i1, j1, i2, j2) = self.visible_area()

        # Draw the Axes
        xzero = -self._imin*self._dx
        yzero = self._ymax+self._jmin*self._dy
        # "Infinite" extents so axes/grid span any scroll position.
        neginf = min(self._imin, self._jmin, -1000)*1000
        posinf = max(self._imax, self._jmax, 1000)*1000
        self._canvas.create_line(neginf,yzero,posinf,yzero,
                                 fill='gray', width=2)
        self._canvas.create_line(xzero,neginf,xzero,posinf,
                                 fill='gray', width=2)

        # Draw the X grid.
        if self._xlog:
            (i1, i2) = (10**(i1), 10**(i2))
            (imin, imax) = (10**(self._imin), 10**(self._imax))
            # Grid step size.
            di = (i2-i1)/1000.0
            # round to a power of 10
            di = 10.0**(int(log10(di)))
            # grid start location
            i = ceil(imin/di)*di
            while i <= imax:
                if i > 10*di: di *= 10
                x = log10(i)*self._dx - log10(imin)*self._dx
                self._canvas.create_line(x, neginf, x, posinf, fill='gray')
                i += di
        else:
            # Grid step size.
            di = max((i2-i1)/10.0, (self._imax-self._imin)/100)
            # round to a power of 2
            di = 2.0**(int(log(di)/log(2)))
            # grid start location
            i = int(self._imin/di)*di
            # Draw the grid.
            while i <= self._imax:
                x = (i-self._imin)*self._dx
                self._canvas.create_line(x, neginf, x, posinf, fill='gray')
                i += di

        # Draw the Y grid
        if self._ylog:
            (j1, j2) = (10**(j1), 10**(j2))
            (jmin, jmax) = (10**(self._jmin), 10**(self._jmax))
            # Grid step size.
            dj = (j2-j1)/1000.0
            # round to a power of 10
            dj = 10.0**(int(log10(dj)))
            # grid start location
            j = ceil(jmin/dj)*dj
            while j <= jmax:
                if j > 10*dj: dj *= 10
                y = log10(jmax)*self._dy - log10(j)*self._dy
                self._canvas.create_line(neginf, y, posinf, y, fill='gray')
                j += dj
        else:
            # Grid step size.
            dj = max((j2-j1)/10.0, (self._jmax-self._jmin)/100)
            # round to a power of 2
            dj = 2.0**(int(log(dj)/log(2)))
            # grid start location
            j = int(self._jmin/dj)*dj
            # Draw the grid
            while j <= self._jmax:
                y = (j-self._jmin)*self._dy
                self._canvas.create_line(neginf, y, posinf, y, fill='gray')
                j += dj

        # The plot
        line = []
        for (i,j) in zip(self._rng, self._vals):
            x = (i-self._imin) * self._dx
            y = self._ymax-((j-self._jmin) * self._dy)
            line.append( (x,y) )
        # A canvas line needs at least two points.
        if len(line) == 1: line.append(line[0])
        self._canvas.create_line(line, fill='black')
def config_axes(self, xlog, ylog):
if hasattr(self, '_rng'):
(i1, j1, i2, j2) = self.visible_area()
zoomed=1
else:
zoomed=0
self._xlog = xlog
self._ylog = ylog
if xlog: self._rng = [log10(x) for x in self._original_rng]
else: self._rng = self._original_rng
if ylog: self._vals = [log10(x) for x in self._original_vals]
else: self._vals = self._original_vals
self._imin = min(self._rng)
self._imax = max(self._rng)
if self._imax == self._imin:
self._imin -= 1
self._imax += 1
self._jmin = min(self._vals)
self._jmax = max(self._vals)
if self._jmax == self._jmin:
self._jmin -= 1
self._jmax += 1
if zoomed:
self.zoom(i1, j1, i2, j2)
else:
self.zoom(self._imin, self._jmin, self._imax, self._jmax)
def invtransform(self, x, y):
x = self._canvas.canvasx(x)
y = self._canvas.canvasy(y)
return (self._imin+x/self._dx, self._jmin+(self._ymax-y)/self._dy)
def zoom(self, i1, j1, i2, j2):
w = self._width
h = self._height
self._xmax = (self._imax-self._imin)/(i2-i1) * w
self._ymax = (self._jmax-self._jmin)/(j2-j1) * h
self._canvas['scrollregion'] = (0, 0, self._xmax, self._ymax)
self._dx = self._xmax/(self._imax-self._imin)
self._dy = self._ymax/(self._jmax-self._jmin)
self._plot()
# Pan to the correct place
self._canvas.xview('moveto', (i1-self._imin)/(self._imax-self._imin))
self._canvas.yview('moveto', (self._jmax-j2)/(self._jmax-self._jmin))
def visible_area(self):
xview = self._canvas.xview()
yview = self._canvas.yview()
i1 = self._imin + xview[0] * (self._imax-self._imin)
i2 = self._imin + xview[1] * (self._imax-self._imin)
j1 = self._jmax - yview[1] * (self._jmax-self._jmin)
j2 = self._jmax - yview[0] * (self._jmax-self._jmin)
return (i1, j1, i2, j2)
def create_zoom_marker(self):
self._canvas.create_rectangle(0,0,0,0, tag='zoom')
def adjust_zoom_marker(self, x0, y0, x1, y1):
x0 = self._canvas.canvasx(x0)
y0 = self._canvas.canvasy(y0)
x1 = self._canvas.canvasx(x1)
y1 = self._canvas.canvasy(y1)
self._canvas.coords('zoom', x0, y0, x1, y1)
def delete_zoom_marker(self):
self._canvas.delete('zoom')
def bind(self, *args): self._canvas.bind(*args)
def unbind(self, *args): self._canvas.unbind(*args)
class BLTPlotFrame(PlotFrameI):
    """Plot frame backed by the BLT C{Graph} widget (accessed through
    Pmw).  Implements the same interface as C{CanvasPlotFrame}; C{Plot}
    tries this class first and falls back to the canvas version when the
    constructor raises C{ImportError}."""
    def __init__(self, root, vals, rng):
        #raise ImportError # for debugging CanvasPlotFrame
        # Find ranges
        self._imin = min(rng)
        self._imax = max(rng)
        # Widen a degenerate (single-value) x range so that later
        # divisions by (imax-imin) never divide by zero.
        if self._imax == self._imin:
            self._imin -= 1
            self._imax += 1
        self._jmin = min(vals)
        self._jmax = max(vals)
        # Same widening for a flat y range.
        if self._jmax == self._jmin:
            self._jmin -= 1
            self._jmax += 1
        # Create top-level frame.
        self._root = root
        self._frame = Tkinter.Frame(root)
        self._frame.pack(expand=1, fill='both')
        # Create the graph.
        try:
            import Pmw
            # This reload is needed to prevent an error if we create
            # more than 1 graph in the same interactive session:
            reload(Pmw.Blt)
            Pmw.initialise()
            self._graph = Pmw.Blt.Graph(self._frame)
        except:
            # NOTE(review): any failure (not just a missing module) is
            # reported as ImportError -- Plot catches ImportError to fall
            # back to CanvasPlotFrame, so this is a deliberate funnel.
            raise ImportError('Pmw not installed!')
        # Add scrollbars.
        sb1 = Tkinter.Scrollbar(self._frame, orient='vertical')
        sb1.pack(side='right', fill='y')
        sb2 = Tkinter.Scrollbar(self._frame, orient='horizontal')
        sb2.pack(side='bottom', fill='x')
        self._graph.pack(side='left', fill='both', expand='yes')
        self._yscroll = sb1
        self._xscroll = sb2
        # Connect the scrollbars to the canvas.
        sb1['command'] = self._yview
        sb2['command'] = self._xview
        # Create the plot.
        self._graph.line_create('plot', xdata=tuple(rng),
                                ydata=tuple(vals), symbol='')
        self._graph.legend_configure(hide=1)
        self._graph.grid_configure(hide=0)
        self._set_scrollbars()
    def _set_scrollbars(self):
        """Sync both scrollbar thumbs with the graph's visible area.
        The y fractions are inverted because scrollbar coordinates grow
        downward while data j values grow upward."""
        (i1, j1, i2, j2) = self.visible_area()
        (imin, imax) = (self._imin, self._imax)
        (jmin, jmax) = (self._jmin, self._jmax)
        self._xscroll.set((i1-imin)/(imax-imin), (i2-imin)/(imax-imin))
        self._yscroll.set(1-(j2-jmin)/(jmax-jmin), 1-(j1-jmin)/(jmax-jmin))
    def _xview(self, *command):
        """Horizontal scrollbar callback: interpret a Tk scrollbar
        command ('moveto' fraction, or 'scroll' amount 'pages'/'units')
        and re-zoom the graph to the new x window."""
        (i1, j1, i2, j2) = self.visible_area()
        (imin, imax) = (self._imin, self._imax)
        (jmin, jmax) = (self._jmin, self._jmax)
        if command[0] == 'moveto':
            f = float(command[1])
        elif command[0] == 'scroll':
            dir = int(command[1])
            if command[2] == 'pages':
                f = (i1-imin)/(imax-imin) + dir*(i2-i1)/(imax-imin)
            elif command[2] == 'units':
                f = (i1-imin)/(imax-imin) + dir*(i2-i1)/(10*(imax-imin))
            else: return
        else: return
        # Clamp so the visible window stays inside the data range.
        f = max(f, 0)
        f = min(f, 1-(i2-i1)/(imax-imin))
        self.zoom(imin + f*(imax-imin), j1,
                  imin + f*(imax-imin)+(i2-i1), j2)
        self._set_scrollbars()
    def _yview(self, *command):
        """Vertical scrollbar callback; mirror of _xview with the
        fraction inverted because the scrollbar grows downward."""
        (i1, j1, i2, j2) = self.visible_area()
        (imin, imax) = (self._imin, self._imax)
        (jmin, jmax) = (self._jmin, self._jmax)
        if command[0] == 'moveto':
            f = 1.0-float(command[1]) - (j2-j1)/(jmax-jmin)
        elif command[0] == 'scroll':
            dir = -int(command[1])
            if command[2] == 'pages':
                f = (j1-jmin)/(jmax-jmin) + dir*(j2-j1)/(jmax-jmin)
            elif command[2] == 'units':
                f = (j1-jmin)/(jmax-jmin) + dir*(j2-j1)/(10*(jmax-jmin))
            else: return
        else: return
        # Clamp so the visible window stays inside the data range.
        f = max(f, 0)
        f = min(f, 1-(j2-j1)/(jmax-jmin))
        self.zoom(i1, jmin + f*(jmax-jmin),
                  i2, jmin + f*(jmax-jmin)+(j2-j1))
        self._set_scrollbars()
    def config_axes(self, xlog, ylog):
        """Delegate log/linear axis selection to the BLT graph."""
        self._graph.xaxis_configure(logscale=xlog)
        self._graph.yaxis_configure(logscale=ylog)
    def invtransform(self, x, y):
        """Map window coordinates to data coordinates via BLT."""
        return self._graph.invtransform(x, y)
    def zoom(self, i1, j1, i2, j2):
        """Restrict both axes to the given data rectangle."""
        self._graph.xaxis_configure(min=i1, max=i2)
        self._graph.yaxis_configure(min=j1, max=j2)
        self._set_scrollbars()
    def visible_area(self):
        """Return the currently displayed data rectangle (i1, j1, i2, j2)."""
        (i1, i2) = self._graph.xaxis_limits()
        (j1, j2) = self._graph.yaxis_limits()
        return (i1, j1, i2, j2)
    def create_zoom_marker(self):
        """Create the dashed rubber-band marker used while zoom-dragging."""
        self._graph.marker_create("line", name="zoom", dashes=(2, 2))
    def adjust_zoom_marker(self, press_x, press_y, release_x, release_y):
        """Redraw the zoom marker as a closed rectangle between the
        press point and the current drag point (window coordinates)."""
        (i1, j1) = self._graph.invtransform(press_x, press_y)
        (i2, j2) = self._graph.invtransform(release_x, release_y)
        # Five points: four corners plus a repeat of the first to close
        # the outline.
        coords = (i1, j1, i2, j1, i2, j2, i1, j2, i1, j1)
        self._graph.marker_configure("zoom", coords=coords)
    def delete_zoom_marker(self):
        """Remove the rubber-band zoom marker."""
        self._graph.marker_delete("zoom")
    def bind(self, *args): self._graph.bind(*args)
    def unbind(self, *args): self._graph.unbind(*args)
    def postscript(self, filename):
        """Write the current graph to C{filename} as Postscript."""
        self._graph.postscript_output(filename)
class Plot(object):
    """
    A simple graphical tool for plotting functions.  Each new C{Plot}
    object opens a new window, containing the plot for a single
    function.  Multiple plots in the same window are not (yet)
    supported.  The C{Plot} constructor supports several mechanisms
    for defining the set of points to plot.
    Example plots
    =============
    Plot the math.sin function over the range [-10:10:.1]:
    >>> import math
    >>> Plot(math.sin)
    Plot the math.sin function over the range [0:1:.001]:
    >>> Plot(math.sin, slice(0, 1, .001))
    Plot a list of points:
    >>> points = ([1,1], [3,8], [5,3], [6,12], [1,24])
    >>> Plot(points)
    Plot a list of values, at x=0, x=1, x=2, ..., x=n:
    >>> Plot([x**2 for x in range(20)])
    """
    def __init__(self, vals, rng=None, **kwargs):
        """
        Create a new C{Plot}.
        @param vals: The set of values to plot.  C{vals} can be a list
            of y-values; a list of points; or a function.
        @param rng: The range over which to plot.  C{rng} can be a
            list of x-values, or a slice object.  If no range is
            specified, a default range will be used.  Note that C{rng}
            may I{not} be specified if C{vals} is a list of points.
        @keyword scale: The scales that should be used for the axes.
            Possible values are:
              - C{'linear'}: both axes are linear.
              - C{'log-linear'}: The x axis is logarithmic; and the y
                axis is linear.
              - C{'linear-log'}: The x axis is linear; and the y axis
                is logarithmic.
              - C{'log'}: Both axes are logarithmic.
            By default, C{scale} is C{'linear'}.
        @raise TypeError: If C{rng} is of an unsupported type.
        @raise ValueError: If C{vals} is of an unsupported type, if a
            range is given together with a list of points, if the range
            and value lists have different lengths, or if there is
            nothing to plot.
        """
        # If range is a slice, then expand it to a list.
        if type(rng) is SliceType:
            (start, stop, step) = (rng.start, rng.stop, rng.step)
            if step > 0 and stop > start:
                rng = [start]
                # Start i at 1: rng already holds the i=0 value, so
                # starting at 0 duplicated the first point (bug fix).
                i = 1
                while rng[-1] < stop:
                    rng.append(start+i*step)
                    i += 1
            elif step < 0 and stop < start:
                rng = [start]
                i = 1
                while rng[-1] > stop:
                    rng.append(start+i*step)
                    i += 1
            else:
                # Empty slice (e.g. step and direction disagree).
                rng = []
        # If vals is a function, evaluate it over range.
        if type(vals) in (FunctionType, BuiltinFunctionType,
                          MethodType):
            if rng is None: rng = [x*0.1 for x in range(-100, 100)]
            try: vals = [vals(i) for i in rng]
            except TypeError:
                raise TypeError('Bad range type: %s' % type(rng))
        # If vals isn't a function, make sure it's a sequence:
        elif type(vals) not in (ListType, TupleType):
            raise ValueError('Bad values type: %s' % type(vals))
        # If vals is a list of points, unzip it.
        elif len(vals) > 0 and type(vals[0]) in (ListType, TupleType):
            if rng is not None:
                estr = "Can't specify a range when vals is a list of points."
                raise ValueError(estr)
            (rng, vals) = zip(*vals)
        # If vals & rng are both lists, make sure their lengths match.
        elif type(rng) in (ListType, TupleType):
            if len(rng) != len(vals):
                estr = 'Range list and value list have different lengths.'
                raise ValueError(estr)
        # If rng is unspecified, take it to be integers starting at zero
        elif rng is None:
            rng = range(len(vals))
        # If it's an unknown range type, then fail.
        else:
            raise TypeError('Bad range type: %s' % type(rng))
        # Check that we have something to plot
        if len(vals) == 0:
            raise ValueError('Nothing to plot!')
        # Set _rng/_vals
        self._rng = rng
        self._vals = vals
        # Find max/min's, widening degenerate (flat) ranges so later
        # divisions by (max-min) cannot divide by zero.
        self._imin = min(rng)
        self._imax = max(rng)
        if self._imax == self._imin:
            self._imin -= 1
            self._imax += 1
        self._jmin = min(vals)
        self._jmax = max(vals)
        if self._jmax == self._jmin:
            self._jmin -= 1
            self._jmax += 1
        # Do some basic error checking.
        if len(self._rng) != len(self._vals):
            raise ValueError("Rng and vals have different lengths")
        if len(self._rng) == 0:
            raise ValueError("Nothing to plot")
        # Set up the tk window
        self._root = Tkinter.Tk()
        self._init_bindings(self._root)
        # Create the actual plot frame; prefer the BLT widget and fall
        # back to the plain canvas implementation when Pmw is missing.
        try:
            self._plot = BLTPlotFrame(self._root, vals, rng)
        except ImportError:
            self._plot = CanvasPlotFrame(self._root, vals, rng)
        # Set up the axes
        self._ilog = Tkinter.IntVar(self._root); self._ilog.set(0)
        self._jlog = Tkinter.IntVar(self._root); self._jlog.set(0)
        scale = kwargs.get('scale', 'linear')
        if scale in ('log-linear', 'log_linear', 'log'): self._ilog.set(1)
        if scale in ('linear-log', 'linear_log', 'log'): self._jlog.set(1)
        self._plot.config_axes(self._ilog.get(), self._jlog.get())
        ## Set up zooming
        self._plot.bind("<ButtonPress-1>", self._zoom_in_buttonpress)
        self._plot.bind("<ButtonRelease-1>", self._zoom_in_buttonrelease)
        self._plot.bind("<ButtonPress-2>", self._zoom_out)
        self._plot.bind("<ButtonPress-3>", self._zoom_out)
        self._init_menubar(self._root)
    def _init_bindings(self, parent):
        """Install top-level keyboard accelerators.  (C{parent} is kept
        for interface symmetry; bindings attach to the root window.)"""
        self._root.bind('<Control-q>', self.destroy)
        self._root.bind('<Control-x>', self.destroy)
        self._root.bind('<Control-p>', self.postscript)
        self._root.bind('<Control-a>', self._zoom_all)
        self._root.bind('<F1>', self.help)
    def _init_menubar(self, parent):
        """Build the File/Zoom/Axes/Help menus on C{parent}."""
        menubar = Tkinter.Menu(parent)
        filemenu = Tkinter.Menu(menubar, tearoff=0)
        filemenu.add_command(label='Print to Postscript', underline=0,
                             command=self.postscript, accelerator='Ctrl-p')
        filemenu.add_command(label='Exit', underline=1,
                             command=self.destroy, accelerator='Ctrl-x')
        menubar.add_cascade(label='File', underline=0, menu=filemenu)
        zoommenu = Tkinter.Menu(menubar, tearoff=0)
        zoommenu.add_command(label='Zoom in', underline=5,
                             command=self._zoom_in, accelerator='left click')
        zoommenu.add_command(label='Zoom out', underline=5,
                             command=self._zoom_out, accelerator='right click')
        zoommenu.add_command(label='View 100%', command=self._zoom_all,
                             accelerator='Ctrl-a')
        menubar.add_cascade(label='Zoom', underline=0, menu=zoommenu)
        axismenu = Tkinter.Menu(menubar, tearoff=0)
        # Log scaling is only offered when all values are positive.
        if self._imin > 0: xstate = 'normal'
        else: xstate = 'disabled'
        if self._jmin > 0: ystate = 'normal'
        else: ystate = 'disabled'
        axismenu.add_checkbutton(label='Log X axis', underline=4,
                                 variable=self._ilog, state=xstate,
                                 command=self._log)
        axismenu.add_checkbutton(label='Log Y axis', underline=4,
                                 variable=self._jlog, state=ystate,
                                 command=self._log)
        menubar.add_cascade(label='Axes', underline=0, menu=axismenu)
        helpmenu = Tkinter.Menu(menubar, tearoff=0)
        helpmenu.add_command(label='About', underline=0,
                             command=self.about)
        helpmenu.add_command(label='Instructions', underline=0,
                             command=self.help, accelerator='F1')
        menubar.add_cascade(label='Help', underline=0, menu=helpmenu)
        parent.config(menu=menubar)
    def _log(self, *e):
        """Re-apply the axis scales after a Log X/Y checkbutton toggles."""
        self._plot.config_axes(self._ilog.get(), self._jlog.get())
    def about(self, *e):
        """
        Display an 'about' dialog window for the NLTK plot tool.
        """
        ABOUT = ("NLTK Plot Tool\n"
                 "<http://nltk.sourceforge.net>")
        TITLE = 'About: Plot Tool'
        if isinstance(self._plot, BLTPlotFrame):
            ABOUT += '\n\nBased on the BLT Widget'
        try:
            from tkMessageBox import Message
            Message(message=ABOUT, title=TITLE).show()
        except:
            ShowText(self._root, TITLE, ABOUT)
    def help(self, *e):
        """
        Display a help window.
        """
        doc = __doc__.split('\n@', 1)[0].strip()
        import re
        doc = re.sub(r'[A-Z]{([^}<]*)(<[^>}]*>)?}', r'\1', doc)
        self._autostep = 0
        # The default font's not very legible; try using 'fixed' instead.
        try:
            ShowText(self._root, 'Help: Plot Tool', doc,
                     width=75, font='fixed')
        except:
            ShowText(self._root, 'Help: Plot Tool', doc, width=75)
    def postscript(self, *e):
        """
        Print the (currently visible) contents of the plot window to a
        postscript file.
        """
        from tkFileDialog import asksaveasfilename
        ftypes = [('Postscript files', '.ps'),
                  ('All files', '*')]
        filename = asksaveasfilename(filetypes=ftypes, defaultextension='.ps')
        if not filename: return
        self._plot.postscript(filename)
    def destroy(self, *args):
        """
        Close the plot window.
        """
        if self._root is None: return
        self._root.destroy()
        self._root = None
    def mainloop(self, *varargs, **kwargs):
        """
        Enter the mainloop for the window.  This method must be called
        if a Plot is constructed from a non-interactive Python program
        (e.g., from a script); otherwise, the plot window will close
        as soon as the script completes.
        """
        if in_idle(): return
        self._root.mainloop(*varargs, **kwargs)
    def _zoom(self, i1, j1, i2, j2):
        """Clamp the requested rectangle to the data bounds and zoom."""
        # Make sure they're ordered correctly.
        if i1 > i2: (i1,i2) = (i2,i1)
        if j1 > j2: (j1,j2) = (j2,j1)
        # Bounds checking: x
        if i1 < self._imin:
            i2 = min(self._imax, i2 + (self._imin - i1))
            i1 = self._imin
        if i2 > self._imax:
            i1 = max(self._imin, i1 - (i2 - self._imax))
            i2 = self._imax
        # Bounds checking: y
        if j1 < self._jmin:
            j2 = min(self._jmax, j2 + self._jmin - j1)
            j1 = self._jmin
        if j2 > self._jmax:
            j1 = max(self._jmin, j1 - (j2 - self._jmax))
            j2 = self._jmax
        # Range size checking: never zoom to a zero-width window.
        if i1 == i2: i2 += 1
        if j1 == j2: j2 += 1
        # Log axes cannot show values <= 0.
        if self._ilog.get(): i1 = max(1e-100, i1)
        if self._jlog.get(): j1 = max(1e-100, j1)
        # Do the actual zooming.
        self._plot.zoom(i1, j1, i2, j2)
    def _zoom_in_buttonpress(self, event):
        """Begin a zoom-drag: remember the anchor point and start
        tracking mouse motion with the rubber-band marker."""
        self._press_x = event.x
        self._press_y = event.y
        self._press_time = time.time()
        self._plot.create_zoom_marker()
        self._bind_id = self._plot.bind("<Motion>", self._zoom_in_drag)
    def _zoom_in_drag(self, event):
        """Track the mouse during a zoom-drag."""
        self._plot.adjust_zoom_marker(self._press_x, self._press_y,
                                      event.x, event.y)
    def _zoom_in_buttonrelease(self, event):
        """End a zoom-drag: a real drag zooms to the selected rectangle;
        a quick click zooms in by a fixed step instead."""
        self._plot.delete_zoom_marker()
        self._plot.unbind("<Motion>", self._bind_id)
        if ((time.time() - self._press_time > 0.1) and
            abs(self._press_x-event.x) + abs(self._press_y-event.y) > 5):
            (i1, j1) = self._plot.invtransform(self._press_x, self._press_y)
            (i2, j2) = self._plot.invtransform(event.x, event.y)
            self._zoom(i1, j1, i2, j2)
        else:
            self._zoom_in()
    def _zoom_in(self, *e):
        """Shrink the visible area by 10% on every side."""
        (i1, j1, i2, j2) = self._plot.visible_area()
        di = (i2-i1)*0.1
        dj = (j2-j1)*0.1
        self._zoom(i1+di, j1+dj, i2-di, j2-dj)
    def _zoom_out(self, *e):
        """Grow the visible area by 10% on every side."""
        (i1, j1, i2, j2) = self._plot.visible_area()
        di = -(i2-i1)*0.1
        dj = -(j2-j1)*0.1
        self._zoom(i1+di, j1+dj, i2-di, j2-dj)
    def _zoom_all(self, *e):
        """Zoom out to show the full data range."""
        self._zoom(self._imin, self._jmin, self._imax, self._jmax)
if __name__ == '__main__':
    from math import sin
    #Plot(lambda v: sin(v)**2+0.01)
    # Demo: a log-log plot of a strictly positive oscillating function.
    demo_range = [0.01*x for x in range(1,100)]
    demo_fn = lambda x:abs(x**2-sin(20*x**3))+.1
    Plot(demo_fn, demo_range, scale='log').mainloop()
| Python |
# Natural Language Toolkit: CFG visualization
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
#
# $Id: cfg.py 3460 2006-10-06 10:39:03Z stevenbird $
"""
Visualization tools for CFGs.
"""
"""
Idea for a nice demo:
- 3 panes: grammar, treelet, working area
- grammar is a list of productions
- when you select a production, the treelet that it licenses appears
in the treelet area
- the working area has the text on the bottom, and S at top. When
you select a production, it shows (ghosted) the locations where
that production's treelet could be attached to either the text
or the tree rooted at S.
- the user can drag the treelet onto one of those (or click on them?)
- the user can delete pieces of the tree from the working area
(right click?)
- connecting top to bottom? drag one NP onto another?
+-------------------------------------------------------------+
| S -> NP VP | S |
|[NP -> Det N ]| / \ |
| ... | NP VP |
| N -> 'dog' | |
| N -> 'cat' | |
| ... | |
+--------------+ |
| NP | Det N |
| / \ | | | |
| Det N | the cat saw the dog |
| | |
+--------------+----------------------------------------------+
Operations:
- connect a new treelet -- drag or click shadow
- delete a treelet -- right click
- if only connected to top, delete everything below
- if only connected to bottom, delete everything above
- connect top & bottom -- drag a leaf to a root or a root to a leaf
- disconnect top & bottom -- right click
- if connected to top & bottom, then disconnect
"""
from en.parser.nltk_lite.draw import *
from en.parser.nltk_lite.parse.cfg import *
from Tkinter import *
from en.parser.nltk_lite.parse.tree import *
from en.parser.nltk_lite.draw.tree import *
######################################################################
# Production List
######################################################################
class ProductionList(ColorizedList):
    """A colorized list widget that displays CFG productions, tagging
    the LHS/RHS nonterminals, the arrow, and terminal strings so each
    renders in its own color and font."""
    ARROW = SymbolWidget.SYMBOLS['rightarrow']
    def _init_colortags(self, textwidget, options):
        # One tag per token class produced by _item_repr.
        textwidget.tag_config('terminal', foreground='#006000')
        textwidget.tag_config('arrow', font='symbol', underline='0')
        textwidget.tag_config('nonterminal', foreground='blue',
                              font=('helvetica', -12, 'bold'))
    def _item_repr(self, item):
        # Render one production as a list of (text, tag) pairs:
        # LHS, then the arrow, then each RHS element in turn.
        pieces = [('%s\t' % item.lhs(), 'nonterminal'),
                  (self.ARROW, 'arrow')]
        for rhselt in item.rhs():
            if isinstance(rhselt, Nonterminal):
                pieces.append((' %s' % rhselt.symbol(), 'nonterminal'))
            else:
                pieces.append((' %r' % rhselt, 'terminal'))
        return pieces
######################################################################
# CFG Editor
######################################################################
_CFGEditor_HELP = """
The CFG Editor can be used to create or modify context free grammars.
A context free grammar consists of a start symbol and a list of
productions. The start symbol is specified by the text entry field in
the upper right hand corner of the editor; and the list of productions
are specified in the main text editing box.
Every non-blank line specifies a single production. Each production
has the form "LHS -> RHS," where LHS is a single nonterminal, and RHS
is a list of nonterminals and terminals.
Nonterminals must be a single word, such as S or NP or NP_subj.
Currently, nonterminals must consists of alphanumeric characters and
underscores (_). Nonterminals are colored blue. If you place the
mouse over any nonterminal, then all occurances of that nonterminal
will be highlighted.
Termianals must be surrounded by single quotes (') or double
quotes(\"). For example, "dog" and "New York" are terminals.
Currently, the string within the quotes must consist of alphanumeric
characters, underscores, and spaces.
To enter a new production, go to a blank line, and type a nonterminal,
followed by an arrow (->), followed by a sequence of terminals and
nonterminals. Note that "->" (dash + greater-than) is automatically
converted to an arrow symbol. When you move your cursor to a
different line, your production will automatically be colorized. If
there are any errors, they will be highlighted in red.
Note that the order of the productions is signifigant for some
algorithms. To re-order the productions, use cut and paste to move
them.
Use the buttons at the bottom of the window when you are done editing
the CFG:
- Ok: apply the new CFG, and exit the editor.
- Apply: apply the new CFG, and do not exit the editor.
- Reset: revert to the original CFG, and do not exit the editor.
- Cancel: revert to the original CFG, and exit the editor.
"""
class CFGEditor(object):
    """
    A dialog window for creating and editing context free grammars.
    C{CFGEditor} places the following restrictions on what C{CFG}s can
    be edited:
        - All nonterminals must be strings consisting of word
          characters.
        - All terminals must be strings consisting of word characters
          and space characters.
    """
    # Regular expressions used by _analyze_line.  Precompile them, so
    # we can process the text faster.
    ARROW = SymbolWidget.SYMBOLS['rightarrow']
    _LHS_RE = re.compile(r"(^\s*\w+\s*)(->|("+ARROW+"))")
    _ARROW_RE = re.compile("\s*(->|("+ARROW+"))\s*")
    _PRODUCTION_RE = re.compile(r"(^\s*\w+\s*)" + # LHS
                                "(->|("+ARROW+"))\s*" + # arrow
                                r"((\w+|'[\w ]*'|\"[\w ]*\"|\|)\s*)*$") # RHS
    _TOKEN_RE = re.compile("\\w+|->|'[\\w ]+'|\"[\\w ]+\"|("+ARROW+")")
    # Font used for nonterminal highlighting tags.
    _BOLD = ('helvetica', -12, 'bold')
    def __init__(self, parent, cfg=None, set_cfg_callback=None):
        """
        Create a new CFG editor window.
        @param parent: Tk parent widget for the editor's Toplevel.
        @param cfg: The grammar to edit; defaults to an empty grammar
            with start symbol S.
        @param set_cfg_callback: If given, called with the new grammar
            whenever Ok/Apply (or Reset/Cancel) commits one.
        """
        self._parent = parent
        if cfg is not None: self._cfg = cfg
        else: self._cfg = Grammar(Nonterminal('S'), [])
        self._set_cfg_callback = set_cfg_callback
        self._highlight_matching_nonterminals = 1
        # Create the top-level window.
        self._top = Toplevel(parent)
        self._init_bindings()
        self._init_startframe()
        self._startframe.pack(side='top', fill='x', expand=0)
        self._init_prodframe()
        self._prodframe.pack(side='top', fill='both', expand=1)
        self._init_buttons()
        self._buttonframe.pack(side='bottom', fill='x', expand=0)
        self._textwidget.focus()
    def _init_startframe(self):
        """Build the top row: the start-symbol entry and its labels."""
        frame = self._startframe = Frame(self._top)
        self._start = Entry(frame)
        self._start.pack(side='right')
        Label(frame, text='Start Symbol:').pack(side='right')
        Label(frame, text='Productions:').pack(side='left')
        self._start.insert(0, self._cfg.start().symbol())
    def _init_buttons(self):
        """Build the Ok/Apply/Reset/Cancel/Help button row."""
        frame = self._buttonframe = Frame(self._top)
        Button(frame, text='Ok', command=self._ok,
               underline=0, takefocus=0).pack(side='left')
        Button(frame, text='Apply', command=self._apply,
               underline=0, takefocus=0).pack(side='left')
        Button(frame, text='Reset', command=self._reset,
               underline=0, takefocus=0,).pack(side='left')
        Button(frame, text='Cancel', command=self._cancel,
               underline=0, takefocus=0).pack(side='left')
        Button(frame, text='Help', command=self._help,
               underline=0, takefocus=0).pack(side='right')
    def _init_bindings(self):
        """Set the window title and keyboard accelerators."""
        self._top.title('CFG Editor')
        self._top.bind('<Control-q>', self._cancel)
        self._top.bind('<Alt-q>', self._cancel)
        self._top.bind('<Control-d>', self._cancel)
        #self._top.bind('<Control-x>', self._cancel)
        self._top.bind('<Alt-x>', self._cancel)
        self._top.bind('<Escape>', self._cancel)
        #self._top.bind('<Control-c>', self._cancel)
        self._top.bind('<Alt-c>', self._cancel)
        self._top.bind('<Control-o>', self._ok)
        self._top.bind('<Alt-o>', self._ok)
        self._top.bind('<Control-a>', self._apply)
        self._top.bind('<Alt-a>', self._apply)
        self._top.bind('<Control-r>', self._reset)
        self._top.bind('<Alt-r>', self._reset)
        self._top.bind('<Control-h>', self._help)
        self._top.bind('<Alt-h>', self._help)
        self._top.bind('<F1>', self._help)
    def _init_prodframe(self):
        """Build the main production-editing Text widget, wire up its
        colorization bindings, and fill it with the current grammar
        (productions sharing an LHS are merged onto one '|' line)."""
        self._prodframe = Frame(self._top)
        # Create the basic Text widget & scrollbar.
        self._textwidget = Text(self._prodframe, background='#e0e0e0',
                                exportselection=1)
        self._textscroll = Scrollbar(self._prodframe, takefocus=0,
                                     orient='vertical')
        self._textwidget.config(yscrollcommand = self._textscroll.set)
        self._textscroll.config(command=self._textwidget.yview)
        self._textscroll.pack(side='right', fill='y')
        self._textwidget.pack(expand=1, fill='both', side='left')
        # Initialize the colorization tags.  Each nonterminal gets its
        # own tag, so they aren't listed here.
        self._textwidget.tag_config('terminal', foreground='#006000')
        self._textwidget.tag_config('arrow', font='symbol')
        self._textwidget.tag_config('error', background='red')
        # Keep track of what line they're on.  We use that to remember
        # to re-analyze a line whenever they leave it.
        self._linenum = 0
        # Expand "->" to an arrow.
        self._top.bind('>', self._replace_arrows)
        # Re-colorize lines when appropriate.
        self._top.bind('<<Paste>>', self._analyze)
        self._top.bind('<KeyPress>', self._check_analyze)
        self._top.bind('<ButtonPress>', self._check_analyze)
        # Tab cycles focus. (why doesn't this work??)
        def cycle(e, textwidget=self._textwidget):
            textwidget.tk_focusNext().focus()
        self._textwidget.bind('<Tab>', cycle)
        # Merge consecutive productions with the same LHS into a single
        # (lhs, [rhs, rhs, ...]) tuple, except empty-RHS productions.
        prod_tuples = [(p.lhs(),[p.rhs()]) for p in self._cfg.productions()]
        for i in range(len(prod_tuples)-1,0,-1):
            if (prod_tuples[i][0] == prod_tuples[i-1][0]):
                if () in prod_tuples[i][1]: continue
                if () in prod_tuples[i-1][1]: continue
                print prod_tuples[i-1][1]
                print prod_tuples[i][1]
                prod_tuples[i-1][1].extend(prod_tuples[i][1])
                del prod_tuples[i]
        # Render each merged group as "LHS -> rhs | rhs | ...".
        for lhs, rhss in prod_tuples:
            print lhs, rhss
            s = '%s ->' % lhs
            for rhs in rhss:
                for elt in rhs:
                    if isinstance(elt, Nonterminal): s += ' %s' % elt
                    else: s += ' %r' % elt
                s += ' |'
            s = s[:-2] + '\n'
            self._textwidget.insert('end', s)
        self._analyze()
#         # Add the producitons to the text widget, and colorize them.
#         prod_by_lhs = {}
#         for prod in self._cfg.productions():
#             if len(prod.rhs()) > 0:
#                 prod_by_lhs.setdefault(prod.lhs(),[]).append(prod)
#         for (lhs, prods) in prod_by_lhs.items():
#             self._textwidget.insert('end', '%s ->' % lhs)
#             self._textwidget.insert('end', self._rhs(prods[0]))
#             for prod in prods[1:]:
#                 print '\t|'+self._rhs(prod),
#                 self._textwidget.insert('end', '\t|'+self._rhs(prod))
#             print
#             self._textwidget.insert('end', '\n')
#         for prod in self._cfg.productions():
#             if len(prod.rhs()) == 0:
#                 self._textwidget.insert('end', '%s' % prod)
#         self._analyze()
#     def _rhs(self, prod):
#         s = ''
#         for elt in prod.rhs():
#             if isinstance(elt, Nonterminal): s += ' %s' % elt.symbol()
#             else: s += ' %r' % elt
#         return s
    def _clear_tags(self, linenum):
        """
        Remove all tags (except C{arrow} and C{sel}) from the given
        line of the text widget used for editing the productions.
        """
        start = '%d.0'%linenum
        end = '%d.end'%linenum
        for tag in self._textwidget.tag_names():
            if tag not in ('arrow', 'sel'):
                self._textwidget.tag_remove(tag, start, end)
    def _check_analyze(self, *e):
        """
        Check if we've moved to a new line.  If we have, then remove
        all colorization from the line we moved to, and re-colorize
        the line that we moved from.
        """
        linenum = int(self._textwidget.index('insert').split('.')[0])
        if linenum != self._linenum:
            self._clear_tags(linenum)
            self._analyze_line(self._linenum)
            self._linenum = linenum
    def _replace_arrows(self, *e):
        """
        Replace any C{'->'} text strings with arrows (char \\256, in
        symbol font).  This searches the whole buffer, but is fast
        enough to be done anytime they press '>'.
        """
        arrow = '1.0'
        while 1:
            arrow = self._textwidget.search('->', arrow, 'end+1char')
            if arrow == '': break
            self._textwidget.delete(arrow, arrow+'+2char')
            self._textwidget.insert(arrow, self.ARROW, 'arrow')
            self._textwidget.insert(arrow, '\t')
        # Second pass: ensure every arrow character carries the
        # 'arrow' tag (so it renders in the symbol font).
        arrow = '1.0'
        while 1:
            arrow = self._textwidget.search(self.ARROW, arrow+'+1char',
                                            'end+1char')
            if arrow == '': break
            self._textwidget.tag_add('arrow', arrow, arrow+'+1char')
    def _analyze_token(self, match, linenum):
        """
        Given a line number and a regexp match for a token on that
        line, colorize the token.  Note that the regexp match gives us
        the token's text, start index (on the line), and end index (on
        the line).
        """
        # What type of token is it?
        if match.group()[0] in "'\"": tag = 'terminal'
        elif match.group() in ('->', self.ARROW): tag = 'arrow'
        else:
            # If it's a nonterminal, then set up new bindings, so we
            # can highlight all instances of that nonterminal when we
            # put the mouse over it.
            tag = 'nonterminal_'+match.group()
            if tag not in self._textwidget.tag_names():
                self._init_nonterminal_tag(tag)
        start = '%d.%d' % (linenum, match.start())
        end = '%d.%d' % (linenum, match.end())
        self._textwidget.tag_add(tag, start, end)
    def _init_nonterminal_tag(self, tag, foreground='blue'):
        """Create the colorization tag for one nonterminal, optionally
        with enter/leave bindings that highlight all its occurrences."""
        self._textwidget.tag_config(tag, foreground=foreground,
                                    font=CFGEditor._BOLD)
        if not self._highlight_matching_nonterminals:
            return
        def enter(e, textwidget=self._textwidget, tag=tag):
            textwidget.tag_config(tag, background='#80ff80')
        def leave(e, textwidget=self._textwidget, tag=tag):
            textwidget.tag_config(tag, background='')
        self._textwidget.tag_bind(tag, '<Enter>', enter)
        self._textwidget.tag_bind(tag, '<Leave>', leave)
    def _analyze_line(self, linenum):
        """
        Colorize a given line.
        """
        # Get rid of any tags that were previously on the line.
        self._clear_tags(linenum)
        # Get the line line's text string.
        line = self._textwidget.get(`linenum`+'.0', `linenum`+'.end')
        # If it's a valid production, then colorize each token.
        if CFGEditor._PRODUCTION_RE.match(line):
            # It's valid; Use _TOKEN_RE to tokenize the production,
            # and call analyze_token on each token.
            def analyze_token(match, self=self, linenum=linenum):
                self._analyze_token(match, linenum)
                return ''
            CFGEditor._TOKEN_RE.sub(analyze_token, line)
        elif line.strip() != '':
            # It's invalid; show the user where the error is.
            self._mark_error(linenum, line)
    def _mark_error(self, linenum, line):
        """
        Mark the location of an error in a line.
        """
        arrowmatch = CFGEditor._ARROW_RE.search(line)
        if not arrowmatch:
            # If there's no arrow at all, highlight the whole line.
            start = '%d.0' % linenum
            end = '%d.end' % linenum
        elif not CFGEditor._LHS_RE.match(line):
            # Otherwise, if the LHS is bad, highlight it.
            start = '%d.0' % linenum
            end = '%d.%d' % (linenum, arrowmatch.start())
        else:
            # Otherwise, highlight the RHS.
            start = '%d.%d' % (linenum, arrowmatch.end())
            end = '%d.end' % linenum
        # If we're highlighting 0 chars, highlight the whole line.
        if self._textwidget.compare(start, '==', end):
            start = '%d.0' % linenum
            end = '%d.end' % linenum
        self._textwidget.tag_add('error', start, end)
    def _analyze(self, *e):
        """
        Replace C{->} with arrows, and colorize the entire buffer.
        """
        self._replace_arrows()
        numlines = int(self._textwidget.index('end').split('.')[0])
        for linenum in range(1, numlines+1): # line numbers start at 1.
            self._analyze_line(linenum)
    def _parse_productions(self):
        """
        Parse the current contents of the textwidget buffer, to create
        a list of productions.
        """
        productions = []
        # Get the text, normalize it, and split it into lines.
        text = self._textwidget.get('1.0', 'end')
        text = re.sub(self.ARROW, '->', text)
        text = re.sub('\t', ' ', text)
        lines = text.split('\n')
        # Convert each line to a CFG production
        for line in lines:
            line = line.strip()
            if line=='': continue
            productions += parse_production(line)
            #if line.strip() == '': continue
            #if not CFGEditor._PRODUCTION_RE.match(line):
            #    raise ValueError('Bad production string %r' % line)
            #
            #(lhs_str, rhs_str) = line.split('->')
            #lhs = Nonterminal(lhs_str.strip())
            #rhs = []
            #def parse_token(match, rhs=rhs):
            #    token = match.group()
            #    if token[0] in "'\"": rhs.append(token[1:-1])
            #    else: rhs.append(Nonterminal(token))
            #    return ''
            #CFGEditor._TOKEN_RE.sub(parse_token, rhs_str)
            #
            #productions.append(Production(lhs, *rhs))
        return productions
    def _destroy(self, *e):
        """Close the editor window (idempotent)."""
        if self._top is None: return
        self._top.destroy()
        self._top = None
    def _ok(self, *e):
        """Commit the edited grammar and close the editor."""
        self._apply()
        self._destroy()
    def _apply(self, *e):
        """Build a Grammar from the current buffer and start symbol,
        and hand it to the callback (if one was supplied)."""
        productions = self._parse_productions()
        start = Nonterminal(self._start.get())
        cfg = Grammar(start, productions)
        if self._set_cfg_callback is not None:
            self._set_cfg_callback(cfg)
    def _reset(self, *e):
        """Discard edits and re-display the original grammar, notifying
        the callback that the original grammar is in effect again."""
        self._textwidget.delete('1.0', 'end')
        for production in self._cfg.productions():
            self._textwidget.insert('end', '%s\n' % production)
        self._analyze()
        if self._set_cfg_callback is not None:
            self._set_cfg_callback(self._cfg)
    def _cancel(self, *e):
        """Revert to the original grammar and close the editor."""
        try: self._reset()
        except: pass
        self._destroy()
    def _help(self, *e):
        """Show the editor's help text in a ShowText window."""
        # The default font's not very legible; try using 'fixed' instead.
        try:
            ShowText(self._parent, 'Help: Chart Parser Demo',
                     (_CFGEditor_HELP).strip(), width=75, font='fixed')
        except:
            ShowText(self._parent, 'Help: Chart Parser Demo',
                     (_CFGEditor_HELP).strip(), width=75)
######################################################################
# New Demo (built tree based on cfg)
######################################################################
class CFGDemo(object):
def __init__(self, grammar, text):
self._grammar = grammar
self._text = text
# Set up the main window.
self._top = Tk()
self._top.title('Context Free Grammar Demo')
# Base font size
self._size = IntVar(self._top)
self._size.set(12) # = medium
# Set up the key bindings
self._init_bindings(self._top)
# Create the basic frames
frame1 = Frame(self._top)
frame1.pack(side='left', fill='y', expand=0)
self._init_menubar(self._top)
self._init_buttons(self._top)
self._init_grammar(frame1)
self._init_treelet(frame1)
self._init_workspace(self._top)
#//////////////////////////////////////////////////
# Initialization
#//////////////////////////////////////////////////
def _init_bindings(self, top):
top.bind('<Control-q>', self.destroy)
def _init_menubar(self, parent): pass
def _init_buttons(self, parent): pass
def _init_grammar(self, parent):
self._prodlist = ProductionList(parent, self._grammar, width=20)
self._prodlist.pack(side='top', fill='both', expand=1)
self._prodlist.focus()
self._prodlist.add_callback('select', self._selectprod_cb)
self._prodlist.add_callback('move', self._selectprod_cb)
def _init_treelet(self, parent):
self._treelet_canvas = Canvas(parent, background='white')
self._treelet_canvas.pack(side='bottom', fill='x')
self._treelet = None
def _init_workspace(self, parent):
self._workspace = CanvasFrame(parent, background='white')
self._workspace.pack(side='right', fill='both', expand=1)
self._tree = None
self.reset_workspace()
#//////////////////////////////////////////////////
# Workspace
#//////////////////////////////////////////////////
def reset_workspace(self):
c = self._workspace.canvas()
fontsize = int(self._size.get())
node_font = ('helvetica', -(fontsize+4), 'bold')
leaf_font = ('helvetica', -(fontsize+2))
# Remove the old tree
if self._tree is not None:
self._workspace.remove_widget(self._tree)
# The root of the tree.
start = self._grammar.start().symbol()
rootnode = TextWidget(c, start, font=node_font, draggable=1)
# The leaves of the tree.
leaves = []
for word in self._text:
if isinstance(word, Token): word = word.type()
leaves.append(TextWidget(c, word, font=leaf_font, draggable=1))
# Put it all together into one tree
self._tree = TreeSegmentWidget(c, rootnode, leaves,
color='white')
# Add it to the workspace.
self._workspace.add_widget(self._tree)
# Move the leaves to the bottom of the workspace.
for leaf in leaves: leaf.move(0,100)
#self._nodes = {start:1}
#self._leaves = dict([(l,1) for l in leaves])
def workspace_markprod(self, production):
pass
def _markproduction(self, prod, tree=None):
if tree is None: tree = self._tree
for i in range(len(tree.subtrees())-len(prod.rhs())):
if tree['color', i] == 'white':
self._markproduction
for j, node in enumerate(prod.rhs()):
widget = tree.subtrees()[i+j]
if (isinstance(node, Nonterminal) and
isinstance(widget, TreeSegmentWidget) and
node.symbol == widget.node().text()):
pass # matching nonterminal
elif (isinstance(node, (str, unicode)) and
isinstance(widget, TextWidget) and
node == widget.text()):
pass # matching nonterminal
else: break
else:
# Everything matched!
print 'MATCH AT', i
#//////////////////////////////////////////////////
# Grammar
#//////////////////////////////////////////////////
def _selectprod_cb(self, production):
canvas = self._treelet_canvas
self._prodlist.highlight(production)
if self._treelet is not None: self._treelet.destroy()
# Convert the production to a tree.
rhs = production.rhs()
for (i, elt) in enumerate(rhs):
if isinstance(elt, Nonterminal): elt = Tree(elt)
tree = Tree(production.lhs().symbol(), *rhs)
# Draw the tree in the treelet area.
fontsize = int(self._size.get())
node_font = ('helvetica', -(fontsize+4), 'bold')
leaf_font = ('helvetica', -(fontsize+2))
self._treelet = tree_to_treesegment(canvas, tree,
node_font=node_font,
leaf_font=leaf_font)
self._treelet['draggable'] = 1
# Center the treelet.
(x1, y1, x2, y2) = self._treelet.bbox()
w, h = int(canvas['width']), int(canvas['height'])
self._treelet.move((w-x1-x2)/2, (h-y1-y2)/2)
# Mark the places where we can add it to the workspace.
self._markproduction(production)
def destroy(self, *args):
self._top.destroy()
def mainloop(self, *args, **kwargs):
self._top.mainloop(*args, **kwargs)
def demo2():
    """Create a CFGDemo with a small hand-built grammar and run it."""
    from en.parser.nltk_lite.parse import cfg
    nonterminals = 'S VP NP PP P N Name V Det'
    (S, VP, NP, PP, P, N, Name, V, Det) = [cfg.Nonterminal(s)
                                           for s in nonterminals.split()]
    productions = (
        # Syntactic Productions
        cfg.Production(S, [NP, VP]),
        cfg.Production(NP, [Det, N]),
        cfg.Production(NP, [NP, PP]),
        cfg.Production(VP, [VP, PP]),
        cfg.Production(VP, [V, NP, PP]),
        cfg.Production(VP, [V, NP]),
        cfg.Production(PP, [P, NP]),
        cfg.Production(PP, []),
        cfg.Production(PP, ['up', 'over', NP]),
        # Lexical Productions
        cfg.Production(NP, ['I']), cfg.Production(Det, ['the']),
        cfg.Production(Det, ['a']), cfg.Production(N, ['man']),
        cfg.Production(V, ['saw']), cfg.Production(P, ['in']),
        cfg.Production(P, ['with']), cfg.Production(N, ['park']),
        cfg.Production(N, ['dog']), cfg.Production(N, ['statue']),
        cfg.Production(Det, ['my']),
        )
    grammar = cfg.Grammar(S, productions)
    text = 'I saw a man in the park'.split()
    d = CFGDemo(grammar, text)
    d.mainloop()
######################################################################
# Old Demo
######################################################################
def demo():
    """Create a CFGEditor over a parsed grammar and run it (old demo)."""
    from en.parser.nltk_lite.parse import cfg
    nonterminals = 'S VP NP PP P N Name V Det'
    (S, VP, NP, PP, P, N, Name, V, Det) = [cfg.Nonterminal(s)
                                           for s in nonterminals.split()]
    grammar = cfg.parse_grammar("""
    S -> NP VP
    PP -> P NP
    PP -> 'up' 'over' NP
    NP -> Det N
    NP -> NP PP
    VP -> V NP
    VP -> VP PP
    Det -> 'a'
    Det -> 'the'
    Det -> 'my'
    NP -> 'I'
    N -> 'dog'
    N -> 'man'
    N -> 'park'
    N -> 'statue'
    V -> 'saw'
    P -> 'in'
    P -> 'with'
    """)
    # Callback invoked by the editor whenever the grammar changes.
    def cb(grammar): print grammar
    top = Tk()
    editor = CFGEditor(top, grammar, cb)
    Label(top, text='\nTesting CFG Editor\n').pack()
    Button(top, text='Quit', command=top.destroy).pack()
    top.mainloop()
def demo3():
    """Display a ProductionList over a hand-built set of productions."""
    from en.parser.nltk_lite.parse import cfg
    # Bug fix: `nonterminals(...)` was called unqualified, but only the
    # `cfg` module is imported here, so it raised NameError.  The helper
    # lives in the cfg module.
    (S, VP, NP, PP, P, N, Name, V, Det) = \
        cfg.nonterminals('S, VP, NP, PP, P, N, Name, V, Det')
    productions = (
        # Syntactic Productions
        cfg.Production(S, [NP, VP]),
        cfg.Production(NP, [Det, N]),
        cfg.Production(NP, [NP, PP]),
        cfg.Production(VP, [VP, PP]),
        cfg.Production(VP, [V, NP, PP]),
        cfg.Production(VP, [V, NP]),
        cfg.Production(PP, [P, NP]),
        cfg.Production(PP, []),
        cfg.Production(PP, ['up', 'over', NP]),
        # Lexical Productions
        cfg.Production(NP, ['I']), cfg.Production(Det, ['the']),
        cfg.Production(Det, ['a']), cfg.Production(N, ['man']),
        cfg.Production(V, ['saw']), cfg.Production(P, ['in']),
        cfg.Production(P, ['with']), cfg.Production(N, ['park']),
        cfg.Production(N, ['dog']), cfg.Production(N, ['statue']),
        cfg.Production(Det, ['my']),
        )
    t = Tk()
    def destroy(e, t=t): t.destroy()
    t.bind('q', destroy)
    p = ProductionList(t, productions)
    p.pack(expand=1, fill='both')
    p.add_callback('select', p.markonly)
    p.add_callback('move', p.markonly)
    p.focus()
    p.mark(productions[2])
    p.mark(productions[8])
# Run the interactive CFG-editor demo when executed as a script.
if __name__ == '__main__': demo()
| Python |
# Natural Language Toolkit: Recursive Descent Parser Demo
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
#
# $Id: rdparser.py 3460 2006-10-06 10:39:03Z stevenbird $
"""
A graphical tool for exploring the recursive descent parser.
The recursive descent parser maintains a tree, which records the
structure of the portion of the text that has been parsed. It uses
CFG productions to expand the fringe of the tree, and matches its
leaves against the text. Initially, the tree contains the start
symbol ("S"). It is shown in the main canvas, to the right of the
list of available expansions.
The parser builds up a tree structure for the text using three
operations:
- "expand" uses a CFG production to add children to a node on the
fringe of the tree.
- "match" compares a leaf in the tree to a text token.
- "backtrack" returns the tree to its state before the most recent
expand or match operation.
The parser maintains a list of tree locations called a "frontier" to
remember which nodes have not yet been expanded and which leaves have
not yet been matched against the text. The leftmost frontier node is
shown in green, and the other frontier nodes are shown in blue. The
parser always performs expand and match operations on the leftmost
element of the frontier.
You can control the parser's operation by using the "expand," "match,"
and "backtrack" buttons; or you can use the "step" button to let the
parser automatically decide which operation to apply. The parser uses
the following rules to decide which operation to apply:
- If the leftmost frontier element is a token, try matching it.
- If the leftmost frontier element is a node, try expanding it with
the first untried expansion.
- Otherwise, backtrack.
The "expand" button applies the untried expansion whose CFG production
is listed earliest in the grammar. To manually choose which expansion
to apply, click on a CFG production from the list of available
expansions, on the left side of the main window.
The "autostep" button will let the parser continue applying
operations to the tree until it reaches a complete parse.  You can
cancel an autostep in progress at any time by clicking on the
"autostep" button again.
Keyboard Shortcuts::
[Space]\t Perform the next expand, match, or backtrack operation
[a]\t Step through operations until the next complete parse
[e]\t Perform an expand operation
[m]\t Perform a match operation
[b]\t Perform a backtrack operation
[Delete]\t Reset the parser
[g]\t Show/hide available expansions list
[h]\t Help
[Ctrl-p]\t Print
[q]\t Quit
"""
from en.parser.nltk_lite.draw.tree import *
from en.parser.nltk_lite.draw import *
from en.parser.nltk_lite import parse
from en.parser.nltk_lite import tokenize
from en.parser.nltk_lite.draw.cfg import *
from en.parser.nltk_lite.draw.cfg import CFGEditor
import tkFont
from Tkinter import *
class RecursiveDescentDemo(object):
"""
A graphical tool for exploring the recursive descent parser. The tool
displays the parser's tree and the remaining text, and allows the
user to control the parser's operation. In particular, the user
can expand subtrees on the frontier, match tokens on the frontier
against the text, and backtrack. A "step" button simply steps
through the parsing process, performing the operations that
C{RecursiveDescentParser} would use.
"""
def __init__(self, grammar, sent, trace=0):
    """Build the demo window for parsing ``sent`` with ``grammar``.

    trace: trace level forwarded to SteppingRecursiveDescent.
    """
    self._sent = sent
    self._parser = parse.SteppingRecursiveDescent(grammar, trace)
    # Set up the main window.
    self._top = Tk()
    self._top.title('Recursive Descent Parser Demo')
    # Set up key bindings.
    self._init_bindings()
    # Initialize the fonts.
    self._init_fonts(self._top)
    # Animations.  animating_lock is a lock to prevent the demo
    # from performing new operations while it's animating.
    self._animation_frames = IntVar(self._top)
    self._animation_frames.set(5)
    self._animating_lock = 0
    self._autostep = 0
    # The user can hide the grammar.
    self._show_grammar = IntVar(self._top)
    self._show_grammar.set(1)
    # Create the basic frames.
    self._init_menubar(self._top)
    self._init_buttons(self._top)
    self._init_feedback(self._top)
    self._init_grammar(self._top)
    self._init_canvas(self._top)
    # Initialize the parser.
    self._parser.initialize(self._sent)
    # Resize callback
    self._canvas.bind('<Configure>', self._configure)
#########################################
## Initialization Helpers
#########################################
def _init_fonts(self, root):
    """Create the normal/bold/big fonts, sized from the system font."""
    # See: <http://www.astro.washington.edu/owen/ROTKFolklore.html>
    self._sysfont = tkFont.Font(font=Button()["font"])
    root.option_add("*Font", self._sysfont)
    # What's our font size (default=same as sysfont)
    self._size = IntVar(root)
    self._size.set(self._sysfont.cget('size'))
    self._boldfont = tkFont.Font(family='helvetica', weight='bold',
                                 size=self._size.get())
    self._font = tkFont.Font(family='helvetica',
                             size=self._size.get())
    # Negative sizes are pixel sizes in Tk; keep the sign consistent.
    if self._size.get() < 0: big = self._size.get() - 2
    else: big = self._size.get() + 2
    self._bigfont = tkFont.Font(family='helvetica', weight='bold',
                                size=big)
def _init_grammar(self, parent):
    """Build the 'Available Expansions' listbox showing the grammar."""
    # Grammar view.
    self._prodframe = listframe = Frame(parent)
    self._prodframe.pack(fill='both', side='left', padx=2)
    self._prodlist_label = Label(self._prodframe, font=self._boldfont,
                                 text='Available Expansions')
    self._prodlist_label.pack()
    self._prodlist = Listbox(self._prodframe, selectmode='single',
                             relief='groove', background='white',
                             foreground='#909090', font=self._font,
                             selectforeground='#004040',
                             selectbackground='#c0f0c0')
    self._prodlist.pack(side='right', fill='both', expand=1)
    self._productions = list(self._parser.grammar().productions())
    for production in self._productions:
        self._prodlist.insert('end', (' %s' % production))
    self._prodlist.config(height=min(len(self._productions), 25))
    # Add a scrollbar if there are more than 25 productions.
    if len(self._productions) > 25:
        listscroll = Scrollbar(self._prodframe,
                               orient='vertical')
        self._prodlist.config(yscrollcommand=listscroll.set)
        listscroll.config(command=self._prodlist.yview)
        listscroll.pack(side='left', fill='y')
    # If they select a production, apply it.
    self._prodlist.bind('<<ListboxSelect>>', self._prodlist_select)
def _init_bindings(self):
    """Register the keyboard shortcuts documented in the module docstring."""
    # Key bindings are a good thing.
    self._top.bind('<Control-q>', self.destroy)
    self._top.bind('<Control-x>', self.destroy)
    self._top.bind('<Escape>', self.destroy)
    self._top.bind('e', self.expand)
    #self._top.bind('<Alt-e>', self.expand)
    #self._top.bind('<Control-e>', self.expand)
    self._top.bind('m', self.match)
    self._top.bind('<Alt-m>', self.match)
    self._top.bind('<Control-m>', self.match)
    self._top.bind('b', self.backtrack)
    self._top.bind('<Alt-b>', self.backtrack)
    self._top.bind('<Control-b>', self.backtrack)
    self._top.bind('<Control-z>', self.backtrack)
    self._top.bind('<BackSpace>', self.backtrack)
    self._top.bind('a', self.autostep)
    #self._top.bind('<Control-a>', self.autostep)
    self._top.bind('<Control-space>', self.autostep)
    self._top.bind('<Control-c>', self.cancel_autostep)
    self._top.bind('<space>', self.step)
    self._top.bind('<Delete>', self.reset)
    self._top.bind('<Control-p>', self.postscript)
    #self._top.bind('<h>', self.help)
    #self._top.bind('<Alt-h>', self.help)
    self._top.bind('<Control-h>', self.help)
    self._top.bind('<F1>', self.help)
    #self._top.bind('<g>', self.toggle_grammar)
    #self._top.bind('<Alt-g>', self.toggle_grammar)
    #self._top.bind('<Control-g>', self.toggle_grammar)
    self._top.bind('<Control-g>', self.edit_grammar)
    self._top.bind('<Control-t>', self.edit_sentence)
def _init_buttons(self, parent):
    """Create the Step/Autostep/Expand/Match/Backtrack button row."""
    # Set up the frames.
    self._buttonframe = buttonframe = Frame(parent)
    buttonframe.pack(fill='none', side='bottom', padx=3, pady=2)
    Button(buttonframe, text='Step',
           background='#90c0d0', foreground='black',
           command=self.step,).pack(side='left')
    Button(buttonframe, text='Autostep',
           background='#90c0d0', foreground='black',
           command=self.autostep,).pack(side='left')
    Button(buttonframe, text='Expand', underline=0,
           background='#90f090', foreground='black',
           command=self.expand).pack(side='left')
    Button(buttonframe, text='Match', underline=0,
           background='#90f090', foreground='black',
           command=self.match).pack(side='left')
    Button(buttonframe, text='Backtrack', underline=0,
           background='#f0a0a0', foreground='black',
           command=self.backtrack).pack(side='left')
    # Replace autostep...
    # self._autostep_button = Button(buttonframe, text='Autostep',
    #                                underline=0, command=self.autostep)
    # self._autostep_button.pack(side='left')
def _configure(self, event):
    """<Configure> handler: window was resized; stop autostepping,
    clamp the canvas scrollregion to the new height, and redraw."""
    self._autostep = 0
    (x1, y1, x2, y2) = self._cframe.scrollregion()
    y2 = event.height - 6
    self._canvas['scrollregion'] = '%d %d %d %d' % (x1, y1, x2, y2)
    self._redraw()
def _init_feedback(self, parent):
    """Create the 'Last Operation:' feedback bar (two labels)."""
    self._feedbackframe = feedbackframe = Frame(parent)
    feedbackframe.pack(fill='x', side='bottom', padx=3, pady=3)
    self._lastoper_label = Label(feedbackframe, text='Last Operation:',
                                 font=self._font)
    self._lastoper_label.pack(side='left')
    lastoperframe = Frame(feedbackframe, relief='sunken', border=1)
    lastoperframe.pack(fill='x', side='right', expand=1, padx=5)
    # _lastoper1 holds the operation name, _lastoper2 the detail text.
    self._lastoper1 = Label(lastoperframe, foreground='#007070',
                            background='#f0f0f0', font=self._font)
    self._lastoper2 = Label(lastoperframe, anchor='w', width=30,
                            foreground='#004040', background='#f0f0f0',
                            font=self._font)
    self._lastoper1.pack(side='left')
    self._lastoper2.pack(side='left', fill='x', expand=1)
def _init_canvas(self, parent):
    """Create the main drawing canvas; tree/text widgets start empty."""
    self._cframe = CanvasFrame(parent, background='white',
                               #width=525, height=250,
                               closeenough=10,
                               border=2, relief='sunken')
    self._cframe.pack(expand=1, fill='both', side='top', pady=2)
    canvas = self._canvas = self._cframe.canvas()
    # Initially, there's no tree or text
    self._tree = None
    self._textwidgets = []
    self._textline = None
def _init_menubar(self, parent):
    """Build the File / Edit / Apply / View / Animate / Help menus."""
    menubar = Menu(parent)

    filemenu = Menu(menubar, tearoff=0)
    filemenu.add_command(label='Reset Parser', underline=0,
                         command=self.reset, accelerator='Del')
    filemenu.add_command(label='Print to Postscript', underline=0,
                         command=self.postscript, accelerator='Ctrl-p')
    filemenu.add_command(label='Exit', underline=1,
                         command=self.destroy, accelerator='Ctrl-x')
    menubar.add_cascade(label='File', underline=0, menu=filemenu)

    editmenu = Menu(menubar, tearoff=0)
    editmenu.add_command(label='Edit Grammar', underline=5,
                         command=self.edit_grammar,
                         accelerator='Ctrl-g')
    editmenu.add_command(label='Edit Text', underline=5,
                         command=self.edit_sentence,
                         accelerator='Ctrl-t')
    menubar.add_cascade(label='Edit', underline=0, menu=editmenu)

    rulemenu = Menu(menubar, tearoff=0)
    rulemenu.add_command(label='Step', underline=1,
                         command=self.step, accelerator='Space')
    rulemenu.add_separator()
    rulemenu.add_command(label='Match', underline=0,
                         command=self.match, accelerator='Ctrl-m')
    rulemenu.add_command(label='Expand', underline=0,
                         command=self.expand, accelerator='Ctrl-e')
    rulemenu.add_separator()
    rulemenu.add_command(label='Backtrack', underline=0,
                         command=self.backtrack, accelerator='Ctrl-b')
    menubar.add_cascade(label='Apply', underline=0, menu=rulemenu)

    viewmenu = Menu(menubar, tearoff=0)
    viewmenu.add_checkbutton(label="Show Grammar", underline=0,
                             variable=self._show_grammar,
                             command=self._toggle_grammar)
    viewmenu.add_separator()
    # Font-size radio buttons share the self._size IntVar.
    viewmenu.add_radiobutton(label='Tiny', variable=self._size,
                             underline=0, value=10, command=self.resize)
    viewmenu.add_radiobutton(label='Small', variable=self._size,
                             underline=0, value=12, command=self.resize)
    viewmenu.add_radiobutton(label='Medium', variable=self._size,
                             underline=0, value=14, command=self.resize)
    viewmenu.add_radiobutton(label='Large', variable=self._size,
                             underline=0, value=18, command=self.resize)
    viewmenu.add_radiobutton(label='Huge', variable=self._size,
                             underline=0, value=24, command=self.resize)
    menubar.add_cascade(label='View', underline=0, menu=viewmenu)

    # Animation speed = number of frames per operation (0 = none).
    animatemenu = Menu(menubar, tearoff=0)
    animatemenu.add_radiobutton(label="No Animation", underline=0,
                                variable=self._animation_frames,
                                value=0)
    animatemenu.add_radiobutton(label="Slow Animation", underline=0,
                                variable=self._animation_frames,
                                value=10, accelerator='-')
    animatemenu.add_radiobutton(label="Normal Animation", underline=0,
                                variable=self._animation_frames,
                                value=5, accelerator='=')
    animatemenu.add_radiobutton(label="Fast Animation", underline=0,
                                variable=self._animation_frames,
                                value=2, accelerator='+')
    menubar.add_cascade(label="Animate", underline=1, menu=animatemenu)

    helpmenu = Menu(menubar, tearoff=0)
    helpmenu.add_command(label='About', underline=0,
                         command=self.about)
    helpmenu.add_command(label='Instructions', underline=0,
                         command=self.help, accelerator='F1')
    menubar.add_cascade(label='Help', underline=0, menu=helpmenu)

    parent.config(menu=menubar)
#########################################
## Helper
#########################################
def _get(self, widget, treeloc):
    """Follow the tree location ``treeloc`` (a sequence of child
    indices) down from ``widget`` and return the widget found there;
    tree segments are resolved to their node widget."""
    current = widget
    for child_index in treeloc:
        current = current.subtrees()[child_index]
    if isinstance(current, TreeSegmentWidget):
        return current.node()
    return current
#########################################
## Main draw procedure
#########################################
def _redraw(self):
    """Full redraw: rebuild the tree widget from the parser state,
    lay out the text along the bottom, and re-apply highlighting."""
    canvas = self._canvas

    # Delete the old tree, widgets, etc.
    if self._tree is not None:
        self._cframe.destroy_widget(self._tree)
    for twidget in self._textwidgets:
        self._cframe.destroy_widget(twidget)
    if self._textline is not None:
        self._canvas.delete(self._textline)

    # Draw the tree.
    helv = ('helvetica', -self._size.get())
    bold = ('helvetica', -self._size.get(), 'bold')
    attribs = {'tree_color': '#000000', 'tree_width': 2,
               'node_font': bold, 'leaf_font': helv,}
    tree = self._parser.tree()
    self._tree = tree_to_treesegment(canvas, tree, **attribs)
    self._cframe.add_widget(self._tree, 30, 5)

    # Draw the text.
    helv = ('helvetica', -self._size.get())
    bottom = y = self._cframe.scrollregion()[3]
    self._textwidgets = [TextWidget(canvas, word, font=self._font)
                         for word in self._sent]
    for twidget in self._textwidgets:
        self._cframe.add_widget(twidget, 0, 0)
        twidget.move(0, bottom - twidget.bbox()[3] - 5)
        y = min(y, twidget.bbox()[1])

    # Draw a line over the text, to separate it from the tree.
    self._textline = canvas.create_line(-5000, y - 5, 5000, y - 5, dash='.')

    # Highlight appropriate nodes.
    self._highlight_nodes()
    self._highlight_prodlist()

    # Make sure the text lines up.
    self._position_text()
def _redraw_quick(self):
    """Cheap redraw used after an animation: refresh highlighting and
    text positions without rebuilding the tree widget."""
    # This should be more-or-less sufficient after an animation.
    self._highlight_nodes()
    self._highlight_prodlist()
    self._position_text()
def _highlight_nodes(self):
    """Color frontier nodes: leftmost in bold green, the rest teal."""
    # Highlight the list of nodes to be checked.
    bold = ('helvetica', -self._size.get(), 'bold')
    for treeloc in self._parser.frontier()[:1]:
        self._get(self._tree, treeloc)['color'] = '#20a050'
        self._get(self._tree, treeloc)['font'] = bold
    for treeloc in self._parser.frontier()[1:]:
        self._get(self._tree, treeloc)['color'] = '#008080'
def _highlight_prodlist(self):
    """Rebuild the production listbox, selecting expandable productions
    and annotating already-tried ones."""
    # Boy, too bad tkinter doesn't implement Listbox.itemconfig;
    # that would be pretty useful here.
    self._prodlist.delete(0, 'end')
    expandable = self._parser.expandable_productions()
    untried = self._parser.untried_expandable_productions()
    productions = self._productions
    for index in range(len(productions)):
        if productions[index] in expandable:
            if productions[index] in untried:
                self._prodlist.insert(index, ' %s' % productions[index])
            else:
                self._prodlist.insert(index, ' %s (TRIED)' %
                                      productions[index])
            self._prodlist.selection_set(index)
        else:
            self._prodlist.insert(index, ' %s' % productions[index])
def _position_text(self):
    """Align the text widgets with the tree: matched words line up under
    their leaves (dark green); unmatched words trail to the right (gray)."""
    # Line up the text widgets that are matched against the tree
    numwords = len(self._sent)
    num_matched = numwords - len(self._parser.remaining_text())
    leaves = self._tree_leaves()[:num_matched]
    xmax = self._tree.bbox()[0]
    for i in range(0, len(leaves)):
        widget = self._textwidgets[i]
        leaf = leaves[i]
        widget['color'] = '#006040'
        leaf['color'] = '#006040'
        widget.move(leaf.bbox()[0] - widget.bbox()[0], 0)
        xmax = widget.bbox()[2] + 10

    # Line up the text widgets that are not matched against the tree.
    for i in range(len(leaves), numwords):
        widget = self._textwidgets[i]
        widget['color'] = '#a0a0a0'
        widget.move(xmax - widget.bbox()[0], 0)
        xmax = widget.bbox()[2] + 10

    # If we have a complete parse, make everything green :)
    if self._parser.currently_complete():
        for twidget in self._textwidgets:
            twidget['color'] = '#00a000'

    # Move the matched leaves down to the text.
    for i in range(0, len(leaves)):
        widget = self._textwidgets[i]
        leaf = leaves[i]
        dy = widget.bbox()[1] - leaf.bbox()[3] - 10.0
        dy = max(dy, leaf.parent().node().bbox()[3] - leaf.bbox()[3] + 10)
        leaf.move(0, dy)
def _tree_leaves(self, tree=None):
    """Return the leaf widgets of ``tree`` (default: the current tree)
    in left-to-right order."""
    if tree is None:
        tree = self._tree
    # A non-segment widget is itself a leaf.
    if not isinstance(tree, TreeSegmentWidget):
        return [tree]
    collected = []
    for child in tree.subtrees():
        collected.extend(self._tree_leaves(child))
    return collected
#########################################
## Button Callbacks
#########################################
def destroy(self, *e):
    """Stop autostepping and close the window; safe to call twice."""
    self._autostep = 0
    if self._top is None:
        return
    window, self._top = self._top, None
    window.destroy()
def reset(self, *e):
    """Re-initialize the parser on the current sentence and redraw."""
    self._autostep = 0
    self._parser.initialize(self._sent)
    self._lastoper1['text'] = 'Reset Demo'
    self._lastoper2['text'] = ''
    self._redraw()
def autostep(self, *e):
    """Toggle autostep mode; when turning it on, kick off a step."""
    # Autostep needs animation frames to pace itself; force at least 2.
    if self._animation_frames.get() == 0:
        self._animation_frames.set(2)
    if self._autostep:
        self._autostep = 0
    else:
        self._autostep = 1
        self._step()
def cancel_autostep(self, *e):
    """Stop an autostep in progress."""
    #self._autostep_button['text'] = 'Autostep'
    self._autostep = 0
# Public button/keyboard entry points.  Each cancels autostepping
# (any user input stops auto-stepping) and delegates to the
# corresponding private operation.
def step(self, *e): self._autostep = 0; self._step()
def match(self, *e): self._autostep = 0; self._match()
def expand(self, *e): self._autostep = 0; self._expand()
def backtrack(self, *e): self._autostep = 0; self._backtrack()
def _step(self):
    """Perform one parser operation, preferring expand, then match,
    then backtrack; report 'Finished' when none applies."""
    if self._animating_lock: return

    # Try expanding, matching, and backtracking (in that order)
    if self._expand(): pass
    elif self._parser.untried_match() and self._match(): pass
    elif self._backtrack(): pass
    else:
        self._lastoper1['text'] = 'Finished'
        self._lastoper2['text'] = ''
        self._autostep = 0

    # Check if we just completed a parse.
    if self._parser.currently_complete():
        self._autostep = 0
        self._lastoper2['text'] += ' [COMPLETE PARSE]'
def _expand(self, *e):
    """Apply the next untried expansion; returns 1 on success, 0 if
    every expansion has been tried (and None if animation is running)."""
    if self._animating_lock: return
    old_frontier = self._parser.frontier()
    rv = self._parser.expand()
    if rv is not None:
        self._lastoper1['text'] = 'Expand:'
        self._lastoper2['text'] = rv
        self._prodlist.selection_clear(0, 'end')
        index = self._productions.index(rv)
        self._prodlist.selection_set(index)
        self._animate_expand(old_frontier[0])
        return 1
    else:
        self._lastoper1['text'] = 'Expand:'
        self._lastoper2['text'] = '(all expansions tried)'
        return 0
def _match(self, *e):
    """Try to match the leftmost frontier leaf against the next text
    token; returns 1 on success, 0 on failure."""
    if self._animating_lock: return
    old_frontier = self._parser.frontier()
    rv = self._parser.match()
    if rv is not None:
        self._lastoper1['text'] = 'Match:'
        self._lastoper2['text'] = rv
        self._animate_match(old_frontier[0])
        return 1
    else:
        self._lastoper1['text'] = 'Match:'
        self._lastoper2['text'] = '(failed)'
        return 0
def _backtrack(self, *e):
    """Undo the most recent expand/match; returns 1 on success, 0 when
    there is nothing left to backtrack to."""
    if self._animating_lock: return
    if self._parser.backtrack():
        # Walk down to the element at the new leftmost frontier location.
        elt = self._parser.tree()
        for i in self._parser.frontier()[0]:
            elt = elt[i]
        self._lastoper1['text'] = 'Backtrack'
        self._lastoper2['text'] = ''
        if isinstance(elt, Tree):
            self._animate_backtrack(self._parser.frontier()[0])
        else:
            self._animate_match_backtrack(self._parser.frontier()[0])
        return 1
    else:
        self._autostep = 0
        self._lastoper1['text'] = 'Finished'
        self._lastoper2['text'] = ''
        return 0
def about(self, *e):
    """Show the About dialog (falls back to ShowText without tkMessageBox)."""
    ABOUT = ("NLTK Recursive Descent Parser Demo\n" +
             "Written by Edward Loper")
    TITLE = 'About: Recursive Descent Parser Demo'
    try:
        from tkMessageBox import Message
        Message(message=ABOUT, title=TITLE).show()
    except:
        ShowText(self._top, TITLE, ABOUT)
def help(self, *e):
    """Show the module docstring as the help text."""
    self._autostep = 0
    # The default font's not very legible; try using 'fixed' instead.
    try:
        ShowText(self._top, 'Help: Recursive Descent Parser Demo',
                 (__doc__).strip(), width=75, font='fixed')
    except:
        ShowText(self._top, 'Help: Recursive Descent Parser Demo',
                 (__doc__).strip(), width=75)
def postscript(self, *e):
    """Print the canvas contents to a PostScript file."""
    self._autostep = 0
    self._cframe.print_to_file()
def mainloop(self, *args, **kwargs):
    """
    Enter the Tkinter mainloop.  This function must be called if
    this demo is created from a non-interactive program (e.g.
    from a script); otherwise, the demo will close as soon as
    the script completes.
    """
    # No-op inside IDLE, which runs its own Tk mainloop.
    if in_idle(): return
    self._top.mainloop(*args, **kwargs)
def resize(self, size=None):
    """Set the base font size (negative = pixel size in Tk) and redraw."""
    if size is not None: self._size.set(size)
    size = self._size.get()
    self._font.configure(size=-(abs(size)))
    self._boldfont.configure(size=-(abs(size)))
    self._sysfont.configure(size=-(abs(size)))
    self._bigfont.configure(size=-(abs(size + 2)))
    self._redraw()
#########################################
## Expand Production Selection
#########################################
def _toggle_grammar(self, *e):
    """Show or hide the production list, following self._show_grammar."""
    if self._show_grammar.get():
        self._prodframe.pack(fill='both', side='left', padx=2,
                             after=self._feedbackframe)
        self._lastoper1['text'] = 'Show Grammar'
    else:
        self._prodframe.pack_forget()
        self._lastoper1['text'] = 'Hide Grammar'
    self._lastoper2['text'] = ''
# def toggle_grammar(self, *e):
# self._show_grammar = not self._show_grammar
# if self._show_grammar:
# self._prodframe.pack(fill='both', expand='y', side='left',
# after=self._feedbackframe)
# self._lastoper1['text'] = 'Show Grammar'
# else:
# self._prodframe.pack_forget()
# self._lastoper1['text'] = 'Hide Grammar'
# self._lastoper2['text'] = ''
def _prodlist_select(self, event):
    """Listbox callback: try to expand with the selected production;
    on failure, restore the selection to the expandable ones."""
    selection = self._prodlist.curselection()
    if len(selection) != 1: return
    index = int(selection[0])
    old_frontier = self._parser.frontier()
    production = self._parser.expand(self._productions[index])

    if production:
        self._lastoper1['text'] = 'Expand:'
        self._lastoper2['text'] = production
        self._prodlist.selection_clear(0, 'end')
        self._prodlist.selection_set(index)
        self._animate_expand(old_frontier[0])
    else:
        # Reset the production selections.
        self._prodlist.selection_clear(0, 'end')
        for prod in self._parser.expandable_productions():
            index = self._productions.index(prod)
            self._prodlist.selection_set(index)
#########################################
## Animation
#########################################
def _animate_expand(self, treeloc):
    """Replace the widget at ``treeloc`` with the newly-expanded subtree
    and start the fade-in animation."""
    oldwidget = self._get(self._tree, treeloc)
    oldtree = oldwidget.parent()
    top = not isinstance(oldtree.parent(), TreeSegmentWidget)

    # Walk down to the subtree that was just expanded.
    tree = self._parser.tree()
    for i in treeloc:
        tree = tree[i]

    # Build the replacement widget; it starts white and fades to black.
    widget = tree_to_treesegment(self._canvas, tree,
                                 node_font=self._boldfont,
                                 leaf_color='white',
                                 tree_width=2, tree_color='white',
                                 node_color='white',
                                 leaf_font=self._font)
    widget.node()['color'] = '#20a050'

    # Align the new widget with the old one, then splice it in.
    (oldx, oldy) = oldtree.node().bbox()[:2]
    (newx, newy) = widget.node().bbox()[:2]
    widget.move(oldx - newx, oldy - newy)

    if top:
        self._cframe.add_widget(widget, 0, 5)
        widget.move(30 - widget.node().bbox()[0], 0)
        self._tree = widget
    else:
        oldtree.parent().replace_child(oldtree, widget)

    # Move the children over so they don't overlap.
    # Line the children up in a strange way.
    if widget.subtrees():
        dx = (oldx + widget.node().width() / 2 -
              widget.subtrees()[0].bbox()[0] / 2 -
              widget.subtrees()[0].bbox()[2] / 2)
        for subtree in widget.subtrees(): subtree.move(dx, 0)

    self._makeroom(widget)

    if top:
        self._cframe.destroy_widget(oldtree)
    else:
        oldtree.destroy()

    # Gray ramp for the fade-in animation frames.
    colors = ['gray%d' % (10 * int(10 * x / self._animation_frames.get()))
              for x in range(self._animation_frames.get(), 0, -1)]

    # Move the text string down, if necessary.
    dy = widget.bbox()[3] + 30 - self._canvas.coords(self._textline)[1]
    if dy > 0:
        for twidget in self._textwidgets: twidget.move(0, dy)
        self._canvas.move(self._textline, 0, dy)

    self._animate_expand_frame(widget, colors)
def _makeroom(self, treeseg):
    """
    Make sure that no sibling tree bbox's overlap.
    """
    parent = treeseg.parent()
    if not isinstance(parent, TreeSegmentWidget): return

    index = parent.subtrees().index(treeseg)

    # Handle siblings to the right
    rsiblings = parent.subtrees()[index + 1:]
    if rsiblings:
        dx = treeseg.bbox()[2] - rsiblings[0].bbox()[0] + 10
        for sibling in rsiblings: sibling.move(dx, 0)

    # Handle siblings to the left
    if index > 0:
        lsibling = parent.subtrees()[index - 1]
        dx = max(0, lsibling.bbox()[2] - treeseg.bbox()[0] + 10)
        treeseg.move(dx, 0)

    # Keep working up the tree.
    self._makeroom(parent)
def _animate_expand_frame(self, widget, colors):
    """One frame of the expand fade-in; reschedules itself every 50ms
    until the color list is exhausted, then finalizes in black."""
    if len(colors) > 0:
        self._animating_lock = 1
        widget['color'] = colors[0]
        for subtree in widget.subtrees():
            if isinstance(subtree, TreeSegmentWidget):
                subtree.node()['color'] = colors[0]
            else:
                subtree['color'] = colors[0]
        self._top.after(50, self._animate_expand_frame,
                        widget, colors[1:])
    else:
        widget['color'] = 'black'
        for subtree in widget.subtrees():
            if isinstance(subtree, TreeSegmentWidget):
                subtree.node()['color'] = 'black'
            else:
                subtree['color'] = 'black'
        self._redraw_quick()
        widget.node()['color'] = 'black'
        self._animating_lock = 0
        if self._autostep: self._step()
def _animate_backtrack(self, treeloc):
    """
    Start the backtrack animation for the subtree at C{treeloc}: flash
    the parent node (and its children) red, then fade it out through
    progressively lighter grays before removing it.
    """
    frames = self._animation_frames.get()
    # Flash red first, if we're animating.
    if frames == 0:
        colors = []
    else:
        colors = ['#a00000', '#000000', '#a00000']
    for i in range(1, frames + 1):
        colors.append('gray%d' % (10 * int(10 * i / frames)))
    # Collect the parent segment plus each child's drawable widget.
    parent_widget = self._get(self._tree, treeloc).parent()
    widgets = [parent_widget]
    for child in parent_widget.subtrees():
        if isinstance(child, TreeSegmentWidget):
            widgets.append(child.node())
        else:
            widgets.append(child)
    self._animate_backtrack_frame(widgets, colors)
def _animate_backtrack_frame(self, widgets, colors):
    """
    Draw one frame of the backtrack animation: recolor every widget in
    C{widgets} with the next color, then schedule the next frame 50ms
    later.  When C{colors} runs out, detach and destroy the children
    of the first widget, redraw, and release the animation lock.
    """
    if len(colors) > 0:
        self._animating_lock = 1
        for widget in widgets: widget['color'] = colors[0]
        self._top.after(50, self._animate_backtrack_frame,
                        widgets, colors[1:])
    else:
        # widgets[0] is the parent tree segment; remove its children.
        for widget in widgets[0].subtrees():
            widgets[0].remove_child(widget)
            widget.destroy()
        self._redraw_quick()
        self._animating_lock = 0
        if self._autostep: self._step()
def _animate_match_backtrack(self, treeloc):
    """
    Animate un-matching a leaf: slide the widget at C{treeloc} back up
    to just under its parent node, spread over the configured number
    of animation frames.
    """
    widget = self._get(self._tree, treeloc)
    node = widget.parent().node()
    # Per-frame vertical step; 1.0* forces float division, and
    # max(1, ...) guards against zero frames.
    dy = (1.0 * (node.bbox()[3] - widget.bbox()[1] + 14) /
          max(1, self._animation_frames.get()))
    self._animate_match_backtrack_frame(self._animation_frames.get(),
                                        widget, dy)
def _animate_match(self, treeloc):
    """
    Animate matching a leaf: slide the widget at C{treeloc} down to
    just above the text line, spread over the configured number of
    animation frames.
    """
    widget = self._get(self._tree, treeloc)
    # Per-frame vertical step; max(1, ...) guards against zero frames.
    dy = ((self._textwidgets[0].bbox()[1] - widget.bbox()[3] - 10.0) /
          max(1, self._animation_frames.get()))
    self._animate_match_frame(self._animation_frames.get(), widget, dy)
def _animate_match_frame(self, frame, widget, dy):
    """
    Draw one frame of the match animation: move C{widget} down by
    C{dy} and schedule the next frame 10ms later.  On the final frame,
    recolor the widget, redraw, and release the animation lock.
    """
    if frame > 0:
        self._animating_lock = 1
        widget.move(0, dy)
        self._top.after(10, self._animate_match_frame,
                        frame-1, widget, dy)
    else:
        # Dark green marks a successfully matched token.
        widget['color'] = '#006040'
        self._redraw_quick()
        self._animating_lock = 0
        if self._autostep: self._step()
def _animate_match_backtrack_frame(self, frame, widget, dy):
    """
    Draw one frame of the match-backtrack animation: move C{widget} by
    C{dy} and schedule the next frame 10ms later.  On the final frame,
    detach the widget from its parent, destroy it, and release the
    animation lock.
    """
    if frame > 0:
        self._animating_lock = 1
        widget.move(0, dy)
        self._top.after(10, self._animate_match_backtrack_frame,
                        frame-1, widget, dy)
    else:
        widget.parent().remove_child(widget)
        widget.destroy()
        self._animating_lock = 0
        if self._autostep: self._step()
def edit_grammar(self, *e):
    """Pop up a CFGEditor window so the user can modify the grammar;
    C{set_grammar} is invoked with the result when they finish."""
    CFGEditor(self._top, self._parser.grammar(), self.set_grammar)
def set_grammar(self, grammar):
    """
    Install C{grammar} as the parser's grammar and refresh the
    production listbox to show its productions.
    """
    self._parser.set_grammar(grammar)
    productions = list(grammar.productions())
    self._productions = productions
    # Rebuild the production listbox from scratch.
    self._prodlist.delete(0, 'end')
    index = 0
    while index < len(productions):
        self._prodlist.insert('end', ' %s' % productions[index])
        index = index + 1
def edit_sentence(self, *e):
    """
    Pop up an EntryDialog pre-filled with the current sentence;
    C{set_sentence} is invoked with the new text when the user
    finishes editing.
    """
    current = ' '.join(self._sent)
    EntryDialog(self._top, current, 'Enter a new sentence to parse.',
                self.set_sentence, 'Edit Text')
def set_sentence(self, sentence):
    """Tokenize C{sentence} on whitespace, install it as the sentence
    to parse, and reset the demo.  (`tokenize` is presumably the
    nltk_lite tokenize module imported at file top -- not visible
    here.)"""
    self._sent = list(tokenize.whitespace(sentence)) #[XX] use tagged?
    self.reset()
def demo():
    """
    Create a recursive descent parser demo, using a simple grammar and
    text.
    """
    from en.parser.nltk_lite.parse import cfg
    grammar = cfg.parse_grammar("""
    # Grammatical productions.
        S -> NP VP
        NP -> Det N PP | Det N
        VP -> V NP PP | V NP | V
        PP -> P NP
    # Lexical productions.
        NP -> 'I'
        Det -> 'the' | 'a'
        N -> 'man' | 'park' | 'dog' | 'telescope'
        V -> 'ate' | 'saw'
        P -> 'in' | 'under' | 'with'
    """)

    sent = list(tokenize.whitespace('the dog saw a man in the park'))

    RecursiveDescentDemo(grammar, sent).mainloop()

if __name__ == '__main__': demo()
| Python |
# Natural Language Toolkit: graphical representations package
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
#
# $Id: __init__.py 3461 2006-10-07 01:11:53Z edloper $
"""
Tools for graphically displaying and interacting with the objects and
processing classes defined by the Toolkit. These tools are primarily
intended to help students visualize the objects that they create.
The graphical tools are typically built using X{canvas widgets}, each
of which encapsulates the graphical elements and bindings used to
display a complex object on a Tkinter C{Canvas}. For example, NLTK
defines canvas widgets for displaying trees and directed graphs, as
well as a number of simpler widgets. These canvas widgets make it
easier to build new graphical tools and demos. See the class
documentation for L{CanvasWidget} for more information.
The C{nltk.draw} module defines the abstract C{CanvasWidget} base
class, and a number of simple canvas widgets. The remaining canvas
widgets are defined by submodules, such as L{nltk.draw.tree}.
The C{nltk.draw} module also defines L{CanvasFrame}, which
encapsulates a C{Canvas} and its scrollbars. It uses a
L{ScrollWatcherWidget} to ensure that all canvas widgets contained on
its canvas are within the scroll region.
Acknowledgements: Many of the ideas behind the canvas widget system
are derived from C{CLIG}, a Tk-based grapher for linguistic data
structures. For more information, see U{the CLIG
homepage<http://www.ags.uni-sb.de/~konrad/clig.html>}.
@group Graphical Demonstrations: rdparser, srparser, chart
@group Widgets: tree, cfg, fsa, plot, tree_edit
@group Abstract Widget Superclasses: CanvasWidget, AbstractContainerWidget
@sort: CanvasWidget, AbstractContainerWidget
@group Canvas Widgets: BoxWidget, BracketWidget, OvalWidget, ParenWidget,
ScrollWatcherWidget, SequenceWidget, SpaceWidget, StackWidget,
SymbolWidget, TextWidget
@sort: TextWidget, SymbolWidget, BoxWidget, OvalWidget, ParenWidget,
BracketWidget, SequenceWidget, StackWidget, SpaceWidget,
ScrollWatcherWidget
@group Tkinter Widgets: CanvasFrame, EntryDialog, ShowText
"""
from Tkinter import *
##//////////////////////////////////////////////////////
## CanvasWidget
##//////////////////////////////////////////////////////
class CanvasWidget(object):
    """
    A collection of graphical elements and bindings used to display a
    complex object on a Tkinter C{Canvas}.  A canvas widget is
    responsible for managing the C{Canvas} tags and callback bindings
    necessary to display and interact with the object.  Canvas widgets
    are often organized into hierarchies, where parent canvas widgets
    control aspects of their child widgets.

    Each canvas widget is bound to a single C{Canvas}.  This C{Canvas}
    is specified as the first argument to the C{CanvasWidget}'s
    constructor.

    Attributes
    ==========
    Each canvas widget can support a variety of X{attributes}, which
    control how the canvas widget is displayed.  Some typical examples
    attributes are C{color}, C{font}, and C{radius}.  Each attribute
    has a default value.  This default value can be overridden in the
    constructor, using keyword arguments of the form
    C{attribute=value}:

        >>> cn = CanvasText(c, 'test', color='red')

    Attribute values can also be changed after a canvas widget has
    been constructed, using the C{__setitem__} operator:

        >>> cn['font'] = 'times'

    The current value of an attribute value can be queried using the
    C{__getitem__} operator:

        >>> cn['color']
        red

    For a list of the attributes supported by a type of canvas widget,
    see its class documentation.

    Interaction
    ===========
    The attribute C{'draggable'} controls whether the user can drag a
    canvas widget around the canvas.  By default, canvas widgets
    are not draggable.

    C{CanvasWidget} provides callback support for two types of user
    interaction: clicking and dragging.  The method C{bind_click}
    registers a callback function that is called whenever the canvas
    widget is clicked.  The method C{bind_drag} registers a callback
    function that is called after the canvas widget is dragged.  If
    the user clicks or drags a canvas widget with no registered
    callback function, then the interaction event will propagate to
    its parent.  For each canvas widget, only one callback function
    may be registered for an interaction event.  Callback functions
    can be deregistered with the C{unbind_click} and C{unbind_drag}
    methods.

    Subclassing
    ===========
    C{CanvasWidget} is an abstract class.  Subclasses are required to
    implement the following methods:

      - C{__init__}: Builds a new canvas widget.  It must perform the
        following three tasks (in order):
          - Create any new graphical elements.
          - Call C{_add_child_widget} on each child widget.
          - Call the C{CanvasWidget} constructor.
      - C{_tags}: Returns a list of the canvas tags for all graphical
        elements managed by this canvas widget, not including
        graphical elements managed by its child widgets.
      - C{_manage}: Arranges the child widgets of this canvas widget.
        This is typically only called when the canvas widget is
        created.
      - C{_update}: Update this canvas widget in response to a
        change in a single child.

    For C{CanvasWidget}s with no child widgets, the default
    definitions for C{_manage} and C{_update} may be used.

    If a subclass defines any attributes, then it should implement
    C{__getitem__} and C{__setitem__}.  If either of these methods is
    called with an unknown attribute, then they should propagate the
    request to C{CanvasWidget}.

    Most subclasses implement a number of additional methods that
    modify the C{CanvasWidget} in some way.  These methods must call
    C{parent.update(self)} after making any changes to the canvas
    widget's graphical elements.  The canvas widget must also call
    C{parent.update(self)} after changing any attribute value that
    affects the shape or position of the canvas widget's graphical
    elements.

    @type __canvas: C{Tkinter.Canvas}
    @ivar __canvas: This C{CanvasWidget}'s canvas.

    @type __parent: C{CanvasWidget} or C{None}
    @ivar __parent: This C{CanvasWidget}'s hierarchical parent widget.
    @type __children: C{list} of C{CanvasWidget}
    @ivar __children: This C{CanvasWidget}'s hierarchical child widgets.

    @type __updating: C{boolean}
    @ivar __updating: Is this canvas widget currently performing an
        update?  If it is, then it will ignore any new update requests
        from child widgets.

    @type __draggable: C{boolean}
    @ivar __draggable: Is this canvas widget draggable?
    @type __press: C{event}
    @ivar __press: The ButtonPress event that we're currently handling.
    @type __drag_x: C{int}
    @ivar __drag_x: Where it's been moved to (to find dx)
    @type __drag_y: C{int}
    @ivar __drag_y: Where it's been moved to (to find dy)
    @type __callbacks: C{dictionary}
    @ivar __callbacks: Registered callbacks.  Currently, four keys are
        used: C{1}, C{2}, C{3}, and C{'drag'}.  The values are
        callback functions.  Each callback function takes a single
        argument, which is the C{CanvasWidget} that triggered the
        callback.
    """
    def __init__(self, canvas, parent=None, **attribs):
        """
        Create a new canvas widget.  This constructor should only be
        called by subclass constructors; and it should be called only
        X{after} the subclass has constructed all graphical canvas
        objects and registered all child widgets.

        @param canvas: This canvas widget's canvas.
        @type canvas: C{Tkinter.Canvas}
        @param parent: This canvas widget's hierarchical parent.
        @type parent: C{CanvasWidget}
        @param attribs: The new canvas widget's attributes.
        """
        if self.__class__ == CanvasWidget:
            # (fixed: use call-style raise instead of the py2-only
            # "raise TypeError, msg" statement form)
            raise TypeError('CanvasWidget is an abstract base class')

        if not isinstance(canvas, Canvas):
            raise TypeError('Expected a canvas!')

        self.__canvas = canvas
        self.__parent = parent

        # If the subclass constructor called _add_child_widget, then
        # self.__children will already exist.
        if not hasattr(self, '_CanvasWidget__children'): self.__children = []

        # Is this widget hidden?
        self.__hidden = 0

        # Update control (prevents infinite loops)
        self.__updating = 0

        # Button-press and drag callback handling.
        self.__press = None
        self.__drag_x = self.__drag_y = 0
        self.__callbacks = {}
        self.__draggable = 0

        # Set up attributes.
        for (attr, value) in attribs.items(): self[attr] = value

        # Manage this canvas widget
        self._manage()

        # Register any new bindings
        for tag in self._tags():
            self.__canvas.tag_bind(tag, '<ButtonPress-1>',
                                   self.__press_cb)
            self.__canvas.tag_bind(tag, '<ButtonPress-2>',
                                   self.__press_cb)
            self.__canvas.tag_bind(tag, '<ButtonPress-3>',
                                   self.__press_cb)

    ##//////////////////////////////////////////////////////
    ##  Inherited methods.
    ##//////////////////////////////////////////////////////

    def bbox(self):
        """
        @return: A bounding box for this C{CanvasWidget}. The bounding
            box is a tuple of four coordinates, M{(xmin, ymin, xmax,
            ymax)}, for a rectangle which encloses all of the canvas
            widget's graphical elements.  Bounding box coordinates are
            specified with respect to the C{Canvas}'s coordinate
            space.
        @rtype: C{4-tuple} of C{int}s
        """
        if self.__hidden: return (0,0,0,0)
        if len(self.tags()) == 0: raise ValueError('No tags')
        return self.__canvas.bbox(*self.tags())

    def width(self):
        """
        @return: The width of this canvas widget's bounding box, in
            its C{Canvas}'s coordinate space.
        @rtype: C{int}
        """
        if len(self.tags()) == 0: raise ValueError('No tags')
        bbox = self.__canvas.bbox(*self.tags())
        return bbox[2]-bbox[0]

    def height(self):
        """
        @return: The height of this canvas widget's bounding box, in
            its C{Canvas}'s coordinate space.
        @rtype: C{int}
        """
        if len(self.tags()) == 0: raise ValueError('No tags')
        bbox = self.__canvas.bbox(*self.tags())
        return bbox[3]-bbox[1]

    def parent(self):
        """
        @return: The hierarchical parent of this canvas widget.
            C{self} is considered a subpart of its parent for
            purposes of user interaction.
        @rtype: C{CanvasWidget} or C{None}
        """
        return self.__parent

    def child_widgets(self):
        """
        @return: A list of the hierarchical children of this canvas
            widget.  These children are considered part of C{self}
            for purposes of user interaction.
        @rtype: C{list} of C{CanvasWidget}
        """
        return self.__children

    def canvas(self):
        """
        @return: The canvas that this canvas widget is bound to.
        @rtype: C{Tkinter.Canvas}
        """
        return self.__canvas

    def move(self, dx, dy):
        """
        Move this canvas widget by a given distance.  In particular,
        shift the canvas widget right by C{dx} pixels, and down by
        C{dy} pixels.  Both C{dx} and C{dy} may be negative, resulting
        in leftward or upward movement.

        @type dx: C{int}
        @param dx: The number of pixels to move this canvas widget
            rightwards.
        @type dy: C{int}
        @param dy: The number of pixels to move this canvas widget
            downwards.
        @rtype: C{None}
        """
        if dx == dy == 0: return
        for tag in self.tags():
            self.__canvas.move(tag, dx, dy)
        if self.__parent: self.__parent.update(self)

    def moveto(self, x, y, anchor='NW'):
        """
        Move this canvas widget to the given location.  In particular,
        shift the canvas widget such that the corner or side of the
        bounding box specified by C{anchor} is at location (C{x},
        C{y}).

        @param x,y: The location that the canvas widget should be moved
            to.
        @param anchor: The corner or side of the canvas widget that
            should be moved to the specified location.  C{'N'}
            specifies the top center; C{'NE'} specifies the top right
            corner; etc.
        """
        x1,y1,x2,y2 = self.bbox()
        if anchor == 'NW': self.move(x-x1, y-y1)
        if anchor == 'N': self.move(x-x1/2-x2/2, y-y1)
        if anchor == 'NE': self.move(x-x2, y-y1)
        if anchor == 'E': self.move(x-x2, y-y1/2-y2/2)
        if anchor == 'SE': self.move(x-x2, y-y2)
        if anchor == 'S': self.move(x-x1/2-x2/2, y-y2)
        if anchor == 'SW': self.move(x-x1, y-y2)
        if anchor == 'W': self.move(x-x1, y-y1/2-y2/2)

    def destroy(self):
        """
        Remove this C{CanvasWidget} from its C{Canvas}.  After a
        C{CanvasWidget} has been destroyed, it should not be accessed.

        Note that you only need to destroy a top-level
        C{CanvasWidget}; its child widgets will be destroyed
        automatically.  If you destroy a non-top-level
        C{CanvasWidget}, then the entire top-level widget will be
        destroyed.

        @raise ValueError: if this C{CanvasWidget} has a parent.
        @rtype: C{None}
        """
        if self.__parent is not None:
            self.__parent.destroy()
            return

        for tag in self.tags():
            self.__canvas.tag_unbind(tag, '<ButtonPress-1>')
            self.__canvas.tag_unbind(tag, '<ButtonPress-2>')
            self.__canvas.tag_unbind(tag, '<ButtonPress-3>')
        self.__canvas.delete(*self.tags())
        self.__canvas = None

    def update(self, child):
        """
        Update the graphical display of this canvas widget, and all of
        its ancestors, in response to a change in one of this canvas
        widget's children.

        @param child: The child widget that changed.
        @type child: C{CanvasWidget}
        """
        if self.__hidden or child.__hidden: return
        # If we're already updating, then do nothing.  This prevents
        # infinite loops when _update modifies its children.
        if self.__updating: return
        self.__updating = 1

        # Update this CanvasWidget.
        self._update(child)

        # Propagate update request to the parent.
        if self.__parent: self.__parent.update(self)

        # We're done updating.
        self.__updating = 0

    def manage(self):
        """
        Arrange this canvas widget and all of its descendants.

        @rtype: C{None}
        """
        if self.__hidden: return
        for child in self.__children: child.manage()
        self._manage()

    def tags(self):
        """
        @return: a list of the canvas tags for all graphical
            elements managed by this canvas widget, including
            graphical elements managed by its child widgets.
        @rtype: C{list} of C{int}
        """
        if self.__canvas is None:
            raise ValueError('Attempt to access a destroyed canvas widget')
        tags = []
        tags += self._tags()
        for child in self.__children:
            tags += child.tags()
        return tags

    def __setitem__(self, attr, value):
        """
        Set the value of the attribute C{attr} to C{value}.  See the
        class documentation for a list of attributes supported by this
        canvas widget.

        @rtype: C{None}
        """
        if attr == 'draggable':
            self.__draggable = value
        else:
            raise ValueError('Unknown attribute %r' % attr)

    def __getitem__(self, attr):
        """
        @return: the value of the attribute C{attr}.  See the class
            documentation for a list of attributes supported by this
            canvas widget.
        @rtype: (any)
        """
        if attr == 'draggable':
            return self.__draggable
        else:
            raise ValueError('Unknown attribute %r' % attr)

    def __repr__(self):
        """
        @return: a string representation of this canvas widget.
        @rtype: C{string}
        """
        return '<%s>' % self.__class__.__name__

    def hide(self):
        """
        Temporarily hide this canvas widget.

        @rtype: C{None}
        """
        self.__hidden = 1
        for tag in self.tags():
            self.__canvas.itemconfig(tag, state='hidden')

    def show(self):
        """
        Show a hidden canvas widget.

        @rtype: C{None}
        """
        self.__hidden = 0
        for tag in self.tags():
            self.__canvas.itemconfig(tag, state='normal')

    def hidden(self):
        """
        @return: True if this canvas widget is hidden.
        @rtype: C{boolean}
        """
        return self.__hidden

    ##//////////////////////////////////////////////////////
    ##  Callback interface
    ##//////////////////////////////////////////////////////

    def bind_click(self, callback, button=1):
        """
        Register a new callback that will be called whenever this
        C{CanvasWidget} is clicked on.

        @type callback: C{function}
        @param callback: The callback function that will be called
            whenever this C{CanvasWidget} is clicked.  This function
            will be called with this C{CanvasWidget} as its argument.
        @type button: C{int}
        @param button: Which button the user should use to click on
            this C{CanvasWidget}.  Typically, this should be 1 (left
            button), 3 (right button), or 2 (middle button).
        """
        self.__callbacks[button] = callback

    def bind_drag(self, callback):
        """
        Register a new callback that will be called after this
        C{CanvasWidget} is dragged.  This implicitly makes this
        C{CanvasWidget} draggable.

        @type callback: C{function}
        @param callback: The callback function that will be called
            whenever this C{CanvasWidget} is clicked.  This function
            will be called with this C{CanvasWidget} as its argument.
        """
        self.__draggable = 1
        self.__callbacks['drag'] = callback

    def unbind_click(self, button=1):
        """
        Remove a callback that was registered with C{bind_click}.  It
        is not an error to unbind a button with no registered callback.

        @type button: C{int}
        @param button: Which button the user should use to click on
            this C{CanvasWidget}.  Typically, this should be 1 (left
            button), 3 (right button), or 2 (middle button).
        """
        # (fixed: pop with a default instead of a bare try/except,
        # which silently swallowed *every* exception)
        self.__callbacks.pop(button, None)

    def unbind_drag(self):
        """
        Remove a callback that was registered with C{bind_drag}.  It
        is not an error to unbind when no drag callback is registered.
        """
        self.__callbacks.pop('drag', None)

    ##//////////////////////////////////////////////////////
    ##  Callback internals
    ##//////////////////////////////////////////////////////

    def __press_cb(self, event):
        """
        Handle a button-press event:
          - record the button press event in C{self.__press}
          - register a button-release callback.
          - if this CanvasWidget or any of its ancestors are
            draggable, then register the appropriate motion callback.
        """
        # If we're already waiting for a button release, then ignore
        # this new button press.
        if (self.__canvas.bind('<ButtonRelease-1>') or
            self.__canvas.bind('<ButtonRelease-2>') or
            self.__canvas.bind('<ButtonRelease-3>')):
            return

        # Unbind motion (just in case; this shouldn't be necessary)
        self.__canvas.unbind('<Motion>')

        # Record the button press event.
        self.__press = event

        # If any ancestor is draggable, set up a motion callback.
        # (Only if they pressed button number 1)
        if event.num == 1:
            widget = self
            while widget is not None:
                if widget['draggable']:
                    widget.__start_drag(event)
                    break
                widget = widget.parent()

        # Set up the button release callback.
        self.__canvas.bind('<ButtonRelease-%d>' % event.num,
                           self.__release_cb)

    def __start_drag(self, event):
        """
        Begin dragging this object:
          - register a motion callback
          - record the drag coordinates
        """
        self.__canvas.bind('<Motion>', self.__motion_cb)
        self.__drag_x = event.x
        self.__drag_y = event.y

    def __motion_cb(self, event):
        """
        Handle a motion event:
          - move this object to the new location
          - record the new drag coordinates
        """
        self.move(event.x-self.__drag_x, event.y-self.__drag_y)
        self.__drag_x = event.x
        self.__drag_y = event.y

    def __release_cb(self, event):
        """
        Handle a release callback:
          - unregister motion & button release callbacks.
          - decide whether they clicked, dragged, or cancelled
          - call the appropriate handler.
        """
        # Unbind the button release & motion callbacks.
        self.__canvas.unbind('<ButtonRelease-%d>' % event.num)
        self.__canvas.unbind('<Motion>')

        # Is it a click or a drag?
        if (event.time - self.__press.time < 100 and
            abs(event.x-self.__press.x) + abs(event.y-self.__press.y) < 5):
            # Move it back, if we were dragging.
            if self.__draggable and event.num == 1:
                self.move(self.__press.x - self.__drag_x,
                          self.__press.y - self.__drag_y)
            self.__click(event.num)
        elif event.num == 1:
            self.__drag()

        self.__press = None

    def __drag(self):
        """
        If this C{CanvasWidget} has a drag callback, then call it;
        otherwise, find the closest ancestor with a drag callback, and
        call it.  If no ancestors have a drag callback, do nothing.
        """
        if self.__draggable:
            if 'drag' in self.__callbacks:
                cb = self.__callbacks['drag']
                try:
                    cb(self)
                except Exception:
                    # Best-effort: report, but never let a buggy user
                    # callback break the GUI's event loop.
                    print('Error in drag callback for %r' % self)
        elif self.__parent is not None:
            self.__parent.__drag()

    def __click(self, button):
        """
        If this C{CanvasWidget} has a drag callback, then call it;
        otherwise, find the closest ancestor with a click callback, and
        call it.  If no ancestors have a click callback, do nothing.
        """
        if button in self.__callbacks:
            cb = self.__callbacks[button]
            cb(self)
        elif self.__parent is not None:
            self.__parent.__click(button)

    ##//////////////////////////////////////////////////////
    ##  Child/parent Handling
    ##//////////////////////////////////////////////////////

    def _add_child_widget(self, child):
        """
        Register a hierarchical child widget.  The child will be
        considered part of this canvas widget for purposes of user
        interaction.  C{_add_child_widget} has two direct effects:
          - It sets C{child}'s parent to this canvas widget.
          - It adds C{child} to the list of canvas widgets returned by
            the C{child_widgets} member function.

        @param child: The new child widget.  C{child} must not already
            have a parent.
        @type child: C{CanvasWidget}
        """
        if not hasattr(self, '_CanvasWidget__children'): self.__children = []
        if child.__parent is not None:
            # (fixed: the message was passed as a second exception
            # argument instead of being %-formatted into the string)
            raise ValueError('%s already has a parent' % child)
        child.__parent = self
        self.__children.append(child)

    def _remove_child_widget(self, child):
        """
        Remove a hierarchical child widget.  This child will no longer
        be considered part of this canvas widget for purposes of user
        interaction.  C{_add_child_widget} has two direct effects:
          - It sets C{child}'s parent to C{None}.
          - It removes C{child} from the list of canvas widgets
            returned by the C{child_widgets} member function.

        @param child: The child widget to remove.  C{child} must be a
            child of this canvas widget.
        @type child: C{CanvasWidget}
        """
        self.__children.remove(child)
        child.__parent = None

    ##//////////////////////////////////////////////////////
    ##  Defined by subclass
    ##//////////////////////////////////////////////////////

    def _tags(self):
        """
        @return: a list of canvas tags for all graphical elements
            managed by this canvas widget, not including graphical
            elements managed by its child widgets.
        @rtype: C{list} of C{int}
        """
        raise AssertionError()

    def _manage(self):
        """
        Arrange the child widgets of this canvas widget.  This method
        is called when the canvas widget is initially created.  It is
        also called if the user calls the C{manage} method on this
        canvas widget or any of its ancestors.

        @rtype: C{None}
        """
        pass

    def _update(self, child):
        """
        Update this canvas widget in response to a change in one of
        its children.

        @param child: The child that changed.
        @type child: C{CanvasWidget}
        @rtype: C{None}
        """
        pass
##//////////////////////////////////////////////////////
## Basic widgets.
##//////////////////////////////////////////////////////
class TextWidget(CanvasWidget):
    """
    A canvas widget that displays a single string of text.

    Attributes:
      - C{color}: the color of the text.
      - C{font}: the font used to display the text.
      - C{justify}: justification for multi-line texts.  Valid values
        are C{left}, C{center}, and C{right}.
      - C{width}: the width of the text.  If the text is wider than
        this width, it will be line-wrapped at whitespace.
      - C{draggable}: whether the text can be dragged by the user.
    """
    def __init__(self, canvas, text, **attribs):
        """
        Create a new text widget.

        @type canvas: C{Tkinter.Canvas}
        @param canvas: This canvas widget's canvas.
        @type text: C{string}
        @param text: The string of text to display.
        @param attribs: The new canvas widget's attributes.
        """
        self._text = text
        self._tag = canvas.create_text(1, 1, text=text)
        CanvasWidget.__init__(self, canvas, **attribs)

    def __setitem__(self, attr, value):
        if attr in ('color', 'font', 'justify', 'width'):
            # The Tk option for text color is 'fill'.
            if attr == 'color': attr = 'fill'
            self.canvas().itemconfig(self._tag, {attr:value})
        else:
            CanvasWidget.__setitem__(self, attr, value)

    def __getitem__(self, attr):
        if attr == 'width':
            return int(self.canvas().itemcget(self._tag, attr))
        elif attr in ('color', 'font', 'justify'):
            if attr == 'color': attr = 'fill'
            return self.canvas().itemcget(self._tag, attr)
        else:
            return CanvasWidget.__getitem__(self, attr)

    def _tags(self): return [self._tag]

    def text(self):
        """
        @return: The text displayed by this text widget.
        @rtype: C{string}
        """
        # BUG FIX: the canvas text item option is 'text', not 'TEXT';
        # querying 'TEXT' is an unknown option and raises a TclError.
        return self.canvas().itemcget(self._tag, 'text')

    def set_text(self, text):
        """
        Change the text that is displayed by this text widget.

        @type text: C{string}
        @param text: The string of text to display.
        @rtype: C{None}
        """
        # BUG FIX: keep the cached copy in sync so __repr__ does not
        # report stale text after a set_text call.
        self._text = text
        self.canvas().itemconfig(self._tag, text=text)
        if self.parent() is not None:
            self.parent().update(self)

    def __repr__(self):
        return '[Text: %r]' % self._text
class SymbolWidget(TextWidget):
    """
    A canvas widget that displays special symbols, such as the
    negation sign and the exists operator.  Symbols are specified by
    name.  Currently, the following symbol names are defined: C{neg},
    C{disj}, C{conj}, C{lambda}, C{merge}, C{forall}, C{exists},
    C{subseteq}, C{subset}, C{notsubset}, C{emptyset}, C{imp},
    C{rightarrow}, C{equal}, C{notequal}, C{epsilon}.

    Attributes:
      - C{color}: the color of the text.
      - C{draggable}: whether the text can be dragged by the user.

    @cvar SYMBOLS: A dictionary mapping from symbols to the character
        in the C{symbol} font used to render them.
    """
    SYMBOLS = {'neg':'\330', 'disj':'\332', 'conj': '\331',
               'lambda': '\154', 'merge': '\304',
               'forall': '\042', 'exists': '\044',
               'subseteq': '\315', 'subset': '\314',
               'notsubset': '\313', 'emptyset': '\306',
               'imp': '\336', 'rightarrow': chr(222), #'\256',
               'equal': '\75', 'notequal': '\271',
               'intersection': '\307', 'union': '\310',
               'epsilon': 'e',
               }

    def __init__(self, canvas, symbol, **attribs):
        """
        Create a new symbol widget.

        @type canvas: C{Tkinter.Canvas}
        @param canvas: This canvas widget's canvas.
        @type symbol: C{string}
        @param symbol: The name of the symbol to display.
        @param attribs: The new canvas widget's attributes.
        """
        # All symbols are rendered with the Tk 'symbol' font.
        attribs['font'] = 'symbol'
        TextWidget.__init__(self, canvas, '', **attribs)
        self.set_symbol(symbol)

    def symbol(self):
        """
        @return: the name of the symbol that is displayed by this
            symbol widget.
        @rtype: C{string}
        """
        return self._symbol

    def set_symbol(self, symbol):
        """
        Change the symbol that is displayed by this symbol widget.

        @type symbol: C{string}
        @param symbol: The name of the symbol to display.
        @raise ValueError: if C{symbol} is not a known symbol name.
        """
        # ('in' replaces the deprecated dict.has_key().)
        if symbol not in SymbolWidget.SYMBOLS:
            raise ValueError('Unknown symbol: %s' % symbol)
        self._symbol = symbol
        self.set_text(SymbolWidget.SYMBOLS[symbol])

    def __repr__(self):
        return '[Symbol: %r]' % self._symbol

    # A staticmethod that displays all symbols.
    def symbolsheet(size=20):
        """
        Open a new Tkinter window that displays the entire alphabet
        for the symbol font.  This is useful for constructing the
        L{SymbolWidget.SYMBOLS} dictionary.
        """
        top = Tk()
        def destroy(e, top=top): top.destroy()
        top.bind('q', destroy)
        Button(top, text='Quit', command=top.destroy).pack(side='bottom')
        text = Text(top, font=('helvetica', -size), width=20, height=30)
        text.pack(side='left')
        sb = Scrollbar(top, command=text.yview)
        text['yscrollcommand'] = sb.set
        sb.pack(side='right', fill='y')
        text.tag_config('symbol', font=('symbol', -size))
        for i in range(256):
            if i in (0,10): continue # null and newline
            # Label the row with the symbol name if it is a known
            # symbol; otherwise with its character code.
            for k,v in SymbolWidget.SYMBOLS.items():
                if v == chr(i):
                    text.insert('end', '%-10s\t' % k)
                    break
            else:
                text.insert('end', '%-10d \t' % i)
            text.insert('end', '[%s]\n' % chr(i), 'symbol')
        top.mainloop()
    symbolsheet = staticmethod(symbolsheet)
class AbstractContainerWidget(CanvasWidget):
    """
    An abstract class for canvas widgets that contain a single child,
    such as C{BoxWidget} and C{OvalWidget}.  Subclasses must define
    a constructor, which should create any new graphical elements and
    then call the C{AbstractCanvasContainer} constructor.  Subclasses
    must also define the C{_update} method and the C{_tags} method;
    and any subclasses that define attributes should define
    C{__setitem__} and C{__getitem__}.
    """
    def __init__(self, canvas, child, **attribs):
        """
        Create a new container widget.  This constructor should only
        be called by subclass constructors.

        @type canvas: C{Tkinter.Canvas}
        @param canvas: This canvas widget's canvas.
        @param child: The container's child widget.  C{child} must not
            have a parent.
        @type child: C{CanvasWidget}
        @param attribs: The new canvas widget's attributes.
        """
        self._child = child
        self._add_child_widget(child)
        CanvasWidget.__init__(self, canvas, **attribs)

    def _manage(self):
        self._update(self._child)

    def child(self):
        """
        @return: The child widget contained by this container widget.
        @rtype: C{CanvasWidget}
        """
        return self._child

    def set_child(self, child):
        """
        Change the child widget contained by this container widget.

        @param child: The new child widget.  C{child} must not have a
            parent.
        @type child: C{CanvasWidget}
        @rtype: C{None}
        """
        self._remove_child_widget(self._child)
        self._add_child_widget(child)
        self._child = child
        self.update(child)

    def __repr__(self):
        name = self.__class__.__name__
        # Strip a trailing 'Widget', e.g. BoxWidget -> '[Box: ...]'.
        if name.endswith('Widget'): name = name[:-6]
        return '[%s: %r]' % (name, self._child)
class BoxWidget(AbstractContainerWidget):
    """
    A canvas widget that places a box around a child widget.

    Attributes:
      - C{fill}: The color used to fill the interior of the box.
      - C{outline}: The color used to draw the outline of the box.
      - C{width}: The width of the outline of the box.
      - C{margin}: The number of pixels space left between the child
        and the box.
      - C{draggable}: whether the text can be dragged by the user.
    """
    def __init__(self, canvas, child, **attribs):
        """
        Create a new box widget.

        @type canvas: C{Tkinter.Canvas}
        @param canvas: This canvas widget's canvas.
        @param child: The child widget.  C{child} must not have a
            parent.
        @type child: C{CanvasWidget}
        @param attribs: The new canvas widget's attributes.
        """
        self._child = child
        self._margin = 1
        self._box = canvas.create_rectangle(1,1,1,1)
        # Keep the box behind the child in the canvas stacking order.
        canvas.tag_lower(self._box)
        AbstractContainerWidget.__init__(self, canvas, child, **attribs)

    def __setitem__(self, attr, value):
        if attr == 'margin': self._margin = value
        elif attr in ('outline', 'fill', 'width'):
            self.canvas().itemconfig(self._box, {attr:value})
        else:
            CanvasWidget.__setitem__(self, attr, value)

    def __getitem__(self, attr):
        if attr == 'margin': return self._margin
        elif attr == 'width':
            # Tk reports the outline width as a string; convert it.
            return float(self.canvas().itemcget(self._box, attr))
        elif attr in ('outline', 'fill'):
            # ('width' removed from this tuple: it was unreachable
            # dead code, shadowed by the branch above.)
            return self.canvas().itemcget(self._box, attr)
        else:
            return CanvasWidget.__getitem__(self, attr)

    def _update(self, child):
        # Resize the rectangle to enclose the child plus the margin
        # and half the outline width on every side.
        (x1, y1, x2, y2) = child.bbox()
        margin = self._margin + self['width']/2
        self.canvas().coords(self._box, x1-margin, y1-margin,
                             x2+margin, y2+margin)

    def _tags(self): return [self._box]
class OvalWidget(AbstractContainerWidget):
    """
    A canvas widget that places a oval around a child widget.

    Attributes:
      - C{fill}: The color used to fill the interior of the oval.
      - C{outline}: The color used to draw the outline of the oval.
      - C{width}: The width of the outline of the oval.
      - C{margin}: The number of pixels space left between the child
        and the oval.
      - C{draggable}: whether the text can be dragged by the user.
      - C{double}: If true, then a double-oval is drawn.
    """
    def __init__(self, canvas, child, **attribs):
        """
        Create a new oval widget.

        @type canvas: C{Tkinter.Canvas}
        @param canvas: This canvas widget's canvas.
        @param child: The child widget.  C{child} must not have a
            parent.
        @type child: C{CanvasWidget}
        @param attribs: The new canvas widget's attributes.
        """
        self._child = child
        self._margin = 1
        self._oval = canvas.create_oval(1, 1, 1, 1)
        self._circle = attribs.pop('circle', False)
        self._double = attribs.pop('double', False)
        if self._double:
            self._oval2 = canvas.create_oval(1, 1, 1, 1)
        else:
            self._oval2 = None
        canvas.tag_lower(self._oval)
        AbstractContainerWidget.__init__(self, canvas, child, **attribs)

    def __setitem__(self, attr, value):
        c = self.canvas()
        if attr == 'margin':
            self._margin = value
        elif attr == 'double':
            if value == True and self._oval2 is None:
                # Copy attributes & position from self._oval.
                x1, y1, x2, y2 = c.bbox(self._oval)
                w = self['width'] * 2
                self._oval2 = c.create_oval(
                    x1 - w, y1 - w, x2 + w, y2 + w,
                    outline=c.itemcget(self._oval, 'outline'),
                    width=c.itemcget(self._oval, 'width'))
                c.tag_lower(self._oval2)
            if value == False and self._oval2 is not None:
                c.delete(self._oval2)
                self._oval2 = None
            # Bug fix: record the new state, so that
            # __getitem__('double') reflects what is actually drawn.
            self._double = bool(value)
        elif attr in ('outline', 'fill', 'width'):
            c.itemconfig(self._oval, {attr: value})
            # Forward everything but 'fill' to the inner oval (the
            # inner oval is never filled).  A duplicated copy of this
            # update was removed -- it repeated the identical
            # itemconfig call a second time.
            if self._oval2 is not None and attr != 'fill':
                c.itemconfig(self._oval2, {attr: value})
        else:
            CanvasWidget.__setitem__(self, attr, value)

    def __getitem__(self, attr):
        if attr == 'margin':
            return self._margin
        elif attr == 'double':
            # Bug fix: this used to be `self._double is not None`,
            # which is always true for a boolean flag.
            return self._double
        elif attr == 'width':
            return float(self.canvas().itemcget(self._oval, attr))
        elif attr in ('outline', 'fill'):
            return self.canvas().itemcget(self._oval, attr)
        else:
            return CanvasWidget.__getitem__(self, attr)

    # The ratio between inscribed & circumscribed ovals
    RATIO = 1.4142135623730949

    def _update(self, child):
        R = OvalWidget.RATIO
        (x1, y1, x2, y2) = child.bbox()
        margin = self._margin

        # If we're a circle, pretend our contents are square.
        if self._circle:
            dx, dy = abs(x1 - x2), abs(y1 - y2)
            if dx > dy:
                y = (y1 + y2) / 2
                y1, y2 = y - dx / 2, y + dx / 2
            elif dy > dx:
                x = (x1 + x2) / 2
                x1, x2 = x - dy / 2, x + dy / 2

        # Find the four corners of the circumscribed oval.
        left = int((x1 * (1 + R) + x2 * (1 - R)) / 2)
        right = left + int((x2 - x1) * R)
        top = int((y1 * (1 + R) + y2 * (1 - R)) / 2)
        bot = top + int((y2 - y1) * R)
        self.canvas().coords(self._oval, left - margin, top - margin,
                             right + margin, bot + margin)
        if self._oval2 is not None:
            # The second oval is drawn 2 pixels inside the first.
            self.canvas().coords(self._oval2,
                                 left - margin + 2, top - margin + 2,
                                 right + margin - 2, bot + margin - 2)

    def _tags(self):
        if self._oval2 is None:
            return [self._oval]
        else:
            return [self._oval, self._oval2]
class ParenWidget(AbstractContainerWidget):
    """
    A canvas widget that places a pair of parenthases around a child
    widget.

    Attributes:
      - C{color}: The color used to draw the parenthases.
      - C{width}: The width of the parenthases.
      - C{draggable}: whether the text can be dragged by the user.
    """
    def __init__(self, canvas, child, **attribs):
        """
        Create a new parenthasis widget.

        @type canvas: C{Tkinter.Canvas}
        @param canvas: This canvas widget's canvas.
        @param child: The child widget.  C{child} must not have a
            parent.
        @type child: C{CanvasWidget}
        @param attribs: The new canvas widget's attributes.
        """
        self._child = child
        # Two half-circle arcs: an opening '(' and a closing ')'.
        self._oparen = canvas.create_arc(1, 1, 1, 1, style='arc',
                                         start=90, extent=180)
        self._cparen = canvas.create_arc(1, 1, 1, 1, style='arc',
                                         start=-90, extent=180)
        AbstractContainerWidget.__init__(self, canvas, child, **attribs)

    def __setitem__(self, attr, value):
        canvas = self.canvas()
        if attr == 'color':
            for paren in (self._oparen, self._cparen):
                canvas.itemconfig(paren, outline=value)
        elif attr == 'width':
            for paren in (self._oparen, self._cparen):
                canvas.itemconfig(paren, width=value)
        else:
            CanvasWidget.__setitem__(self, attr, value)

    def __getitem__(self, attr):
        # Both parens always share the same options, so querying the
        # opening paren is sufficient.
        if attr == 'color':
            return self.canvas().itemcget(self._oparen, 'outline')
        if attr == 'width':
            return self.canvas().itemcget(self._oparen, 'width')
        return CanvasWidget.__getitem__(self, attr)

    def _update(self, child):
        x1, y1, x2, y2 = child.bbox()
        # Scale the paren curvature with the child's height.
        depth = max((y2 - y1) / 6, 4)
        self.canvas().coords(self._oparen, x1 - depth, y1, x1 + depth, y2)
        self.canvas().coords(self._cparen, x2 - depth, y1, x2 + depth, y2)

    def _tags(self):
        return [self._oparen, self._cparen]
class BracketWidget(AbstractContainerWidget):
    """
    A canvas widget that places a pair of brackets around a child
    widget.

    Attributes:
      - C{color}: The color used to draw the brackets.
      - C{width}: The width of the brackets.
      - C{draggable}: whether the text can be dragged by the user.
    """
    def __init__(self, canvas, child, **attribs):
        """
        Create a new bracket widget.

        @type canvas: C{Tkinter.Canvas}
        @param canvas: This canvas widget's canvas.
        @param child: The child widget.  C{child} must not have a
            parent.
        @type child: C{CanvasWidget}
        @param attribs: The new canvas widget's attributes.
        """
        self._child = child
        # Each bracket is a 4-point polyline: '[' and ']' shapes whose
        # coordinates are set by _update().
        self._obrack = canvas.create_line(1, 1, 1, 1, 1, 1, 1, 1)
        self._cbrack = canvas.create_line(1, 1, 1, 1, 1, 1, 1, 1)
        AbstractContainerWidget.__init__(self, canvas, child, **attribs)

    def __setitem__(self, attr, value):
        if attr == 'color':
            # Canvas *line* items are colored via their 'fill' option.
            self.canvas().itemconfig(self._obrack, fill=value)
            self.canvas().itemconfig(self._cbrack, fill=value)
        elif attr == 'width':
            self.canvas().itemconfig(self._obrack, width=value)
            self.canvas().itemconfig(self._cbrack, width=value)
        else:
            CanvasWidget.__setitem__(self, attr, value)

    def __getitem__(self, attr):
        if attr == 'color':
            # Bug fix: this queried the 'outline' option, but line
            # items have no 'outline' option -- their color is stored
            # in 'fill' (which is also what __setitem__ sets).
            return self.canvas().itemcget(self._obrack, 'fill')
        elif attr == 'width':
            return self.canvas().itemcget(self._obrack, 'width')
        else:
            return CanvasWidget.__getitem__(self, attr)

    def _update(self, child):
        (x1, y1, x2, y2) = child.bbox()
        # Scale the bracket arm length with the child's height.
        width = max((y2 - y1) / 8, 2)
        self.canvas().coords(self._obrack, x1, y1, x1 - width, y1,
                             x1 - width, y2, x1, y2)
        self.canvas().coords(self._cbrack, x2, y1, x2 + width, y1,
                             x2 + width, y2, x2, y2)

    def _tags(self):
        return [self._obrack, self._cbrack]
class SequenceWidget(CanvasWidget):
    """
    A canvas widget that keeps a list of canvas widgets in a
    horizontal line.

    Attributes:
      - C{align}: The vertical alignment of the children.  Possible
        values are C{'top'}, C{'center'}, and C{'bottom'}.  By
        default, children are center-aligned.
      - C{space}: The amount of horizontal space to place between
        children.  By default, one pixel of space is used.
      - C{ordered}: If true, then keep the children in their
        original order.
    """
    def __init__(self, canvas, *children, **attribs):
        """
        Create a new sequence widget.

        @type canvas: C{Tkinter.Canvas}
        @param canvas: This canvas widget's canvas.
        @param children: The widgets that should be aligned
            horizontally.  Each child must not have a parent.
        @type children: C{list} of C{CanvasWidget}
        @param attribs: The new canvas widget's attributes.
        """
        self._align = 'center'
        self._space = 1
        self._ordered = False
        self._children = list(children)
        for child in children:
            self._add_child_widget(child)
        CanvasWidget.__init__(self, canvas, **attribs)

    def __setitem__(self, attr, value):
        if attr == 'align':
            if value not in ('top', 'bottom', 'center'):
                raise ValueError('Bad alignment: %r' % value)
            self._align = value
        elif attr == 'space':
            self._space = value
        elif attr == 'ordered':
            self._ordered = value
        else:
            CanvasWidget.__setitem__(self, attr, value)

    def __getitem__(self, attr):
        if attr == 'align':
            # Bug fix: this used to `return value`, an unbound name,
            # which raised NameError whenever 'align' was read.
            return self._align
        elif attr == 'space':
            return self._space
        elif attr == 'ordered':
            return self._ordered
        else:
            return CanvasWidget.__getitem__(self, attr)

    def _tags(self):
        return []

    def _yalign(self, top, bot):
        # Return the reference y coordinate for the current alignment.
        if self._align == 'top': return top
        if self._align == 'bottom': return bot
        if self._align == 'center': return (top+bot)/2

    def _update(self, child):
        # Align all children with child.
        (left, top, right, bot) = child.bbox()
        y = self._yalign(top, bot)
        for c in self._children:
            (x1, y1, x2, y2) = c.bbox()
            c.move(0, y-self._yalign(y1,y2))

        if self._ordered and len(self._children) > 1:
            # Push any siblings that now overlap out of the way,
            # preserving the original left-to-right order.
            index = self._children.index(child)

            x = right + self._space
            for i in range(index+1, len(self._children)):
                (x1, y1, x2, y2) = self._children[i].bbox()
                if x > x1:
                    self._children[i].move(x-x1, 0)
                    x += x2-x1 + self._space

            x = left - self._space
            for i in range(index-1, -1, -1):
                (x1, y1, x2, y2) = self._children[i].bbox()
                if x < x2:
                    self._children[i].move(x-x2, 0)
                    x -= x2-x1 + self._space

    def _manage(self):
        if len(self._children) == 0: return
        child = self._children[0]

        # Align all children with child.
        (left, top, right, bot) = child.bbox()
        y = self._yalign(top, bot)

        index = self._children.index(child)

        # Line up children to the right of child.
        x = right + self._space
        for i in range(index+1, len(self._children)):
            (x1, y1, x2, y2) = self._children[i].bbox()
            self._children[i].move(x-x1, y-self._yalign(y1,y2))
            x += x2-x1 + self._space

        # Line up children to the left of child.
        x = left - self._space
        for i in range(index-1, -1, -1):
            (x1, y1, x2, y2) = self._children[i].bbox()
            self._children[i].move(x-x2, y-self._yalign(y1,y2))
            x -= x2-x1 + self._space

    def __repr__(self):
        # repr(list)[1:-1] strips the surrounding square brackets.
        # (Replaces the Python-2-only backtick repr syntax.)
        return '[Sequence: ' + repr(self._children)[1:-1] + ']'

    # Provide an alias for the child_widgets() member.
    children = CanvasWidget.child_widgets

    def replace_child(self, oldchild, newchild):
        """
        Replace the child canvas widget C{oldchild} with C{newchild}.
        C{newchild} must not have a parent.  C{oldchild}'s parent will
        be set to C{None}.

        @type oldchild: C{CanvasWidget}
        @param oldchild: The child canvas widget to remove.
        @type newchild: C{CanvasWidget}
        @param newchild: The canvas widget that should replace
            C{oldchild}.
        """
        index = self._children.index(oldchild)
        self._children[index] = newchild
        self._remove_child_widget(oldchild)
        self._add_child_widget(newchild)
        self.update(newchild)

    def remove_child(self, child):
        """
        Remove the given child canvas widget.  C{child}'s parent will
        be set to None.

        @type child: C{CanvasWidget}
        @param child: The child canvas widget to remove.
        """
        index = self._children.index(child)
        del self._children[index]
        self._remove_child_widget(child)
        if len(self._children) > 0:
            self.update(self._children[0])

    def insert_child(self, index, child):
        """
        Insert a child canvas widget before a given index.

        @type child: C{CanvasWidget}
        @param child: The canvas widget that should be inserted.
        @type index: C{int}
        @param index: The index where the child widget should be
            inserted.  In particular, the index of C{child} will be
            C{index}; and the index of any children whose indices were
            greater than equal to C{index} before C{child} was
            inserted will be incremented by one.
        """
        self._children.insert(index, child)
        self._add_child_widget(child)
class StackWidget(CanvasWidget):
    """
    A canvas widget that keeps a list of canvas widgets in a vertical
    line.

    Attributes:
      - C{align}: The horizontal alignment of the children.  Possible
        values are C{'left'}, C{'center'}, and C{'right'}.  By
        default, children are center-aligned.
      - C{space}: The amount of vertical space to place between
        children.  By default, one pixel of space is used.
      - C{ordered}: If true, then keep the children in their
        original order.
    """
    def __init__(self, canvas, *children, **attribs):
        """
        Create a new stack widget.

        @type canvas: C{Tkinter.Canvas}
        @param canvas: This canvas widget's canvas.
        @param children: The widgets that should be aligned
            vertically.  Each child must not have a parent.
        @type children: C{list} of C{CanvasWidget}
        @param attribs: The new canvas widget's attributes.
        """
        self._align = 'center'
        self._space = 1
        self._ordered = False
        self._children = list(children)
        for child in children:
            self._add_child_widget(child)
        CanvasWidget.__init__(self, canvas, **attribs)

    def __setitem__(self, attr, value):
        if attr == 'align':
            if value not in ('left', 'right', 'center'):
                raise ValueError('Bad alignment: %r' % value)
            self._align = value
        elif attr == 'space':
            self._space = value
        elif attr == 'ordered':
            self._ordered = value
        else:
            CanvasWidget.__setitem__(self, attr, value)

    def __getitem__(self, attr):
        if attr == 'align':
            # Bug fix: this used to `return value`, an unbound name,
            # which raised NameError whenever 'align' was read.
            return self._align
        elif attr == 'space':
            return self._space
        elif attr == 'ordered':
            return self._ordered
        else:
            return CanvasWidget.__getitem__(self, attr)

    def _tags(self):
        return []

    def _xalign(self, left, right):
        # Return the reference x coordinate for the current alignment.
        if self._align == 'left': return left
        if self._align == 'right': return right
        if self._align == 'center': return (left+right)/2

    def _update(self, child):
        # Align all children with child.
        (left, top, right, bot) = child.bbox()
        x = self._xalign(left, right)
        for c in self._children:
            (x1, y1, x2, y2) = c.bbox()
            c.move(x-self._xalign(x1,x2), 0)

        if self._ordered and len(self._children) > 1:
            # Push any siblings that now overlap out of the way,
            # preserving the original top-to-bottom order.
            index = self._children.index(child)

            y = bot + self._space
            for i in range(index+1, len(self._children)):
                (x1, y1, x2, y2) = self._children[i].bbox()
                if y > y1:
                    self._children[i].move(0, y-y1)
                    y += y2-y1 + self._space

            y = top - self._space
            for i in range(index-1, -1, -1):
                (x1, y1, x2, y2) = self._children[i].bbox()
                if y < y2:
                    self._children[i].move(0, y-y2)
                    y -= y2-y1 + self._space

    def _manage(self):
        if len(self._children) == 0: return
        child = self._children[0]

        # Align all children with child.
        (left, top, right, bot) = child.bbox()
        x = self._xalign(left, right)

        index = self._children.index(child)

        # Line up children below the child.
        y = bot + self._space
        for i in range(index+1, len(self._children)):
            (x1, y1, x2, y2) = self._children[i].bbox()
            self._children[i].move(x-self._xalign(x1,x2), y-y1)
            y += y2-y1 + self._space

        # Line up children above the child.
        y = top - self._space
        for i in range(index-1, -1, -1):
            (x1, y1, x2, y2) = self._children[i].bbox()
            self._children[i].move(x-self._xalign(x1,x2), y-y2)
            y -= y2-y1 + self._space

    def __repr__(self):
        # repr(list)[1:-1] strips the surrounding square brackets.
        # (Replaces the Python-2-only backtick repr syntax.)
        return '[Stack: ' + repr(self._children)[1:-1] + ']'

    # Provide an alias for the child_widgets() member.
    children = CanvasWidget.child_widgets

    def replace_child(self, oldchild, newchild):
        """
        Replace the child canvas widget C{oldchild} with C{newchild}.
        C{newchild} must not have a parent.  C{oldchild}'s parent will
        be set to C{None}.

        @type oldchild: C{CanvasWidget}
        @param oldchild: The child canvas widget to remove.
        @type newchild: C{CanvasWidget}
        @param newchild: The canvas widget that should replace
            C{oldchild}.
        """
        index = self._children.index(oldchild)
        self._children[index] = newchild
        self._remove_child_widget(oldchild)
        self._add_child_widget(newchild)
        self.update(newchild)

    def remove_child(self, child):
        """
        Remove the given child canvas widget.  C{child}'s parent will
        be set to None.

        @type child: C{CanvasWidget}
        @param child: The child canvas widget to remove.
        """
        index = self._children.index(child)
        del self._children[index]
        self._remove_child_widget(child)
        if len(self._children) > 0:
            self.update(self._children[0])

    def insert_child(self, index, child):
        """
        Insert a child canvas widget before a given index.

        @type child: C{CanvasWidget}
        @param child: The canvas widget that should be inserted.
        @type index: C{int}
        @param index: The index where the child widget should be
            inserted.  In particular, the index of C{child} will be
            C{index}; and the index of any children whose indices were
            greater than equal to C{index} before C{child} was
            inserted will be incremented by one.
        """
        self._children.insert(index, child)
        self._add_child_widget(child)
class SpaceWidget(CanvasWidget):
    """
    A canvas widget that takes up space but does not display
    anything.  C{SpaceWidget}s can be used to add space between
    elements.  Each space widget is characterized by a width and a
    height.  If you wish to only create horizontal space, then use a
    height of zero; and if you wish to only create vertical space, use
    a width of zero.
    """
    def __init__(self, canvas, width, height, **attribs):
        """
        Create a new space widget.

        @type canvas: C{Tkinter.Canvas}
        @param canvas: This canvas widget's canvas.
        @type width: C{int}
        @param width: The width of the new space widget.
        @type height: C{int}
        @param height: The height of the new space widget.
        @param attribs: The new canvas widget's attributes.
        """
        # Shrink the requested size slightly -- presumably to
        # compensate for the extra padding Tk adds to a line item's
        # bounding box (TODO: confirm).
        if width > 4:
            width -= 4
        if height > 4:
            height -= 4
        # An invisible (fill='') line supplies the bounding box.
        self._tag = canvas.create_line(1, 1, width, height, fill='')
        CanvasWidget.__init__(self, canvas, **attribs)

    # note: width() and height() are already defined by CanvasWidget.

    def set_width(self, width):
        """
        Change the width of this space widget.

        @param width: The new width.
        @type width: C{int}
        @rtype: C{None}
        """
        x1, y1, _x2, y2 = self.bbox()
        self.canvas().coords(self._tag, x1, y1, x1 + width, y2)

    def set_height(self, height):
        """
        Change the height of this space widget.

        @param height: The new height.
        @type height: C{int}
        @rtype: C{None}
        """
        x1, y1, x2, _y2 = self.bbox()
        self.canvas().coords(self._tag, x1, y1, x2, y1 + height)

    def _tags(self):
        return [self._tag]

    def __repr__(self):
        return '[Space]'
class ScrollWatcherWidget(CanvasWidget):
    """
    A special canvas widget that adjusts its C{Canvas}'s scrollregion
    to always include the bounding boxes of all of its children.  The
    scroll-watcher widget will only increase the size of the
    C{Canvas}'s scrollregion; it will never decrease it.
    """
    def __init__(self, canvas, *children, **attribs):
        """
        Create a new scroll-watcher widget.

        @type canvas: C{Tkinter.Canvas}
        @param canvas: This canvas widget's canvas.
        @type children: C{list} of C{CanvasWidget}
        @param children: The canvas widgets watched by the
            scroll-watcher.  The scroll-watcher will ensure that these
            canvas widgets are always contained in their canvas's
            scrollregion.
        @param attribs: The new canvas widget's attributes.
        """
        for widget in children:
            self._add_child_widget(widget)
        CanvasWidget.__init__(self, canvas, **attribs)

    def add_child(self, canvaswidget):
        """
        Add a new canvas widget to the scroll-watcher.  The
        scroll-watcher will ensure that the new canvas widget is
        always contained in its canvas's scrollregion.

        @param canvaswidget: The new canvas widget.
        @type canvaswidget: C{CanvasWidget}
        @rtype: C{None}
        """
        self._add_child_widget(canvaswidget)
        self.update(canvaswidget)

    def remove_child(self, canvaswidget):
        """
        Remove a canvas widget from the scroll-watcher.  The
        scroll-watcher will no longer ensure that the new canvas
        widget is always contained in its canvas's scrollregion.

        @param canvaswidget: The canvas widget to remove.
        @type canvaswidget: C{CanvasWidget}
        @rtype: C{None}
        """
        self._remove_child_widget(canvaswidget)

    def _tags(self):
        return []

    def _update(self, child):
        self._adjust_scrollregion()

    def _adjust_scrollregion(self):
        """
        Adjust the scrollregion of this scroll-watcher's C{Canvas} to
        include the bounding boxes of all of its children.
        """
        bbox = self.bbox()
        canvas = self.canvas()
        current = [int(n) for n in canvas['scrollregion'].split()]
        if len(current) != 4:
            return

        # The union of the current region and our bounding box; it
        # equals the current region exactly when no growth is needed,
        # so the region only ever grows.
        merged = (min(bbox[0], current[0]), min(bbox[1], current[1]),
                  max(bbox[2], current[2]), max(bbox[3], current[3]))
        if merged != tuple(current):
            canvas['scrollregion'] = '%d %d %d %d' % merged
##//////////////////////////////////////////////////////
## Canvas Frame
##//////////////////////////////////////////////////////
class CanvasFrame(object):
    """
    A C{Tkinter} frame containing a canvas and scrollbars.
    C{CanvasFrame} uses a C{ScrollWatcherWidget} to ensure that all of
    the canvas widgets contained on its canvas are within its
    scrollregion.  In order for C{CanvasFrame} to make these checks,
    all canvas widgets must be registered with C{add_widget} when they
    are added to the canvas; and destroyed with C{destroy_widget} when
    they are no longer needed.

    If a C{CanvasFrame} is created with no parent, then it will create
    its own main window, including a "Done" button and a "Print"
    button.
    """
    def __init__(self, parent=None, **kw):
        """
        Create a new C{CanvasFrame}.

        @type parent: C{Tkinter.BaseWidget} or C{Tkinter.Tk}
        @param parent: The parent C{Tkinter} widget.  If no parent is
            specified, then C{CanvasFrame} will create a new main
            window.
        @param kw: Keyword arguments for the new C{Canvas}.  See the
            documentation for C{Tkinter.Canvas} for more information.
        """
        # If no parent was given, set up a top-level window.
        if parent is None:
            self._parent = Tk()
            self._parent.title('NLTK')
            self._parent.bind('<Control-p>', lambda e: self.print_to_file())
            self._parent.bind('<Control-x>', self.destroy)
            self._parent.bind('<Control-q>', self.destroy)
        else:
            self._parent = parent

        # Create a frame for the canvas & scrollbars
        self._frame = frame = Frame(self._parent)
        self._canvas = canvas = Canvas(frame, **kw)
        xscrollbar = Scrollbar(self._frame, orient='horizontal')
        yscrollbar = Scrollbar(self._frame, orient='vertical')
        xscrollbar['command'] = canvas.xview
        yscrollbar['command'] = canvas.yview
        canvas['xscrollcommand'] = xscrollbar.set
        canvas['yscrollcommand'] = yscrollbar.set
        yscrollbar.pack(fill='y', side='right')
        xscrollbar.pack(fill='x', side='bottom')
        canvas.pack(expand=1, fill='both', side='left')

        # Set initial scroll region.
        scrollregion = '0 0 %s %s' % (canvas['width'], canvas['height'])
        canvas['scrollregion'] = scrollregion

        # Grow the scrollregion whenever a registered widget moves
        # outside it.
        self._scrollwatcher = ScrollWatcherWidget(canvas)

        # If no parent was given, pack the frame, and add a menu.
        if parent is None:
            self.pack(expand=1, fill='both')
            self._init_menubar()

    def _init_menubar(self):
        # Build the File menu for the stand-alone (parentless) window.
        menubar = Menu(self._parent)

        filemenu = Menu(menubar, tearoff=0)
        filemenu.add_command(label='Print to Postscript', underline=0,
                             command=self.print_to_file, accelerator='Ctrl-p')
        filemenu.add_command(label='Exit', underline=1,
                             command=self.destroy, accelerator='Ctrl-x')
        menubar.add_cascade(label='File', underline=0, menu=filemenu)

        self._parent.config(menu=menubar)

    def print_to_file(self, filename=None):
        """
        Print the contents of this C{CanvasFrame} to a postscript
        file.  If no filename is given, then prompt the user for one.

        @param filename: The name of the file to print the tree to.
        @type filename: C{string}
        @rtype: C{None}
        """
        if filename is None:
            from tkFileDialog import asksaveasfilename
            ftypes = [('Postscript files', '.ps'),
                      ('All files', '*')]
            filename = asksaveasfilename(filetypes=ftypes,
                                         defaultextension='.ps')
            if not filename: return
        (x0, y0, w, h) = self.scrollregion()
        self._canvas.postscript(file=filename, x=x0, y=y0,
                                width=w+2, height=h+2,
                                pagewidth=w+2, # points = 1/72 inch
                                pageheight=h+2, # points = 1/72 inch
                                pagex=0, pagey=0)

    def scrollregion(self):
        """
        @return: The current scroll region for the canvas managed by
            this C{CanvasFrame}.
        @rtype: 4-tuple of C{int}
        """
        (x1, y1, x2, y2) = self._canvas['scrollregion'].split()
        return (int(x1), int(y1), int(x2), int(y2))

    def canvas(self):
        """
        @return: The canvas managed by this C{CanvasFrame}.
        @rtype: C{Tkinter.Canvas}
        """
        return self._canvas

    def add_widget(self, canvaswidget, x=None, y=None):
        """
        Register a canvas widget with this C{CanvasFrame}.  The
        C{CanvasFrame} will ensure that this canvas widget is always
        within the C{Canvas}'s scrollregion.  If no coordinates are
        given for the canvas widget, then the C{CanvasFrame} will
        attempt to find a clear area of the canvas for it.

        @type canvaswidget: C{CanvasWidget}
        @param canvaswidget: The new canvas widget.  C{canvaswidget}
            must have been created on this C{CanvasFrame}'s canvas.
        @type x: C{int}
        @param x: The initial x coordinate for the upper left hand
            corner of C{canvaswidget}, in the canvas's coordinate
            space.
        @type y: C{int}
        @param y: The initial y coordinate for the upper left hand
            corner of C{canvaswidget}, in the canvas's coordinate
            space.
        """
        if x is None or y is None:
            (x, y) = self._find_room(canvaswidget, x, y)

        # Move to (x,y)
        (x1,y1,x2,y2) = canvaswidget.bbox()
        canvaswidget.move(x-x1,y-y1)

        # Register with scrollwatcher.
        self._scrollwatcher.add_child(canvaswidget)

    def _find_room(self, widget, desired_x, desired_y):
        """
        Try to find a space for a given widget.
        """
        (left, top, right, bot) = self.scrollregion()
        w = widget.width()
        h = widget.height()

        if w >= (right-left): return (0,0)
        if h >= (bot-top): return (0,0)

        # Search-grid step sizes.  Bug fix: guard against a zero step
        # (which makes range() raise ValueError) when the free space
        # in a direction is less than ten pixels.  Floor division
        # matches the old Python 2 integer `/` semantics.
        xstep = max(1, (right-left-w)//10)
        ystep = max(1, (bot-top-h)//10)

        # Move the widget out of the way, for now.
        (x1,y1,x2,y2) = widget.bbox()
        widget.move(left-x2-50, top-y2-50)

        if desired_x is not None:
            x = desired_x
            for y in range(top, bot-h, ystep):
                if not self._canvas.find_overlapping(x-5, y-5, x+w+5, y+h+5):
                    return (x,y)

        if desired_y is not None:
            y = desired_y
            for x in range(left, right-w, xstep):
                if not self._canvas.find_overlapping(x-5, y-5, x+w+5, y+h+5):
                    return (x,y)

        for y in range(top, bot-h, ystep):
            for x in range(left, right-w, xstep):
                if not self._canvas.find_overlapping(x-5, y-5, x+w+5, y+h+5):
                    return (x,y)
        return (0,0)

    def destroy_widget(self, canvaswidget):
        """
        Remove a canvas widget from this C{CanvasFrame}.  This
        deregisters the canvas widget, and destroys it.
        """
        self.remove_widget(canvaswidget)
        canvaswidget.destroy()

    def remove_widget(self, canvaswidget):
        # Deregister with scrollwatcher.
        self._scrollwatcher.remove_child(canvaswidget)

    def pack(self, cnf={}, **kw):
        """
        Pack this C{CanvasFrame}.  See the documentation for
        C{Tkinter.Pack} for more information.
        """
        # (The mutable default is safe here: cnf is passed straight
        # through to Tkinter and never mutated.)
        self._frame.pack(cnf, **kw)
        # Adjust to be big enough for kids?

    def destroy(self, *e):
        """
        Destroy this C{CanvasFrame}.  If this C{CanvasFrame} created a
        top-level window, then this will close that window.
        """
        if self._parent is None: return
        self._parent.destroy()
        self._parent = None

    def mainloop(self, *args, **kwargs):
        """
        Enter the Tkinter mainloop.  This function must be called if
        this frame is created from a non-interactive program (e.g.
        from a script); otherwise, the frame will close as soon as
        the script completes.
        """
        if in_idle(): return
        self._parent.mainloop(*args, **kwargs)
##//////////////////////////////////////////////////////
## Text display
##//////////////////////////////////////////////////////
class ShowText(object):
    """
    A C{Tkinter} window used to display a text.  C{ShowText} is
    typically used by graphical tools to display help text, or similar
    information.
    """
    def __init__(self, root, title, text, width=None, height=None,
                 **textbox_options):
        """
        @param root: The parent window, or C{None} to create a new
            main window.
        @param title: The window title.
        @param text: The text to display.
        @param width: Width of the text box in characters; computed
            from C{text} if not given.
        @param height: Height of the text box in lines; computed from
            C{text} if not given.
        @param textbox_options: Extra options for the C{Text} widget.
        """
        if width is None or height is None:
            (width, height) = self.find_dimentions(text, width, height)

        # Create the main window.
        if root is None:
            self._top = top = Tk()
        else:
            self._top = top = Toplevel(root)
        top.title(title)

        b = Button(top, text='Ok', command=self.destroy)
        b.pack(side='bottom')
        tbf = Frame(top)
        tbf.pack(expand=1, fill='both')
        scrollbar = Scrollbar(tbf, orient='vertical')
        scrollbar.pack(side='right', fill='y')
        textbox = Text(tbf, wrap='word', width=width,
                       height=height, **textbox_options)
        textbox.insert('end', text)
        textbox['state'] = 'disabled'
        textbox.pack(side='left', expand=1, fill='both')
        scrollbar['command'] = textbox.yview
        textbox['yscrollcommand'] = scrollbar.set

        # Make it easy to close the window.
        top.bind('q', self.destroy)
        top.bind('x', self.destroy)
        top.bind('c', self.destroy)
        top.bind('<Return>', self.destroy)
        top.bind('<Escape>', self.destroy)

        # Focus the scrollbar, so they can use up/down, etc.
        scrollbar.focus()

    def find_dimentions(self, text, width, height):
        """
        Estimate a good (width, height) for displaying C{text}, capped
        at 80 columns and 25 lines.  The supplied C{width} is honored
        if given; C{height} is always recomputed from the text.
        """
        lines = text.split('\n')
        if width is None:
            maxwidth = max([len(line) for line in lines])
            width = min(maxwidth, 80)

        # Now, find height.
        height = 0
        for line in lines:
            while len(line) > width:
                brk = line[:width].rfind(' ')
                # Bug fix: when there is no usable break point (no
                # space, or a space only at position 0), the old code
                # could loop forever (line[0:] leaves the line
                # unchanged).  Break at the full width instead.
                if brk <= 0:
                    brk = width
                line = line[brk:]
                height += 1
            height += 1
        height = min(height, 25)
        return (width, height)

    def destroy(self, *e):
        if self._top is None: return
        self._top.destroy()
        self._top = None

    def mainloop(self, *args, **kwargs):
        """
        Enter the Tkinter mainloop.  This function must be called if
        this window is created from a non-interactive program (e.g.
        from a script); otherwise, the window will close as soon as
        the script completes.
        """
        if in_idle(): return
        self._top.mainloop(*args, **kwargs)
##//////////////////////////////////////////////////////
## Entry dialog
##//////////////////////////////////////////////////////
class EntryDialog(object):
    """
    A dialog box for entering a single line of text, with Ok, Apply,
    and Cancel buttons.  If given, C{set_callback} is invoked with the
    entry's contents when the value is applied, and with the original
    text when the dialog is cancelled.
    """
    def __init__(self, parent, original_text='', instructions='',
                 set_callback=None, title=None):
        """
        @param parent: The parent Tkinter widget for the dialog.
        @param original_text: The initial contents of the entry box.
        @param instructions: Optional text displayed above the entry.
        @param set_callback: Optional one-argument function, called
            with the entry's text whenever the value is applied or
            reset.
        @param title: Optional window title.
        """
        self._parent = parent
        self._original_text = original_text
        self._set_callback = set_callback

        # Make the entry wide enough for the initial text.  Floor
        # division keeps the width an int (identical to the old `/`
        # under Python 2; Tk requires an integer width).
        width = max(30, len(original_text)*3//2)
        self._top = Toplevel(parent)
        if title: self._top.title(title)

        # The text entry box.
        entryframe = Frame(self._top)
        entryframe.pack(expand=1, fill='both', padx=5, pady=5,ipady=10)
        if instructions:
            l=Label(entryframe, text=instructions)
            l.pack(side='top', anchor='w', padx=30)
        self._entry = Entry(entryframe, width=width)
        self._entry.pack(expand=1, fill='x', padx=30)
        self._entry.insert(0, original_text)

        # A divider
        divider = Frame(self._top, borderwidth=1, relief='sunken')
        divider.pack(fill='x', ipady=1, padx=10)

        # The buttons.
        buttons = Frame(self._top)
        buttons.pack(expand=0, fill='x', padx=5, pady=5)
        b = Button(buttons, text='Cancel', command=self._cancel, width=8)
        b.pack(side='right', padx=5)
        b = Button(buttons, text='Ok', command=self._ok,
                   width=8, default='active')
        b.pack(side='left', padx=5)
        b = Button(buttons, text='Apply', command=self._apply, width=8)
        b.pack(side='left')

        # Keyboard shortcuts.
        self._top.bind('<Return>', self._ok)
        self._top.bind('<Control-q>', self._cancel)
        self._top.bind('<Escape>', self._cancel)

        self._entry.focus()

    def _reset(self, *e):
        # Restore the entry (and the callback's value) to the
        # original text.
        self._entry.delete(0,'end')
        self._entry.insert(0, self._original_text)
        if self._set_callback:
            self._set_callback(self._original_text)

    def _cancel(self, *e):
        # Best-effort reset -- the widgets may already be destroyed.
        # Narrowed from a bare `except:` so that KeyboardInterrupt
        # and SystemExit still propagate.
        try: self._reset()
        except Exception: pass
        self._destroy()

    def _ok(self, *e):
        self._apply()
        self._destroy()

    def _apply(self, *e):
        if self._set_callback:
            self._set_callback(self._entry.get())

    def _destroy(self, *e):
        if self._top is None: return
        self._top.destroy()
        self._top = None
##//////////////////////////////////////////////////////
## Colorized List
##//////////////////////////////////////////////////////
class ColorizedList(object):
"""
An abstract base class for displaying a colorized list of items.
Subclasses should define:
- L{_init_colortags}, which sets up Text color tags that
will be used by the list.
- L{_item_repr}, which returns a list of (text,colortag)
tuples that make up the colorized representation of the
item.
@note: Typically, you will want to register a callback for
C{'select'} that calls L{mark} on the given item.
"""
def __init__(self, parent, items=[], **options):
    """
    Construct a new list.

    @param parent: The Tk widget that contains the colorized list
    @param items: The initial contents of the colorized list.
        (NOTE(review): mutable default -- safe here because it is
        only read, via C{self.set(items)}, and never mutated.)
    @param options: Options forwarded to the item frame / text box.
    """
    self._parent = parent
    # Registered event callbacks, keyed by event name.
    self._callbacks = {}

    # Which items are marked?
    self._marks = {}

    # Initialize the Tkinter frames.
    self._init_itemframe(options.copy())

    # Set up key & mouse bindings.
    self._textwidget.bind('<KeyPress>', self._keypress)
    self._textwidget.bind('<ButtonPress>', self._buttonpress)

    # Fill in the given CFG's items.  (_items starts as None so the
    # "unchanged contents" short-circuit in set() does not trigger.)
    self._items = None
    self.set(items)
#////////////////////////////////////////////////////////////
# Abstract methods
#////////////////////////////////////////////////////////////
def _init_colortags(self, textwidget, options):
"""
Set up any colortags that will be used by this colorized list.
E.g.:
>>> textwidget.tag_config('terminal', foreground='black')
"""
raise AssertionError, 'Abstract base class'
def _item_repr(self, item):
"""
Return a list of (text, colortag) tuples that make up the
colorized representation of the item. Colorized
representations may not span multiple lines. I.e., the text
strings returned may not contain newline characters.
"""
raise AssertionError, 'Abstract base class'
#////////////////////////////////////////////////////////////
# Item Access
#////////////////////////////////////////////////////////////
def get(self, index=None):
"""
@return: A list of the items contained by this list.
"""
if index is None:
return self._items[:]
else:
return self._items[index]
def set(self, items):
"""
Modify the list of items contained by this list.
"""
items = list(items)
if self._items == items: return
self._items = list(items)
self._textwidget['state'] = 'normal'
self._textwidget.delete('1.0', 'end')
for item in items:
for (text, colortag) in self._item_repr(item):
assert '\n' not in text, 'item repr may not contain newline'
self._textwidget.insert('end', text, colortag)
self._textwidget.insert('end', '\n')
# Remove the final newline
self._textwidget.delete('end-1char', 'end')
self._textwidget.mark_set('insert', '1.0')
self._textwidget['state'] = 'disabled'
# Clear all marks
self._marks.clear()
def unmark(self, item=None):
"""
Remove highlighting from the given item; or from every item,
if no item is given.
@raise ValueError: If C{item} is not contained in the list.
@raise KeyError: If C{item} is not marked.
"""
if item is None:
self._marks.clear()
self._textwidget.tag_remove('highlight', '1.0', 'end+1char')
else:
index = self._items.index(item)
del self._marks[item]
(start, end) = ('%d.0' % (index+1), '%d.0' % (index+2))
self._textwidget.tag_remove('highlight', start, end)
def mark(self, item):
"""
Highlight the given item.
@raise ValueError: If C{item} is not contained in the list.
"""
self._marks[item] = 1
index = self._items.index(item)
(start, end) = ('%d.0' % (index+1), '%d.0' % (index+2))
self._textwidget.tag_add('highlight', start, end)
def markonly(self, item):
"""
Remove any current highlighting, and mark the given item.
@raise ValueError: If C{item} is not contained in the list.
"""
self.unmark()
self.mark(item)
def view(self, item):
"""
Adjust the view such that the given item is visible. If
the item is already visible, then do nothing.
"""
index = self._items.index(item)
self._textwidget.see('%d.0' % (index+1))
#////////////////////////////////////////////////////////////
# Callbacks
#////////////////////////////////////////////////////////////
def add_callback(self, event, func):
"""
Register a callback function with the list. This function
will be called whenever the given event occurs.
@param event: The event that will trigger the callback
function. Valid events are: click1, click2, click3,
space, return, select, up, down, next, prior, move
@param func: The function that should be called when
the event occurs. C{func} will be called with a
single item as its argument. (The item selected
or the item moved to).
"""
if event == 'select': events = ['click1', 'space', 'return']
elif event == 'move': events = ['up', 'down', 'next', 'prior']
else: events = [event]
for e in events:
self._callbacks.setdefault(e,{})[func] = 1
def remove_callback(self, event, func=None):
"""
Deregister a callback function. If C{func} is none, then
all callbacks are removed for the given event.
"""
if event is None: events = self._callbacks.keys()
elif event == 'select': events = ['click1', 'space', 'return']
elif event == 'move': events = ['up', 'down', 'next', 'prior']
else: events = [event]
for e in events:
if func is None: del self._callbacks[e]
else:
try: del self._callbacks[e][func]
except: pass
#////////////////////////////////////////////////////////////
# Tkinter Methods
#////////////////////////////////////////////////////////////
def pack(self, cnf={}, **kw):
# "@include: Tkinter.Pack.pack"
self._itemframe.pack(cnf, **kw)
def grid(self, cnf={}, **kw):
# "@include: Tkinter.Grid.grid"
self._itemframe.grid(cnf, *kw)
def focus(self):
# "@include: Tkinter.Widget.focus"
self._textwidget.focus()
#////////////////////////////////////////////////////////////
# Internal Methods
#////////////////////////////////////////////////////////////
def _init_itemframe(self, options):
self._itemframe = Frame(self._parent)
# Create the basic Text widget & scrollbar.
options.setdefault('background', '#e0e0e0')
self._textwidget = Text(self._itemframe, **options)
self._textscroll = Scrollbar(self._itemframe, takefocus=0,
orient='vertical')
self._textwidget.config(yscrollcommand = self._textscroll.set)
self._textscroll.config(command=self._textwidget.yview)
self._textscroll.pack(side='right', fill='y')
self._textwidget.pack(expand=1, fill='both', side='left')
# Initialize the colorization tags
self._textwidget.tag_config('highlight', background='#e0ffff',
border='1', relief='raised')
self._init_colortags(self._textwidget, options)
# How do I want to mark keyboard selection?
self._textwidget.tag_config('sel', foreground='')
self._textwidget.tag_config('sel', foreground='', background='',
border='', underline=1)
self._textwidget.tag_lower('highlight', 'sel')
def _fire_callback(self, event, itemnum):
if not self._callbacks.has_key(event): return
if 0 <= itemnum < len(self._items):
item = self._items[itemnum]
else:
item = None
for cb_func in self._callbacks[event].keys():
cb_func(item)
def _buttonpress(self, event):
clickloc = '@%d,%d' % (event.x,event.y)
insert_point = self._textwidget.index(clickloc)
itemnum = int(insert_point.split('.')[0])-1
self._fire_callback('click%d' % event.num, itemnum)
def _keypress(self, event):
if event.keysym == 'Return' or event.keysym == 'space':
insert_point = self._textwidget.index('insert')
itemnum = int(insert_point.split('.')[0])-1
self._fire_callback(event.keysym.lower(), itemnum)
return
elif event.keysym == 'Down': delta='+1line'
elif event.keysym == 'Up': delta='-1line'
elif event.keysym == 'Next': delta='+10lines'
elif event.keysym == 'Prior': delta='-10lines'
else: return 'continue'
self._textwidget.mark_set('insert', 'insert'+delta)
self._textwidget.see('insert')
self._textwidget.tag_remove('sel', '1.0', 'end+1char')
self._textwidget.tag_add('sel', 'insert linestart', 'insert lineend')
insert_point = self._textwidget.index('insert')
itemnum = int(insert_point.split('.')[0])-1
self._fire_callback(event.keysym.lower(), itemnum)
return 'break'
##//////////////////////////////////////////////////////
## Improved OptionMenu
##//////////////////////////////////////////////////////
class MutableOptionMenu(Menubutton):
    """
    An OptionMenu variant whose set of values can be changed after
    construction (via L{add} and L{remove}).  An optional C{command}
    callback is invoked with the newly selected value whenever the
    selection changes.
    """
    def __init__(self, master, values, **options):
        # Extract (and remove) the optional 'command' callback before
        # forwarding the remaining options to the Menubutton.
        self._callback = options.pop('command', None)

        # The variable tracking the currently selected value.
        self._variable = variable = StringVar()
        if len(values) > 0:
            variable.set(values[0])

        kw = {"borderwidth": 2, "textvariable": variable,
              "indicatoron": 1, "relief": RAISED, "anchor": "c",
              "highlightthickness": 2}
        kw.update(options)
        Widget.__init__(self, master, "menubutton", kw)
        self.widgetName = 'tk_optionMenu'
        self._menu = Menu(self, name="menu", tearoff=0,)
        self.menuname = self._menu._w

        self._values = []
        for value in values: self.add(value)

        self["menu"] = self._menu

    def add(self, value):
        """Append C{value} to the menu; a no-op if already present."""
        if value in self._values: return
        # Bind `value` as a default argument so each menu entry selects
        # its own value (avoids the late-binding closure pitfall).
        def set(value=value): self.set(value)
        self._menu.add_command(label=value, command=set)
        self._values.append(value)

    def set(self, value):
        """Select C{value} and notify the C{command} callback, if any."""
        self._variable.set(value)
        if self._callback:
            self._callback(value)

    def remove(self, value):
        """
        Remove C{value} from the menu.
        @raise ValueError: If C{value} is not in the menu.
        """
        # list.index raises ValueError for a missing value: pass to caller.
        i = self._values.index(value)
        del self._values[i]
        self._menu.delete(i, i)

    def __getitem__(self, name):
        if name == 'menu':
            # BUG FIX: previously returned self.__menu, which after name
            # mangling referenced a nonexistent attribute
            # (_MutableOptionMenu__menu); the menu lives in self._menu.
            return self._menu
        return Widget.__getitem__(self, name)

    def destroy(self):
        """Destroy this widget and the associated menu."""
        Menubutton.destroy(self)
        self._menu = None
##//////////////////////////////////////////////////////
## Helpers
##//////////////////////////////////////////////////////
def in_idle():
    """
    @rtype: C{boolean}
    @return: true if this function is run within idle.  Tkinter
    programs that are run in idle should never call L{Tk.mainloop}; so
    this function should be used to gate all calls to C{Tk.mainloop}.

    @warning: This function works by checking C{sys.stdin}.  If the
    user has modified C{sys.stdin}, then it may return incorrect
    results.
    """
    import sys, types
    # types.InstanceType (old-style class instances) no longer exists on
    # Python 3; guard the lookup so the check degrades to False instead
    # of raising AttributeError.  Behavior on Python 2 is unchanged.
    instance_type = getattr(types, 'InstanceType', None)
    if instance_type is None:
        return False
    return (type(sys.stdin) == instance_type and
            sys.stdin.__class__.__name__ == 'PyShell')
##//////////////////////////////////////////////////////
## Test code.
##//////////////////////////////////////////////////////
def demo():
    """
    A simple demonstration showing how to use canvas widgets.
    """
    # Click handlers: re-color the clicked widget with a random shade.
    def randomize_fill(widget):
        from random import randint
        widget['fill'] = '#00%04d' % randint(0,9999)
    def randomize_color(widget):
        from random import randint
        widget['color'] = '#ff%04d' % randint(0,9999)

    # A frame to hold all of the demo widgets.
    frame = CanvasFrame(closeenough=10, width=300, height=300)
    canvas = frame.canvas()

    # Assorted draggable text and shape widgets.
    greeting = TextWidget(canvas, 'hiya there', draggable=1)
    face_open = TextWidget(canvas, 'o o\n||\n___\n U', draggable=1, justify='center')
    oval = OvalWidget(canvas, face_open, outline='red')
    face_smile = TextWidget(canvas, 'o o\n||\n\\___/', draggable=1, justify='center')
    paren = ParenWidget(canvas, face_smile, color='red')
    box = BoxWidget(canvas, paren, fill='cyan', draggable=1, width=3, margin=10)

    # A small logical formula built from symbol and text widgets.
    equation = SequenceWidget(canvas,
                              SymbolWidget(canvas, 'forall'), TextWidget(canvas, 'x'),
                              SymbolWidget(canvas, 'exists'), TextWidget(canvas, 'y: '),
                              TextWidget(canvas, 'x'), SymbolWidget(canvas, 'notequal'),
                              TextWidget(canvas, 'y'))

    # Stack everything vertically, put a hint next to it, and bracket it.
    spacer = SpaceWidget(canvas, 0, 30)
    stack = StackWidget(canvas, box, greeting, spacer, oval, equation, align='center')
    hint = TextWidget(canvas, 'try clicking\nand dragging',
                      draggable=1, justify='center')
    row = SequenceWidget(canvas, stack, hint)
    bracketed = BracketWidget(canvas, row, color='green4', width=3)
    frame.add_widget(bracketed, 60, 30)

    # Wire up the click handlers.
    box.bind_click(randomize_fill)
    face_smile.bind_click(randomize_color)
    oval.bind_click(randomize_fill)
    face_open.bind_click(randomize_color)
    greeting.bind_click(randomize_color)

    frame.mainloop()
    #ShowText(None, 'title', ((('this is text'*150)+'\n')*5))
if __name__ == '__main__':
    # Run the interactive demonstration when this module is executed
    # directly (requires a display).
    demo()
from cfg import *
from chart import *
from plot import *
from rdparser import *
from srparser import *
from tree import *
| Python |
# Natural Language Toolkit: Graphical Representations for Trees
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
#
# $Id: tree.py 3667 2006-11-02 23:28:44Z stevenbird $
"""
Graphically display a C{Tree}.
"""
from Tkinter import *
from en.parser.nltk_lite.parse import tree
from en.parser.nltk_lite.draw import *
import sys
##//////////////////////////////////////////////////////
## Tree Segment
##//////////////////////////////////////////////////////
class TreeSegmentWidget(CanvasWidget):
    """
    A canvas widget that displays a single segment of a hierarchical
    tree.  Each C{TreeSegmentWidget} connects a single X{node widget}
    to a sequence of zero or more X{subtree widgets}.  By default, the
    bottom of the node is connected to the top of each subtree by a
    single line.  However, if the C{roof} attribute is set, then a
    single triangular "roof" will connect the node to all of its
    children.

    Attributes:
      - C{roof}: What sort of connection to draw between the node and
        its subtrees.  If C{roof} is true, draw a single triangular
        "roof" over the subtrees.  If C{roof} is false, draw a line
        between each subtree and the node.  Default value is false.
      - C{xspace}: The amount of horizontal space to leave between
        subtrees when managing this widget.  Default value is 10.
      - C{yspace}: The amount of space to place between the node and
        its children when managing this widget.  Default value is 15.
      - C{color}: The color of the lines connecting the node to its
        subtrees; and of the outline of the triangular roof.  Default
        value is C{'#006060'}.
      - C{fill}: The fill color for the triangular roof.  Default
        value is C{''} (no fill).
      - C{width}: The width of the lines connecting the node to its
        subtrees; and of the outline of the triangular roof.  Default
        value is 1.
      - C{orientation}: Determines whether the tree branches downwards
        or rightwards.  Possible values are C{'horizontal'} and
        C{'vertical'}.  The default value is C{'vertical'} (i.e.,
        branch downwards).
      - C{draggable}: whether the widget can be dragged by the user.

    The following attributes may also be added in the near future:
      - C{lineM{n}_color}: The color of the line connecting the node
        to its C{M{n}}th subtree.
      - C{lineM{n}_color}: The width of the line connecting the node
        to its C{M{n}}th subtree.
      - C{lineM{n}_color}: The dash pattern of the line connecting the
        node to its C{M{n}}th subtree.
    """
    def __init__(self, canvas, node, subtrees, **attribs):
        """
        @type node:
        @type subtrees: C{list} of C{CanvasWidgetI}
        """
        self._node = node
        self._subtrees = subtrees

        # Attributes
        self._horizontal = 0
        self._roof = 0
        self._xspace = 10
        self._yspace = 15
        self._ordered = False

        # Create canvas objects.  self._lines[i] connects the node to
        # self._subtrees[i]; the polygon is the (initially hidden) roof.
        self._lines = [canvas.create_line(0,0,0,0, fill='#006060')
                       for c in subtrees]
        self._polygon = canvas.create_polygon(0,0, fill='', state='hidden',
                                              outline='#006060')

        # Register child widgets (node + subtrees)
        self._add_child_widget(node)
        for subtree in subtrees:
            self._add_child_widget(subtree)

        # Are we currently managing?
        self._managing = False

        CanvasWidget.__init__(self, canvas, **attribs)

    def __setitem__(self, attr, value):
        canvas = self.canvas()
        # BUG FIX: was "attr is 'roof'" -- identity comparison of strings,
        # which only works by accident of interning.
        if attr == 'roof':
            self._roof = value
            if self._roof:
                for l in self._lines: canvas.itemconfig(l, state='hidden')
                canvas.itemconfig(self._polygon, state='normal')
            else:
                for l in self._lines: canvas.itemconfig(l, state='normal')
                canvas.itemconfig(self._polygon, state='hidden')
        elif attr == 'orientation':
            if value == 'horizontal': self._horizontal = 1
            elif value == 'vertical': self._horizontal = 0
            else:
                raise ValueError('orientation must be horizontal or vertical')
        elif attr == 'color':
            for l in self._lines: canvas.itemconfig(l, fill=value)
            canvas.itemconfig(self._polygon, outline=value)
        elif isinstance(attr, tuple) and attr[0] == 'color':
            # Set the color of an individual line.
            l = self._lines[int(attr[1])]
            canvas.itemconfig(l, fill=value)
        elif attr == 'fill':
            canvas.itemconfig(self._polygon, fill=value)
        elif attr == 'width':
            canvas.itemconfig(self._polygon, {attr:value})
            for l in self._lines: canvas.itemconfig(l, {attr:value})
        elif attr in ('xspace', 'yspace'):
            if attr == 'xspace': self._xspace = value
            elif attr == 'yspace': self._yspace = value
            self.update(self._node)
        elif attr == 'ordered':
            self._ordered = value
        else:
            CanvasWidget.__setitem__(self, attr, value)

    def __getitem__(self, attr):
        if attr == 'roof': return self._roof
        elif attr == 'width':
            return self.canvas().itemcget(self._polygon, attr)
        elif attr == 'color':
            return self.canvas().itemcget(self._polygon, 'outline')
        elif isinstance(attr, tuple) and attr[0] == 'color':
            l = self._lines[int(attr[1])]
            return self.canvas().itemcget(l, 'fill')
        elif attr == 'xspace': return self._xspace
        elif attr == 'yspace': return self._yspace
        elif attr == 'orientation':
            if self._horizontal: return 'horizontal'
            else: return 'vertical'
        elif attr == 'ordered':
            return self._ordered
        else:
            return CanvasWidget.__getitem__(self, attr)

    def node(self):
        """Return the node widget."""
        return self._node

    def subtrees(self):
        """Return a copy of the list of subtree widgets."""
        return self._subtrees[:]

    def set_node(self, node):
        """
        Set the node to C{node}.
        """
        self._remove_child_widget(self._node)
        self._add_child_widget(node)
        self._node = node
        self.update(self._node)

    def replace_child(self, oldchild, newchild):
        """
        Replace the child C{oldchild} with C{newchild}.
        """
        index = self._subtrees.index(oldchild)
        self._subtrees[index] = newchild
        self._remove_child_widget(oldchild)
        self._add_child_widget(newchild)
        self.update(newchild)

    def remove_child(self, child):
        """Remove the given subtree (and its connecting line)."""
        index = self._subtrees.index(child)
        del self._subtrees[index]
        self._remove_child_widget(child)
        # BUG FIX: previously popped the *last* line regardless of which
        # child was removed, desynchronizing self._lines from
        # self._subtrees (which matters for per-line ('color', n) attrs).
        self.canvas().delete(self._lines.pop(index))
        self.update(self._node)

    def insert_child(self, index, child):
        """Insert a new subtree at the given index."""
        self._subtrees.insert(index, child)
        self._add_child_widget(child)
        # BUG FIX: previously referenced an undefined name `canvas` (a
        # NameError) and appended the new line at the end; use the
        # widget's canvas and keep self._lines aligned with subtrees.
        self._lines.insert(index,
                           self.canvas().create_line(0,0,0,0, fill='#006060'))
        self.update(self._node)

    def _tags(self):
        # The roof polygon is shown instead of the lines when roof=1.
        if self._roof:
            return [self._polygon]
        else:
            return self._lines

    def _subtree_top(self, child):
        # The attachment point on a child: the top (or left) middle of
        # its node if it's a tree segment, else of the whole widget.
        if isinstance(child, TreeSegmentWidget):
            bbox = child.node().bbox()
        else:
            bbox = child.bbox()
        if self._horizontal:
            return (bbox[0], (bbox[1]+bbox[3])/2.0)
        else:
            return ((bbox[0]+bbox[2])/2.0, bbox[1])

    def _node_bottom(self):
        # The attachment point on the node: bottom (or right) middle.
        bbox = self._node.bbox()
        if self._horizontal:
            return (bbox[2], (bbox[1]+bbox[3])/2.0)
        else:
            return ((bbox[0]+bbox[2])/2.0, bbox[3])

    def _update(self, child):
        if len(self._subtrees) == 0: return
        if self._node.bbox() is None: return # [XX] ???

        # Which lines need to be redrawn?
        if child is self._node: need_update = self._subtrees
        else: need_update = [child]

        if self._ordered and not self._managing:
            need_update = self._maintain_order(child)

        # Update the polygon.
        (nodex, nodey) = self._node_bottom()
        (xmin, ymin, xmax, ymax) = self._subtrees[0].bbox()
        for subtree in self._subtrees[1:]:
            bbox = subtree.bbox()
            xmin = min(xmin, bbox[0])
            ymin = min(ymin, bbox[1])
            xmax = max(xmax, bbox[2])
            ymax = max(ymax, bbox[3])

        if self._horizontal:
            self.canvas().coords(self._polygon, nodex, nodey, xmin,
                                 ymin, xmin, ymax, nodex, nodey)
        else:
            self.canvas().coords(self._polygon, nodex, nodey, xmin,
                                 ymin, xmax, ymin, nodex, nodey)

        # Redraw all lines that need it.
        for subtree in need_update:
            (nodex, nodey) = self._node_bottom()
            line = self._lines[self._subtrees.index(subtree)]
            (subtreex, subtreey) = self._subtree_top(subtree)
            self.canvas().coords(line, nodex, nodey, subtreex, subtreey)

    def _maintain_order(self, child):
        # Dispatch to the orientation-appropriate layout maintainer;
        # returns the list of widgets that were moved.
        if self._horizontal:
            return self._maintain_order_horizontal(child)
        else:
            return self._maintain_order_vertical(child)

    def _maintain_order_vertical(self, child):
        (left, top, right, bot) = child.bbox()

        if child is self._node:
            # Check all the leaves
            for subtree in self._subtrees:
                (x1, y1, x2, y2) = subtree.bbox()
                if bot+self._yspace > y1:
                    subtree.move(0,bot+self._yspace-y1)
            return self._subtrees
        else:
            moved = [child]
            index = self._subtrees.index(child)

            # Check leaves to our right.
            x = right + self._xspace
            for i in range(index+1, len(self._subtrees)):
                (x1, y1, x2, y2) = self._subtrees[i].bbox()
                if x > x1:
                    self._subtrees[i].move(x-x1, 0)
                    x += x2-x1 + self._xspace
                    moved.append(self._subtrees[i])

            # Check leaves to our left.
            x = left - self._xspace
            for i in range(index-1, -1, -1):
                (x1, y1, x2, y2) = self._subtrees[i].bbox()
                if x < x2:
                    self._subtrees[i].move(x-x2, 0)
                    x -= x2-x1 + self._xspace
                    moved.append(self._subtrees[i])

            # Check the node
            (x1, y1, x2, y2) = self._node.bbox()
            if y2 > top-self._yspace:
                self._node.move(0, top-self._yspace-y2)
                moved = self._subtrees

        # Return a list of the nodes we moved
        return moved

    def _maintain_order_horizontal(self, child):
        (left, top, right, bot) = child.bbox()

        if child is self._node:
            # Check all the leaves
            for subtree in self._subtrees:
                (x1, y1, x2, y2) = subtree.bbox()
                if right+self._xspace > x1:
                    # BUG FIX: move() was called with a single argument;
                    # mirror the vertical case and pass an explicit dy=0.
                    subtree.move(right+self._xspace-x1, 0)
            return self._subtrees
        else:
            moved = [child]
            index = self._subtrees.index(child)

            # Check leaves below us.
            y = bot + self._yspace
            for i in range(index+1, len(self._subtrees)):
                (x1, y1, x2, y2) = self._subtrees[i].bbox()
                if y > y1:
                    self._subtrees[i].move(0, y-y1)
                    y += y2-y1 + self._yspace
                    moved.append(self._subtrees[i])

            # Check leaves above us
            y = top - self._yspace
            for i in range(index-1, -1, -1):
                (x1, y1, x2, y2) = self._subtrees[i].bbox()
                if y < y2:
                    self._subtrees[i].move(0, y-y2)
                    y -= y2-y1 + self._yspace
                    moved.append(self._subtrees[i])

            # Check the node
            (x1, y1, x2, y2) = self._node.bbox()
            if x2 > left-self._xspace:
                self._node.move(left-self._xspace-x2, 0)
                moved = self._subtrees

        # Return a list of the nodes we moved
        return moved

    def _manage_horizontal(self):
        (nodex, nodey) = self._node_bottom()

        # Put the subtrees in a line.
        y = 20
        for subtree in self._subtrees:
            subtree_bbox = subtree.bbox()
            dx = nodex - subtree_bbox[0] + self._xspace
            dy = y - subtree_bbox[1]
            subtree.move(dx, dy)
            y += subtree_bbox[3] - subtree_bbox[1] + self._yspace

        # Find the center of their tops.
        center = 0.0
        for subtree in self._subtrees:
            center += self._subtree_top(subtree)[1]
        center /= len(self._subtrees)

        # Center the subtrees with the node.
        for subtree in self._subtrees:
            subtree.move(0, nodey-center)

    def _manage_vertical(self):
        (nodex, nodey) = self._node_bottom()

        # Put the subtrees in a line.
        x = 0
        for subtree in self._subtrees:
            subtree_bbox = subtree.bbox()
            dy = nodey - subtree_bbox[1] + self._yspace
            dx = x - subtree_bbox[0]
            subtree.move(dx, dy)
            x += subtree_bbox[2] - subtree_bbox[0] + self._xspace

        # Find the center of their tops.
        center = 0.0
        for subtree in self._subtrees:
            center += self._subtree_top(subtree)[0]/len(self._subtrees)

        # Center the subtrees with the node.
        for subtree in self._subtrees:
            subtree.move(nodex-center, 0)

    def _manage(self):
        # _managing suppresses _maintain_order while we lay out children.
        self._managing = True
        (nodex, nodey) = self._node_bottom()
        if len(self._subtrees) == 0: return

        if self._horizontal: self._manage_horizontal()
        else: self._manage_vertical()

        # Update lines to subtrees.
        for subtree in self._subtrees:
            self._update(subtree)

        self._managing = False

    def __repr__(self):
        return '[TreeSeg %s: %s]' % (self._node, self._subtrees)
def _tree_to_treeseg(canvas, t, make_node, make_leaf,
                     tree_attribs, node_attribs,
                     leaf_attribs, loc_attribs):
    # Recursively build the widget hierarchy for `t`.  Leaves become
    # make_leaf widgets; internal Tree nodes become TreeSegmentWidgets
    # whose children are built by recursing over the node's subtrees.
    if not isinstance(t, tree.Tree):
        return make_leaf(canvas, t, **leaf_attribs)

    node_widget = make_node(canvas, t.node, **node_attribs)
    child_widgets = []
    for child in t:
        child_widgets.append(
            _tree_to_treeseg(canvas, child, make_node, make_leaf,
                             tree_attribs, node_attribs,
                             leaf_attribs, loc_attribs))
    return TreeSegmentWidget(canvas, node_widget, child_widgets,
                             **tree_attribs)
def tree_to_treesegment(canvas, t, make_node=TextWidget,
                        make_leaf=TextWidget, **attribs):
    """
    Convert a C{Tree} into a C{TreeSegmentWidget}.

    @param make_node: A C{CanvasWidget} constructor or a function that
        creates C{CanvasWidgets}.  C{make_node} is used to convert
        the C{Tree}'s nodes into C{CanvasWidgets}.  If no constructor
        is specified, then C{TextWidget} will be used.
    @param make_leaf: A C{CanvasWidget} constructor or a function that
        creates C{CanvasWidgets}.  C{make_leaf} is used to convert
        the C{Tree}'s leafs into C{CanvasWidgets}.  If no constructor
        is specified, then C{TextWidget} will be used.
    @param attribs: Attributes for the canvas widgets that make up the
        returned C{TreeSegmentWidget}.  Any attribute beginning with
        C{'tree_'} will be passed to all C{TreeSegmentWidget}s (with
        the C{'tree_'} prefix removed.  Any attribute beginning with
        C{'node_'} will be passed to all nodes.  Any attribute
        beginning with C{'leaf_'} will be passed to all leaves.  And
        any attribute beginning with C{'loc_'} will be passed to all
        text locations (for C{Tree}s).
    @raise ValueError: If an attribute has an unrecognized prefix.
    """
    # Sort the keyword attributes into per-widget-kind buckets by prefix.
    tree_attribs, node_attribs = {}, {}
    leaf_attribs, loc_attribs = {}, {}
    buckets = (('tree_', tree_attribs), ('node_', node_attribs),
               ('leaf_', leaf_attribs), ('loc_', loc_attribs))
    for (key, value) in attribs.items():
        for (prefix, bucket) in buckets:
            if key.startswith(prefix):
                bucket[key[len(prefix):]] = value
                break
        else:
            raise ValueError('Bad attribute: %s' % key)

    return _tree_to_treeseg(canvas, t, make_node, make_leaf,
                            tree_attribs, node_attribs,
                            leaf_attribs, loc_attribs)
##//////////////////////////////////////////////////////
## Tree Widget
##//////////////////////////////////////////////////////
class TreeWidget(CanvasWidget):
"""
A canvas widget that displays a single C{Tree}.
C{TreeWidget} manages a group of C{TreeSegmentWidget}s that are
used to display a C{Tree}.
Attributes:
- C{node_M{attr}}: Sets the attribute C{M{attr}} on all of the
node widgets for this C{TreeWidget}.
- C{node_M{attr}}: Sets the attribute C{M{attr}} on all of the
leaf widgets for this C{TreeWidget}.
- C{loc_M{attr}}: Sets the attribute C{M{attr}} on all of the
location widgets for this C{TreeWidget} (if it was built from
a C{Tree}). Note that location widgets are
C{TextWidget}s.
- C{xspace}: The amount of horizontal space to leave between
subtrees when managing this widget. Default value is 10.
- C{yspace}: The amount of space to place between the node and
its children when managing this widget. Default value is 15.
- C{line_color}: The color of the lines connecting each expanded
node to its subtrees.
- C{roof_color}: The color of the outline of the triangular roof
for collapsed trees.
- C{roof_fill}: The fill color for the triangular roof for
collapsed trees.
- C{width}
- C{orientation}: Determines whether the tree branches downwards
or rightwards. Possible values are C{'horizontal'} and
C{'vertical'}. The default value is C{'vertical'} (i.e.,
branch downwards).
- C{shapeable}: whether the subtrees can be independantly
dragged by the user. THIS property simply sets the
C{DRAGGABLE} property on all of the C{TreeWidget}'s tree
segments.
- C{draggable}: whether the widget can be dragged by the user.
"""
def __init__(self, canvas, t, make_node=TextWidget,
make_leaf=TextWidget, **attribs):
# Node & leaf canvas widget constructors
self._make_node = make_node
self._make_leaf = make_leaf
self._tree = t
# Attributes.
self._nodeattribs = {}
self._leafattribs = {}
self._locattribs = {'color': '#008000'}
self._line_color = '#008080'
self._line_width = 1
self._roof_color = '#008080'
self._roof_fill = '#c0c0c0'
self._shapeable = False
self._xspace = 10
self._yspace = 10
self._orientation = 'vertical'
self._ordered = False
# Build trees.
self._keys = {} # treeseg -> key
self._expanded_trees = {}
self._collapsed_trees = {}
self._nodes = []
self._leaves = []
#self._locs = []
self._make_collapsed_trees(canvas, t, ())
self._treeseg = self._make_expanded_tree(canvas, t, ())
self._add_child_widget(self._treeseg)
CanvasWidget.__init__(self, canvas, **attribs)
def expanded_tree(self, *path_to_tree):
"""
Return the C{TreeSegmentWidget} for the specified subtree.
@param path_to_tree: A list of indices i1, i2, ..., in, where
the desired widget is the widget corresponding to
C{tree.children()[i1].children()[i2]....children()[in]}.
For the root, the path is C{()}.
"""
return self._expanded_trees[path_to_tree]
def collapsed_tree(self, *path_to_tree):
"""
Return the C{TreeSegmentWidget} for the specified subtree.
@param path_to_tree: A list of indices i1, i2, ..., in, where
the desired widget is the widget corresponding to
C{tree.children()[i1].children()[i2]....children()[in]}.
For the root, the path is C{()}.
"""
return self._collapsed_trees[path_to_tree]
def bind_click_trees(self, callback, button=1):
"""
Add a binding to all tree segments.
"""
for tseg in self._expanded_trees.values():
tseg.bind_click(callback, button)
for tseg in self._collapsed_trees.values():
tseg.bind_click(callback, button)
def bind_drag_trees(self, callback, button=1):
"""
Add a binding to all tree segments.
"""
for tseg in self._expanded_trees.values():
tseg.bind_drag(callback, button)
for tseg in self._collapsed_trees.values():
tseg.bind_drag(callback, button)
def bind_click_leaves(self, callback, button=1):
"""
Add a binding to all leaves.
"""
for leaf in self._leaves: leaf.bind_click(callback, button)
for leaf in self._leaves: leaf.bind_click(callback, button)
def bind_drag_leaves(self, callback, button=1):
"""
Add a binding to all leaves.
"""
for leaf in self._leaves: leaf.bind_drag(callback, button)
for leaf in self._leaves: leaf.bind_drag(callback, button)
def bind_click_nodes(self, callback, button=1):
"""
Add a binding to all nodes.
"""
for node in self._nodes: node.bind_click(callback, button)
for node in self._nodes: node.bind_click(callback, button)
def bind_drag_nodes(self, callback, button=1):
"""
Add a binding to all nodes.
"""
for node in self._nodes: node.bind_drag(callback, button)
for node in self._nodes: node.bind_drag(callback, button)
def _make_collapsed_trees(self, canvas, t, key):
if not isinstance(t, tree.Tree): return
make_node = self._make_node
make_leaf = self._make_leaf
node = make_node(canvas, t.node, **self._nodeattribs)
self._nodes.append(node)
leaves = [make_leaf(canvas, l, **self._leafattribs)
for l in t.leaves()]
self._leaves += leaves
treeseg = TreeSegmentWidget(canvas, node, leaves, roof=1,
color=self._roof_color,
fill=self._roof_fill,
width=self._line_width)
self._collapsed_trees[key] = treeseg
self._keys[treeseg] = key
#self._add_child_widget(treeseg)
treeseg.hide()
# Build trees for children.
for i in range(len(t)):
child = t[i]
self._make_collapsed_trees(canvas, child, key + (i,))
def _make_expanded_tree(self, canvas, t, key):
make_node = self._make_node
make_leaf = self._make_leaf
if isinstance(t, tree.Tree):
node = make_node(canvas, t.node, **self._nodeattribs)
self._nodes.append(node)
children = t
subtrees = [self._make_expanded_tree(canvas, children[i], key+(i,))
for i in range(len(children))]
treeseg = TreeSegmentWidget(canvas, node, subtrees,
color=self._line_color,
width=self._line_width)
self._expanded_trees[key] = treeseg
self._keys[treeseg] = key
return treeseg
else:
leaf = make_leaf(canvas, t, **self._leafattribs)
self._leaves.append(leaf)
return leaf
def __setitem__(self, attr, value):
if attr[:5] == 'node_':
for node in self._nodes: node[attr[5:]] = value
elif attr[:5] == 'leaf_':
for leaf in self._leaves: leaf[attr[5:]] = value
elif attr == 'line_color':
self._line_color = value
for tseg in self._expanded_trees.values(): tseg['color'] = value
elif attr == 'line_width':
self._line_width = value
for tseg in self._expanded_trees.values(): tseg['width'] = value
for tseg in self._collapsed_trees.values(): tseg['width'] = value
elif attr == 'roof_color':
self._roof_color = value
for tseg in self._collapsed_trees.values(): tseg['color'] = value
elif attr == 'roof_fill':
self._roof_fill = value
for tseg in self._collapsed_trees.values(): tseg['fill'] = value
elif attr == 'shapeable':
self._shapeable = value
for tseg in self._expanded_trees.values():
tseg['draggable'] = value
for tseg in self._collapsed_trees.values():
tseg['draggable'] = value
for leaf in self._leaves: leaf['draggable'] = value
elif attr == 'xspace':
self._xspace = value
for tseg in self._expanded_trees.values():
tseg['xspace'] = value
for tseg in self._collapsed_trees.values():
tseg['xspace'] = value
self.manage()
elif attr == 'yspace':
self._yspace = value
for tseg in self._expanded_trees.values():
tseg['yspace'] = value
for tseg in self._collapsed_trees.values():
tseg['yspace'] = value
self.manage()
elif attr == 'orientation':
self._orientation = value
for tseg in self._expanded_trees.values():
tseg['orientation'] = value
for tseg in self._collapsed_trees.values():
tseg['orientation'] = value
self.manage()
elif attr == 'ordered':
self._ordered = value
for tseg in self._expanded_trees.values():
tseg['ordered'] = value
for tseg in self._collapsed_trees.values():
tseg['ordered'] = value
else: CanvasWidget.__setitem__(self, attr, value)
def __getitem__(self, attr):
if attr[:5] == 'node_':
return self._nodeattribs.get(attr[5:], None)
elif attr[:5] == 'leaf_':
return self._leafattribs.get(attr[5:], None)
elif attr[:4] == 'loc_':
return self._locattribs.get(attr[4:], None)
elif attr == 'line_color': return self._line_color
elif attr == 'line_width': return self._line_width
elif attr == 'roof_color': return self._roof_color
elif attr == 'roof_fill': return self._roof_fill
elif attr == 'shapeable': return self._shapeable
elif attr == 'xspace': return self._xspace
elif attr == 'yspace': return self._yspace
elif attr == 'orientation': return self._orientation
else: return CanvasWidget.__getitem__(self, attr)
def _tags(self): return []
def _manage(self):
segs = self._expanded_trees.values() + self._collapsed_trees.values()
for tseg in segs:
if tseg.hidden():
tseg.show()
tseg.manage()
tseg.hide()
    def toggle_collapsed(self, treeseg):
        """
        Collapse/expand a tree.

        Swaps the clicked tree segment for its cached counterpart:
        a roofed (collapsed) segment is replaced by the expanded form,
        and vice versa.  The replacement is positioned where the old
        segment was, and the old segment is hidden (not destroyed, so
        it can be swapped back in later).
        """
        old_treeseg = treeseg
        # Each subtree is cached in both an expanded and a collapsed
        # form, keyed by self._keys; pick the form the clicked segment
        # is currently *not* using ('roof' marks the collapsed form).
        if old_treeseg['roof']:
            new_treeseg = self._expanded_trees[self._keys[old_treeseg]]
        else:
            new_treeseg = self._collapsed_trees[self._keys[old_treeseg]]
        # Replace the old tree with the new tree.
        if old_treeseg.parent() is self:
            self._remove_child_widget(old_treeseg)
            self._add_child_widget(new_treeseg)
            self._treeseg = new_treeseg
        else:
            old_treeseg.parent().replace_child(old_treeseg, new_treeseg)
        # Move the new tree to where the old tree was. Show it first,
        # so we can find its bounding box.
        new_treeseg.show()
        (newx, newy) = new_treeseg.node().bbox()[:2]
        (oldx, oldy) = old_treeseg.node().bbox()[:2]
        new_treeseg.move(oldx-newx, oldy-newy)
        # Hide the old tree
        old_treeseg.hide()
        # We could do parent.manage() here instead, if we wanted.
        new_treeseg.parent().update(new_treeseg)
##//////////////////////////////////////////////////////
## draw_trees
##//////////////////////////////////////////////////////
class TreeView(object):
    """
    A Tkinter window that displays one or more tree widgets.  Trees
    are laid out in a roughly square grid, and a menubar provides
    Postscript printing and font-size (zoom) controls.  Used by
    ``draw_trees()``.
    """
    def __init__(self, *trees):
        from en.parser.nltk_lite.draw import CanvasFrame
        from math import sqrt, ceil
        self._trees = trees
        self._top = Tk()
        self._top.title('NLTK')
        # Both Ctrl-x and Ctrl-q close the window.
        self._top.bind('<Control-x>', self.destroy)
        self._top.bind('<Control-q>', self.destroy)
        cf = self._cframe = CanvasFrame(self._top)
        self._top.bind('<Control-p>', self._cframe.print_to_file)
        # Size is variable.
        self._size = IntVar(self._top)
        self._size.set(12)
        # Negative font sizes are pixel sizes in Tk.
        bold = ('helvetica', -self._size.get(), 'bold')
        helv = ('helvetica', -self._size.get())
        # Lay the trees out in a square.
        self._width = int(ceil(sqrt(len(trees))))
        self._widgets = []
        for i in range(len(trees)):
            widget = TreeWidget(cf.canvas(), trees[i], node_font=bold,
                                leaf_color='#008040', node_color='#004080',
                                roof_color='#004040', roof_fill='white',
                                line_color='#004040', draggable=1,
                                leaf_font=helv)
            widget.bind_click_trees(widget.toggle_collapsed)
            self._widgets.append(widget)
            cf.add_widget(widget, 0, 0)
        self._layout()
        self._cframe.pack(expand=1, fill='both')
        self._init_menubar()

    def _layout(self):
        # Position widgets left-to-right, wrapping to a new row every
        # self._width trees; each row starts below the tallest widget
        # seen so far (ymax).
        i = x = y = ymax = 0
        width = self._width
        for i in range(len(self._widgets)):
            widget = self._widgets[i]
            (oldx, oldy) = widget.bbox()[:2]
            if i % width == 0:
                y = ymax
                x = 0
            widget.move(x-oldx, y-oldy)
            x = widget.bbox()[2] + 10
            ymax = max(ymax, widget.bbox()[3] + 10)

    def _init_menubar(self):
        # File menu (print/exit) and Zoom menu (preset font sizes that
        # all share the self._size IntVar).
        menubar = Menu(self._top)
        filemenu = Menu(menubar, tearoff=0)
        filemenu.add_command(label='Print to Postscript', underline=0,
                             command=self._cframe.print_to_file,
                             accelerator='Ctrl-p')
        filemenu.add_command(label='Exit', underline=1,
                             command=self.destroy, accelerator='Ctrl-x')
        menubar.add_cascade(label='File', underline=0, menu=filemenu)
        zoommenu = Menu(menubar, tearoff=0)
        zoommenu.add_radiobutton(label='Tiny', variable=self._size,
                                 underline=0, value=10, command=self.resize)
        zoommenu.add_radiobutton(label='Small', variable=self._size,
                                 underline=0, value=12, command=self.resize)
        zoommenu.add_radiobutton(label='Medium', variable=self._size,
                                 underline=0, value=14, command=self.resize)
        zoommenu.add_radiobutton(label='Large', variable=self._size,
                                 underline=0, value=28, command=self.resize)
        zoommenu.add_radiobutton(label='Huge', variable=self._size,
                                 underline=0, value=50, command=self.resize)
        menubar.add_cascade(label='Zoom', underline=0, menu=zoommenu)
        self._top.config(menu=menubar)

    def resize(self, *e):
        # Rescale fonts, spacing, and line widths to the selected size,
        # then re-run the grid layout.
        bold = ('helvetica', -self._size.get(), 'bold')
        helv = ('helvetica', -self._size.get())
        xspace = self._size.get()
        yspace = self._size.get()
        for widget in self._widgets:
            widget['node_font'] = bold
            widget['leaf_font'] = helv
            widget['xspace'] = xspace
            widget['yspace'] = yspace
            if self._size.get() < 20: widget['line_width'] = 1
            elif self._size.get() < 30: widget['line_width'] = 2
            else: widget['line_width'] = 3
        self._layout()

    def destroy(self, *e):
        # Idempotent: may be invoked from several key bindings and the
        # File menu without error.
        if self._top is None: return
        self._top.destroy()
        self._top = None

    def mainloop(self, *args, **kwargs):
        """
        Enter the Tkinter mainloop. This function must be called if
        this demo is created from a non-interactive program (e.g.
        from a script); otherwise, the demo will close as soon as
        the script completes.
        """
        if in_idle(): return
        self._top.mainloop(*args, **kwargs)
def draw_trees(*trees):
    """
    Open a new window containing a graphical diagram of the given
    trees.
    @rtype: None
    """
    viewer = TreeView(*trees)
    viewer.mainloop()
##//////////////////////////////////////////////////////
## Demo Code
##//////////////////////////////////////////////////////
def demo():
    """Show several demonstration trees in a CanvasFrame, with click
    bindings that recolor, collapse, or reorient them, then enter the
    Tkinter main loop."""
    import random
    def fill(cw):
        # Paint the clicked widget with a random colour string.
        cw['fill'] = '#%06d' % random.randint(0,999999)

    cf = CanvasFrame(width=550, height=450, closeenough=2)

    # Top-left: a TreeWidget built from a parsed Tree with the default
    # node/leaf constructors.
    t = tree.bracket_parse('''
    (S (NP the very big cat)
       (VP (Adv sorta) (V saw) (NP (Det the) (N dog))))''')
    tc = TreeWidget(cf.canvas(), t, draggable=1,
                    node_font=('helvetica', -14, 'bold'),
                    leaf_font=('helvetica', -12, 'italic'),
                    roof_fill='white', roof_color='black',
                    leaf_color='green4', node_color='blue2')
    cf.add_widget(tc,10,10)

    # Top-right: a TreeWidget using custom node/leaf widget
    # constructors (boxes for nodes, ovals for leaves).
    def boxit(canvas, text):
        big = ('helvetica', -16, 'bold')
        return BoxWidget(canvas, TextWidget(canvas, text,
                                            font=big), fill='green')
    def ovalit(canvas, text):
        return OvalWidget(canvas, TextWidget(canvas, text),
                          fill='cyan')
    treetok = tree.bracket_parse('(S (NP this tree) (VP (V is) (AdjP shapeable)))')
    tc2 = TreeWidget(cf.canvas(), treetok, boxit, ovalit, shapeable=1)

    def color(node):
        # Right-click handler: randomly recolor a node label.
        node['color'] = '#%04d00' % random.randint(0,9999)
    def color2(treeseg):
        # Right-click handler: recolor a tree segment's node box.
        treeseg.node()['fill'] = '#%06d' % random.randint(0,9999)
        treeseg.node().child()['color'] = 'white'

    tc.bind_click_trees(tc.toggle_collapsed)
    tc2.bind_click_trees(tc2.toggle_collapsed)
    tc.bind_click_nodes(color, 3)
    tc2.expanded_tree(1).bind_click(color2, 3)
    tc2.expanded_tree().bind_click(color2, 3)

    paren = ParenWidget(cf.canvas(), tc2)
    cf.add_widget(paren, tc.bbox()[2]+10, 10)

    # Bottom-left: built directly with tree_to_treesegment.
    tree3 = tree.bracket_parse('''
    (S (NP this tree) (AUX was)
       (VP (V built) (PP (P with) (NP (N tree_to_treesegment)))))''')
    tc3 = tree_to_treesegment(cf.canvas(), tree3, tree_color='green4',
                              tree_xspace=2, tree_width=2)
    tc3['draggable'] = 1
    cf.add_widget(tc3, 10, tc.bbox()[3]+10)

    def orientswitch(treewidget):
        # Toggle between horizontal and vertical layout, updating the
        # matching leaf text in every cached (expanded and collapsed)
        # subtree so the display stays truthful.
        if treewidget['orientation'] == 'horizontal':
            treewidget.expanded_tree(1,1).subtrees()[0].set_text('vertical')
            treewidget.collapsed_tree(1,1).subtrees()[0].set_text('vertical')
            treewidget.collapsed_tree(1).subtrees()[1].set_text('vertical')
            treewidget.collapsed_tree().subtrees()[3].set_text('vertical')
            treewidget['orientation'] = 'vertical'
        else:
            treewidget.expanded_tree(1,1).subtrees()[0].set_text('horizontal')
            treewidget.collapsed_tree(1,1).subtrees()[0].set_text('horizontal')
            treewidget.collapsed_tree(1).subtrees()[1].set_text('horizontal')
            treewidget.collapsed_tree().subtrees()[3].set_text('horizontal')
            treewidget['orientation'] = 'horizontal'

    text = """
Try clicking, right clicking, and dragging
different elements of each of the trees.
The top-left tree is a TreeWidget built from
a Tree. The top-right is a TreeWidget built
from a Tree, using non-default widget
constructors for the nodes & leaves (BoxWidget
and OvalWidget). The bottom-left tree is
built from tree_to_treesegment."""
    twidget = TextWidget(cf.canvas(), text.strip())
    textbox = BoxWidget(cf.canvas(), twidget, fill='white', draggable=1)
    cf.add_widget(textbox, tc3.bbox()[2]+10, tc2.bbox()[3]+10)

    # Bottom-right: a horizontally-oriented tree that flips
    # orientation when clicked.
    tree4 = tree.bracket_parse('(S (NP this tree) (VP (V is) (Adj horizontal)))')
    tc4 = TreeWidget(cf.canvas(), tree4, draggable=1,
                     line_color='brown2', roof_color='brown2',
                     node_font=('helvetica', -12, 'bold'),
                     node_color='brown4', orientation='horizontal')
    tc4.manage()
    cf.add_widget(tc4, tc3.bbox()[2]+10, textbox.bbox()[3]+10)
    tc4.bind_click(orientswitch)
    tc4.bind_click_trees(tc4.toggle_collapsed, 3)

    # Run mainloop
    cf.mainloop()
# Start the graphical demonstration when run as a script.
if __name__ == '__main__':
    demo()
| Python |
#!/usr/bin/env python
#
# Copyright (c) 2002 Vivake Gupta (vivakeATomniscia.org). All rights reserved.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
# This software is maintained by Vivake (vivakeATomniscia.org) and is available at:
# http://www.omniscia.org/~vivake/python/PorterStemmer.py
#
# Additional modifications were made to incorporate this module into
# nltk. All such modifications are marked with "--NLTK--". The nltk
# version of this module is maintained by the NLTK development staff,
# and is available from the NLTK webpage:
# <http://nltk.sourceforge.net>
"""Porter Stemming Algorithm
This is the Porter stemming algorithm, ported to Python from the
version coded up in ANSI C by the author. It follows the algorithm
presented in
Porter, M. "An algorithm for suffix stripping." Program 14.3 (1980): 130-137.
only differing from it at the points marked --DEPARTURE-- and --NEW--
below.
For a more faithful version of the Porter algorithm, see
http://www.tartarus.org/~martin/PorterStemmer/
Later additions:
June 2000
The 'l' of the 'logi' -> 'log' rule is put with the stem, so that
short stems like 'geo' 'theo' etc work like 'archaeo' 'philo' etc.
This follows a suggestion of Barry Wilkins, research student at
Birmingham.
February 2000
the cvc test for not dropping final -e now looks after vc at the
beginning of a word, so are, eve, ice, ore, use keep final -e. In this
test c is any consonant, including w, x and y. This extension was
suggested by Chris Emerson.
-fully -> -ful treated like -fulness -> -ful, and
-tionally -> -tion treated like -tional -> -tion
both in Step 2. These were suggested by Hiranmay Ghosh, of New Delhi.
Invariants proceed, succeed, exceed. Also suggested by Hiranmay Ghosh.
Additional modifications were made to incorporate this module into
nltk. All such modifications are marked with \"--NLTK--\". The nltk
version of this module is maintained by the NLTK developers, and is
available from <http://nltk.sourceforge.net>
"""
## --NLTK--
## Declare this module's documentation format.
__docformat__ = 'plaintext'
import sys
import re
import string
## --NLTK--
## Import the nltk.stemmer module, which defines the stemmer interface
from en.parser.nltk_lite.stem import *
class Porter(StemI):
    ## --NLTK--
    ## Add a module docstring
    """
    A word stemmer based on the Porter stemming algorithm.
    Porter, M. \"An algorithm for suffix stripping.\"
    Program 14.3 (1980): 130-137.
    A few minor modifications have been made to Porter's basic
    algorithm. See the source code of this module for more
    information.
    The Porter Stemmer requires that all tokens have string types.
    """
    def __init__(self):
        """The main part of the stemming algorithm starts here.
        b is a buffer holding a word to be stemmed. The letters are in b[k0],
        b[k0+1] ... ending at b[k]. In fact k0 = 0 in this demo program. k is
        readjusted downwards as the stemming progresses. Zero termination is
        not in fact used in the algorithm.
        Note that only lower case sequences are stemmed. Forcing to lower case
        should be done before stem(...) is called.
        """
        self.b = ""  # buffer for word to be stemmed
        self.k = 0   # index of the last letter of the current stem
        self.k0 = 0  # index of the first letter of the word
        self.j = 0   # j is a general offset into the string
        ## --NEW--
        ## This is a table of irregular forms. It is quite short, but still
        ## reflects the errors actually drawn to Martin Porter's attention over
        ## a 20 year period!
        ##
        ## Extend it as necessary.
        ##
        ## The form of the table is:
        ##  {
        ##  "p1" : ["s11","s12","s13", ... ],
        ##  "p2" : ["s21","s22","s23", ... ],
        ##  ...
        ##  "pn" : ["sn1","sn2","sn3", ... ]
        ##  }
        ##
        ## String sij is mapped to paradigm form pi, and the main stemming
        ## process is then bypassed.
        irregular_forms = {
            "sky" : ["sky", "skies"],
            "die" : ["dying"],
            "lie" : ["lying"],
            "tie" : ["tying"],
            "news" : ["news"],
            "inning" : ["innings", "inning"],
            "outing" : ["outings", "outing"],
            "canning" : ["cannings", "canning"],
            "howe" : ["howe"],
            # --NEW--
            "proceed" : ["proceed"],
            "exceed" : ["exceed"],
            "succeed" : ["succeed"], # Hiranmay Ghosh
            }
        # Invert the table so each irregular surface form maps straight
        # to its paradigm form.
        self.pool = {}
        for key in irregular_forms.keys():
            for val in irregular_forms[key]:
                self.pool[val] = key

    def cons(self, i):
        """cons(i) is TRUE <=> b[i] is a consonant."""
        if self.b[i] == 'a' or self.b[i] == 'e' or self.b[i] == 'i' or self.b[i] == 'o' or self.b[i] == 'u':
            return 0
        if self.b[i] == 'y':
            # 'y' at the start of the word is a consonant; elsewhere it is
            # a consonant exactly when the preceding letter is not one.
            if i == self.k0:
                return 1
            else:
                return (not self.cons(i - 1))
        return 1

    def m(self):
        """m() measures the number of consonant sequences between k0 and j.
        if c is a consonant sequence and v a vowel sequence, and <..>
        indicates arbitrary presence,
        <c><v> gives 0
        <c>vc<v> gives 1
        <c>vcvc<v> gives 2
        <c>vcvcvc<v> gives 3
        ....
        """
        n = 0
        i = self.k0
        # Skip an optional leading consonant sequence.
        while 1:
            if i > self.j:
                return n
            if not self.cons(i):
                break
            i = i + 1
        i = i + 1
        # Count each vowel-sequence + consonant-sequence pair.
        while 1:
            while 1:
                if i > self.j:
                    return n
                if self.cons(i):
                    break
                i = i + 1
            i = i + 1
            n = n + 1
            while 1:
                if i > self.j:
                    return n
                if not self.cons(i):
                    break
                i = i + 1
            i = i + 1

    def vowelinstem(self):
        """vowelinstem() is TRUE <=> k0,...j contains a vowel"""
        for i in range(self.k0, self.j + 1):
            if not self.cons(i):
                return 1
        return 0

    def doublec(self, j):
        """doublec(j) is TRUE <=> j,(j-1) contain a double consonant."""
        if j < (self.k0 + 1):
            return 0
        if (self.b[j] != self.b[j-1]):
            return 0
        return self.cons(j)

    def cvc(self, i):
        """cvc(i) is TRUE <=>
        a) ( --NEW--) i == 1, and p[0] p[1] is vowel consonant, or
        b) p[i - 2], p[i - 1], p[i] has the form consonant -
        vowel - consonant and also if the second c is not w, x or y. this
        is used when trying to restore an e at the end of a short word.
        e.g.
        cav(e), lov(e), hop(e), crim(e), but
        snow, box, tray.
        """
        if i == 0: return 0 # i == 0 never happens perhaps
        if i == 1: return (not self.cons(0) and self.cons(1))
        if not self.cons(i) or self.cons(i-1) or not self.cons(i-2): return 0
        ch = self.b[i]
        if ch == 'w' or ch == 'x' or ch == 'y':
            return 0
        return 1

    def ends(self, s):
        """ends(s) is TRUE <=> k0,...k ends with the string s."""
        length = len(s)
        if s[length - 1] != self.b[self.k]: # tiny speed-up
            return 0
        if length > (self.k - self.k0 + 1):
            return 0
        if self.b[self.k-length+1:self.k+1] != s:
            return 0
        # Side effect: j marks the end of the stem before the suffix.
        self.j = self.k - length
        return 1

    def setto(self, s):
        """setto(s) sets (j+1),...k to the characters in the string s, readjusting k."""
        length = len(s)
        self.b = self.b[:self.j+1] + s + self.b[self.j+length+1:]
        self.k = self.j + length

    def r(self, s):
        """r(s) is used further down."""
        # Replace the suffix only when the stem's measure is positive.
        if self.m() > 0:
            self.setto(s)

    def step1ab(self):
        """step1ab() gets rid of plurals and -ed or -ing. e.g.
        caresses -> caress
        ponies -> poni
        sties -> sti
        tie -> tie (--NEW--: see below)
        caress -> caress
        cats -> cat
        feed -> feed
        agreed -> agree
        disabled -> disable
        matting -> mat
        mating -> mate
        meeting -> meet
        milling -> mill
        messing -> mess
        meetings -> meet
        """
        if self.b[self.k] == 's':
            if self.ends("sses"):
                self.k = self.k - 2
            elif self.ends("ies"):
                if self.j == 0:
                    self.k = self.k - 1
                # this line extends the original algorithm, so that
                # 'flies'->'fli' but 'dies'->'die' etc
                else:
                    self.k = self.k - 2
            elif self.b[self.k - 1] != 's':
                self.k = self.k - 1
        if self.ends("ied"):
            if self.j == 0:
                self.k = self.k - 1
            else:
                self.k = self.k - 2
            # this line extends the original algorithm, so that
            # 'spied'->'spi' but 'died'->'die' etc
        elif self.ends("eed"):
            if self.m() > 0:
                self.k = self.k - 1
        elif (self.ends("ed") or self.ends("ing")) and self.vowelinstem():
            self.k = self.j
            if self.ends("at"): self.setto("ate")
            elif self.ends("bl"): self.setto("ble")
            elif self.ends("iz"): self.setto("ize")
            elif self.doublec(self.k):
                # Undouble a final double consonant, except l, s, z.
                self.k = self.k - 1
                ch = self.b[self.k]
                if ch == 'l' or ch == 's' or ch == 'z':
                    self.k = self.k + 1
            elif (self.m() == 1 and self.cvc(self.k)):
                self.setto("e")

    def step1c(self):
        """step1c() turns terminal y to i when there is another vowel in the stem.
        --NEW--: This has been modified from the original Porter algorithm so that y->i
        is only done when y is preceded by a consonant, but not if the stem
        is only a single consonant, i.e.
        (*c and not c) Y -> I
        So 'happy' -> 'happi', but
        'enjoy' -> 'enjoy' etc
        This is a much better rule. Formerly 'enjoy'->'enjoi' and 'enjoyment'->
        'enjoy'. Step 1c is perhaps done too soon; but with this modification that
        no longer really matters.
        Also, the removal of the vowelinstem(z) condition means that 'spy', 'fly',
        'try' ... stem to 'spi', 'fli', 'tri' and conflate with 'spied', 'tried',
        'flies' ...
        """
        if self.ends("y") and self.j > 0 and self.cons(self.k - 1):
            self.b = self.b[:self.k] + 'i' + self.b[self.k+1:]

    def step2(self):
        """step2() maps double suffices to single ones.
        so -ization ( = -ize plus -ation) maps to -ize etc. note that the
        string before the suffix must give m() > 0.
        """
        # Dispatch on the penultimate letter for speed (Porter's trick).
        if self.b[self.k - 1] == 'a':
            if self.ends("ational"): self.r("ate")
            elif self.ends("tional"): self.r("tion")
        elif self.b[self.k - 1] == 'c':
            if self.ends("enci"): self.r("ence")
            elif self.ends("anci"): self.r("ance")
        elif self.b[self.k - 1] == 'e':
            if self.ends("izer"): self.r("ize")
        elif self.b[self.k - 1] == 'l':
            if self.ends("bli"): self.r("ble") # --DEPARTURE--
            # To match the published algorithm, replace this phrase with
            #   if self.ends("abli"): self.r("able")
            elif self.ends("alli"):
                if self.m() > 0: # --NEW--
                    self.setto("al")
                    self.step2()
            elif self.ends("fulli"): self.r("ful") # --NEW--
            elif self.ends("entli"): self.r("ent")
            elif self.ends("eli"): self.r("e")
            elif self.ends("ousli"): self.r("ous")
        elif self.b[self.k - 1] == 'o':
            if self.ends("ization"): self.r("ize")
            elif self.ends("ation"): self.r("ate")
            elif self.ends("ator"): self.r("ate")
        elif self.b[self.k - 1] == 's':
            if self.ends("alism"): self.r("al")
            elif self.ends("iveness"): self.r("ive")
            elif self.ends("fulness"): self.r("ful")
            elif self.ends("ousness"): self.r("ous")
        elif self.b[self.k - 1] == 't':
            if self.ends("aliti"): self.r("al")
            elif self.ends("iviti"): self.r("ive")
            elif self.ends("biliti"): self.r("ble")
        elif self.b[self.k - 1] == 'g': # --DEPARTURE--
            if self.ends("logi"):
                # Keep the 'l' with the stem (Barry Wilkins' suggestion).
                self.j = self.j + 1 # --NEW-- (Barry Wilkins)
                self.r("og")
            # To match the published algorithm, delete this phrase

    def step3(self):
        """step3() deals with -ic-, -full, -ness etc. similar strategy to step2."""
        if self.b[self.k] == 'e':
            if self.ends("icate"): self.r("ic")
            elif self.ends("ative"): self.r("")
            elif self.ends("alize"): self.r("al")
        elif self.b[self.k] == 'i':
            if self.ends("iciti"): self.r("ic")
        elif self.b[self.k] == 'l':
            if self.ends("ical"): self.r("ic")
            elif self.ends("ful"): self.r("")
        elif self.b[self.k] == 's':
            if self.ends("ness"): self.r("")

    def step4(self):
        """step4() takes off -ant, -ence etc., in context <c>vcvc<v>."""
        # Each branch only identifies the suffix (setting j via ends());
        # the actual removal happens once, below, when m() > 1.
        if self.b[self.k - 1] == 'a':
            if self.ends("al"): pass
            else: return
        elif self.b[self.k - 1] == 'c':
            if self.ends("ance"): pass
            elif self.ends("ence"): pass
            else: return
        elif self.b[self.k - 1] == 'e':
            if self.ends("er"): pass
            else: return
        elif self.b[self.k - 1] == 'i':
            if self.ends("ic"): pass
            else: return
        elif self.b[self.k - 1] == 'l':
            if self.ends("able"): pass
            elif self.ends("ible"): pass
            else: return
        elif self.b[self.k - 1] == 'n':
            if self.ends("ant"): pass
            elif self.ends("ement"): pass
            elif self.ends("ment"): pass
            elif self.ends("ent"): pass
            else: return
        elif self.b[self.k - 1] == 'o':
            if self.ends("ion") and (self.b[self.j] == 's' or self.b[self.j] == 't'): pass
            elif self.ends("ou"): pass
            # takes care of -ous
            else: return
        elif self.b[self.k - 1] == 's':
            if self.ends("ism"): pass
            else: return
        elif self.b[self.k - 1] == 't':
            if self.ends("ate"): pass
            elif self.ends("iti"): pass
            else: return
        elif self.b[self.k - 1] == 'u':
            if self.ends("ous"): pass
            else: return
        elif self.b[self.k - 1] == 'v':
            if self.ends("ive"): pass
            else: return
        elif self.b[self.k - 1] == 'z':
            if self.ends("ize"): pass
            else: return
        else:
            return
        if self.m() > 1:
            self.k = self.j

    def step5(self):
        """step5() removes a final -e if m() > 1, and changes -ll to -l if
        m() > 1.
        """
        self.j = self.k
        if self.b[self.k] == 'e':
            a = self.m()
            if a > 1 or (a == 1 and not self.cvc(self.k-1)):
                self.k = self.k - 1
        if self.b[self.k] == 'l' and self.doublec(self.k) and self.m() > 1:
            self.k = self.k -1

    def stem_word(self, p, i=0, j=None):
        """In stem(p,i,j), p is a char pointer, and the string to be stemmed
        is from p[i] to p[j] inclusive. Typically i is zero and j is the
        offset to the last character of a string, (p[j+1] == '\0'). The
        stemmer adjusts the characters p[i] ... p[j] and returns the new
        end-point of the string, k. Stemming never increases word length, so
        i <= k <= j. To turn the stemmer into a module, declare 'stem' as
        extern, and delete the remainder of this file.
        """
        ## --NLTK--
        ## Don't print results as we go (commented out the next line)
        #print p[i:j+1]
        if j == None:
            j = len(p) - 1
        # copy the parameters into statics
        self.b = p
        self.k = j
        self.k0 = i
        # Irregular forms bypass the algorithm entirely.
        if self.pool.has_key(self.b[self.k0:self.k+1]):
            return self.pool[self.b[self.k0:self.k+1]]
        if self.k <= self.k0 + 1:
            return self.b # --DEPARTURE--
        # With this line, strings of length 1 or 2 don't go through the
        # stemming process, although no mention is made of this in the
        # published algorithm. Remove the line to match the published
        # algorithm.
        self.step1ab()
        self.step1c()
        self.step2()
        self.step3()
        self.step4()
        self.step5()
        return self.b[self.k0:self.k+1]

    def adjust_case(self, word, stem):
        # Re-apply the original word's casing to the (lower-case) stem,
        # character by character.
        lower = string.lower(word)
        ret = ""
        for x in xrange(len(stem)):
            if lower[x] == stem[x]:
                ret = ret + word[x]
            else:
                ret = ret + stem[x]
        return ret

    ## --NLTK--
    ## Don't use this procedure; we want to work with individual
    ## tokens, instead. (commented out the following procedure)
    #def stem(self, text):
    # parts = re.split("(\W+)", text)
    # numWords = (len(parts) + 1)/2
    #
    # ret = ""
    # for i in xrange(numWords):
    # word = parts[2 * i]
    # separator = ""
    # if ((2 * i) + 1) < len(parts):
    # separator = parts[(2 * i) + 1]
    #
    # stem = self.stem_word(string.lower(word), 0, len(word) - 1)
    # ret = ret + self.adjust_case(word, stem)
    # ret = ret + separator
    # return ret

    ## --NLTK--
    ## Define a stem() method that implements the StemmerI interface.
    def stem(self, word):
        """Return the stem of ``word``, preserving its original casing."""
        stem = self.stem_word(string.lower(word), 0, len(word) - 1)
        return self.adjust_case(word, stem)

    ## --NLTK--
    ## Add a string representation function
    def __repr__(self):
        return '<Porter Stemmer>'
## --NLTK--
## This test procedure isn't applicable.
#if __name__ == '__main__':
# p = Porter()
# if len(sys.argv) > 1:
# for f in sys.argv[1:]:
# infile = open(f, 'r')
# while 1:
# w = infile.readline()
# if w == '':
# break
# w = w[:-1]
# print p.stem(w)
##--NLTK--
## Added a demo() function
def demo():
"""
A demonstration of the porter stemmer on a sample from
the Penn Treebank corpus.
"""
from en.parser.nltk_lite.corpora import treebank
from en.parser.nltk_lite import stem
stemmer = stem.Porter()
i = 0
orig = []
stemmed = []
for sent in treebank.raw():
for word in sent:
orig.append(word)
sword = stemmer.stem(word)
stemmed.append(sword)
i+=1
if i>3: break
# Convert the results to a string, and word-wrap them.
results = ' '.join(stemmed)
results = re.sub(r"(.{,70})\s", r'\1\n', results+' ').rstrip()
# Convert the original to a string, and word wrap it.
original = ' '.join(orig)
original = re.sub(r"(.{,70})\s", r'\1\n', original+' ').rstrip()
# Print the results.
print '-Original-'.center(70).replace(' ', '*').replace('-', ' ')
print original
print '-Results-'.center(70).replace(' ', '*').replace('-', ' ')
print results
print '*'*70
##--NLTK--
## Call demo() if we're invoked directly.
# Run the demonstration when invoked directly.
if __name__ == '__main__': demo()
| Python |
# Natural Language Toolkit: Stemmers
#
# Copyright (C) 2001-2006 University of Melbourne
# Author: Trevor Cohn <tacohn@cs.mu.oz.au>
# Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@csse.unimelb.edu.au>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
from en.parser.nltk_lite.stem import *
class Regexp(StemI):
    """
    A stemmer that uses regular expressions to identify morphological
    affixes. Any substrings that matches the regular expressions will
    be removed.
    """
    def __init__(self, regexp, min=0):
        """
        Create a new regexp stemmer.
        @type regexp: C{string} or C{regexp}
        @param regexp: The regular expression that should be used to
            identify morphological affixes.
        @type min: int
        @param min: The minimum length of string to stem
        """
        # ROBUSTNESS FIX: this module never imports ``re`` itself; the
        # name was only reaching this code as a side effect of the
        # ``stem`` wildcard import.  Import it locally so compilation
        # of a string pattern cannot fail with a NameError.
        import re
        # Accept either a pattern string or an already-compiled regexp
        # (compiled patterns expose a ``pattern`` attribute).
        if not hasattr(regexp, 'pattern'):
            regexp = re.compile(regexp)
        self._regexp = regexp
        self._min = min

    def stem(self, word):
        """Return ``word`` with every affix match removed, unless the
        word is shorter than the configured minimum length."""
        if len(word) < self._min:
            return word
        else:
            return self._regexp.sub('', word)

    def __repr__(self):
        return '<Regexp Stemmer: %r>' % self._regexp.pattern
def demo():
from en.parser.nltk_lite import tokenize, stem
# Create a simple regular expression based stemmer
stemmer = stem.Regexp('ing$|s$|e$', min=4)
text = "John was eating icecream"
tokens = tokenize.whitespace(text)
# Print the results.
print stemmer
for word in tokens:
print '%20s => %s' % (word, stemmer.stem(word))
print
# Run the demonstration when invoked directly.
if __name__ == '__main__': demo()
| Python |
#!/usr/bin/env python
#
# Copyright (c) 2002 Vivake Gupta (vivakeATomniscia.org). All rights reserved.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
# This software is maintained by Vivake (vivakeATomniscia.org) and is available at:
# http://www.omniscia.org/~vivake/python/PorterStemmer.py
#
# Additional modifications were made to incorporate this module into
# nltk. All such modifications are marked with "--NLTK--". The nltk
# version of this module is maintained by the NLTK development staff,
# and is available from the NLTK webpage:
# <http://nltk.sourceforge.net>
"""Porter Stemming Algorithm
This is the Porter stemming algorithm, ported to Python from the
version coded up in ANSI C by the author. It follows the algorithm
presented in
Porter, M. "An algorithm for suffix stripping." Program 14.3 (1980): 130-137.
only differing from it at the points maked --DEPARTURE-- and --NEW--
below.
For a more faithful version of the Porter algorithm, see
http://www.tartarus.org/~martin/PorterStemmer/
Later additions:
June 2000
The 'l' of the 'logi' -> 'log' rule is put with the stem, so that
short stems like 'geo' 'theo' etc work like 'archaeo' 'philo' etc.
This follows a suggestion of Barry Wilkins, reasearch student at
Birmingham.
February 2000
the cvc test for not dropping final -e now looks after vc at the
beginning of a word, so are, eve, ice, ore, use keep final -e. In this
test c is any consonant, including w, x and y. This extension was
suggested by Chris Emerson.
-fully -> -ful treated like -fulness -> -ful, and
-tionally -> -tion treated like -tional -> -tion
both in Step 2. These were suggested by Hiranmay Ghosh, of New Delhi.
Invariants proceed, succeed, exceed. Also suggested by Hiranmay Ghosh.
Additional modifications were made to incorperate this module into
nltk. All such modifications are marked with \"--NLTK--\". The nltk
version of this module is maintained by the NLTK developers, and is
available from <http://nltk.sourceforge.net>
"""
## --NLTK--
## Declare this module's documentation format.
__docformat__ = 'plaintext'
import sys
import re
import string
## --NLTK--
## Import the nltk.stemmer module, which defines the stemmer interface
from en.parser.nltk_lite.stem import *
class Porter(StemI):
## --NLTK--
## Add a module docstring
"""
A word stemmer based on the Porter stemming algorithm.
Porter, M. \"An algorithm for suffix stripping.\"
Program 14.3 (1980): 130-137.
A few minor modifications have been made to Porter's basic
algorithm. See the source code of this module for more
information.
The Porter Stemmer requires that all tokens have string types.
"""
def __init__(self):
"""The main part of the stemming algorithm starts here.
b is a buffer holding a word to be stemmed. The letters are in b[k0],
b[k0+1] ... ending at b[k]. In fact k0 = 0 in this demo program. k is
readjusted downwards as the stemming progresses. Zero termination is
not in fact used in the algorithm.
Note that only lower case sequences are stemmed. Forcing to lower case
should be done before stem(...) is called.
"""
self.b = "" # buffer for word to be stemmed
self.k = 0
self.k0 = 0
self.j = 0 # j is a general offset into the string
## --NEW--
## This is a table of irregular forms. It is quite short, but still
## reflects the errors actually drawn to Martin Porter's attention over
## a 20 year period!
##
## Extend it as necessary.
##
## The form of the table is:
## {
## "p1" : ["s11","s12","s13", ... ],
## "p2" : ["s21","s22","s23", ... ],
## ...
## "pn" : ["sn1","sn2","sn3", ... ]
## }
##
## String sij is mapped to paradigm form pi, and the main stemming
## process is then bypassed.
irregular_forms = {
"sky" : ["sky", "skies"],
"die" : ["dying"],
"lie" : ["lying"],
"tie" : ["tying"],
"news" : ["news"],
"inning" : ["innings", "inning"],
"outing" : ["outings", "outing"],
"canning" : ["cannings", "canning"],
"howe" : ["howe"],
# --NEW--
"proceed" : ["proceed"],
"exceed" : ["exceed"],
"succeed" : ["succeed"], # Hiranmay Ghosh
}
self.pool = {}
for key in irregular_forms.keys():
for val in irregular_forms[key]:
self.pool[val] = key
def cons(self, i):
"""cons(i) is TRUE <=> b[i] is a consonant."""
if self.b[i] == 'a' or self.b[i] == 'e' or self.b[i] == 'i' or self.b[i] == 'o' or self.b[i] == 'u':
return 0
if self.b[i] == 'y':
if i == self.k0:
return 1
else:
return (not self.cons(i - 1))
return 1
def m(self):
"""m() measures the number of consonant sequences between k0 and j.
if c is a consonant sequence and v a vowel sequence, and <..>
indicates arbitrary presence,
<c><v> gives 0
<c>vc<v> gives 1
<c>vcvc<v> gives 2
<c>vcvcvc<v> gives 3
....
"""
n = 0
i = self.k0
while 1:
if i > self.j:
return n
if not self.cons(i):
break
i = i + 1
i = i + 1
while 1:
while 1:
if i > self.j:
return n
if self.cons(i):
break
i = i + 1
i = i + 1
n = n + 1
while 1:
if i > self.j:
return n
if not self.cons(i):
break
i = i + 1
i = i + 1
def vowelinstem(self):
"""vowelinstem() is TRUE <=> k0,...j contains a vowel"""
for i in range(self.k0, self.j + 1):
if not self.cons(i):
return 1
return 0
def doublec(self, j):
"""doublec(j) is TRUE <=> j,(j-1) contain a double consonant."""
if j < (self.k0 + 1):
return 0
if (self.b[j] != self.b[j-1]):
return 0
return self.cons(j)
def cvc(self, i):
"""cvc(i) is TRUE <=>
a) ( --NEW--) i == 1, and p[0] p[1] is vowel consonant, or
b) p[i - 2], p[i - 1], p[i] has the form consonant -
vowel - consonant and also if the second c is not w, x or y. this
is used when trying to restore an e at the end of a short word.
e.g.
cav(e), lov(e), hop(e), crim(e), but
snow, box, tray.
"""
if i == 0: return 0 # i == 0 never happens perhaps
if i == 1: return (not self.cons(0) and self.cons(1))
if not self.cons(i) or self.cons(i-1) or not self.cons(i-2): return 0
ch = self.b[i]
if ch == 'w' or ch == 'x' or ch == 'y':
return 0
return 1
def ends(self, s):
"""ends(s) is TRUE <=> k0,...k ends with the string s."""
length = len(s)
if s[length - 1] != self.b[self.k]: # tiny speed-up
return 0
if length > (self.k - self.k0 + 1):
return 0
if self.b[self.k-length+1:self.k+1] != s:
return 0
self.j = self.k - length
return 1
def setto(self, s):
"""setto(s) sets (j+1),...k to the characters in the string s, readjusting k."""
length = len(s)
self.b = self.b[:self.j+1] + s + self.b[self.j+length+1:]
self.k = self.j + length
def r(self, s):
"""r(s) is used further down."""
if self.m() > 0:
self.setto(s)
    def step1ab(self):
        """step1ab() gets rid of plurals and -ed or -ing. e.g.
           caresses  ->  caress
           ponies    ->  poni
           sties     ->  sti
           tie       ->  tie       (--NEW--: see below)
           caress    ->  caress
           cats      ->  cat
           feed      ->  feed
           agreed    ->  agree
           disabled  ->  disable
           matting   ->  mat
           mating    ->  mate
           meeting   ->  meet
           milling   ->  mill
           messing   ->  mess
           meetings  ->  meet

        Note: the order of the ends() tests matters -- ends() records the
        stem boundary in self.j, which later branches read.
        """
        if self.b[self.k] == 's':
            if self.ends("sses"):
                self.k = self.k - 2
            elif self.ends("ies"):
                if self.j == 0:
                    self.k = self.k - 1
                # this line extends the original algorithm, so that
                # 'flies'->'fli' but 'dies'->'die' etc
                else:
                    self.k = self.k - 2
            elif self.b[self.k - 1] != 's':
                self.k = self.k - 1
        if self.ends("ied"):
            if self.j == 0:
                self.k = self.k - 1
            else:
                self.k = self.k - 2
        # this line extends the original algorithm, so that
        # 'spied'->'spi' but 'died'->'die' etc
        elif self.ends("eed"):
            if self.m() > 0:
                self.k = self.k - 1
        elif (self.ends("ed") or self.ends("ing")) and self.vowelinstem():
            self.k = self.j
            if self.ends("at"): self.setto("ate")
            elif self.ends("bl"): self.setto("ble")
            elif self.ends("iz"): self.setto("ize")
            elif self.doublec(self.k):
                # Undouble the final consonant unless it is l, s or z.
                self.k = self.k - 1
                ch = self.b[self.k]
                if ch == 'l' or ch == 's' or ch == 'z':
                    self.k = self.k + 1
            elif (self.m() == 1 and self.cvc(self.k)):
                # Short stem: restore a final 'e' (e.g. hop+ing -> hope).
                self.setto("e")
    def step1c(self):
        """step1c() turns terminal y to i when there is another vowel in the stem.

        --NEW--: This has been modified from the original Porter algorithm so
        that y->i is only done when y is preceded by a consonant, but not if
        the stem is only a single consonant, i.e.

            (*c and not c) Y -> I

        So 'happy' -> 'happi', but 'enjoy' -> 'enjoy' etc.  This is a much
        better rule: formerly 'enjoy'->'enjoi' and 'enjoyment'->'enjoy'.
        Step 1c is perhaps done too soon; but with this modification that no
        longer really matters.

        Also, the removal of the vowelinstem() condition means that 'spy',
        'fly', 'try' ... stem to 'spi', 'fli', 'tri' and conflate with
        'spied', 'tried', 'flies' ...
        """
        # ends("y") must run first: it sets self.j, which the j > 0 test reads.
        if self.ends("y") and self.j > 0 and self.cons(self.k - 1):
            self.b = self.b[:self.k] + 'i' + self.b[self.k+1:]
    def step2(self):
        """step2() maps double suffices to single ones.

        So -ization ( = -ize plus -ation) maps to -ize etc.  Note that the
        string before the suffix must give m() > 0 (enforced by r()).
        """
        # Dispatch on the second-to-last character to limit ends() calls.
        if self.b[self.k - 1] == 'a':
            if self.ends("ational"): self.r("ate")
            elif self.ends("tional"): self.r("tion")
        elif self.b[self.k - 1] == 'c':
            if self.ends("enci"): self.r("ence")
            elif self.ends("anci"): self.r("ance")
        elif self.b[self.k - 1] == 'e':
            if self.ends("izer"): self.r("ize")
        elif self.b[self.k - 1] == 'l':
            if self.ends("bli"): self.r("ble") # --DEPARTURE--
            # To match the published algorithm, replace this phrase with
            #   if self.ends("abli"): self.r("able")
            elif self.ends("alli"):
                if self.m() > 0: # --NEW--
                    self.setto("al")
                    # Re-run step2 so e.g. -alli reductions can cascade.
                    self.step2()
            elif self.ends("fulli"): self.r("ful") # --NEW--
            elif self.ends("entli"): self.r("ent")
            elif self.ends("eli"): self.r("e")
            elif self.ends("ousli"): self.r("ous")
        elif self.b[self.k - 1] == 'o':
            if self.ends("ization"): self.r("ize")
            elif self.ends("ation"): self.r("ate")
            elif self.ends("ator"): self.r("ate")
        elif self.b[self.k - 1] == 's':
            if self.ends("alism"): self.r("al")
            elif self.ends("iveness"): self.r("ive")
            elif self.ends("fulness"): self.r("ful")
            elif self.ends("ousness"): self.r("ous")
        elif self.b[self.k - 1] == 't':
            if self.ends("aliti"): self.r("al")
            elif self.ends("iviti"): self.r("ive")
            elif self.ends("biliti"): self.r("ble")
        elif self.b[self.k - 1] == 'g': # --DEPARTURE--
            if self.ends("logi"):
                self.j = self.j + 1 # --NEW-- (Barry Wilkins)
                self.r("og")
            # To match the published algorithm, delete this phrase
    def step3(self):
        """step3() deals with -ic-, -full, -ness etc., using a strategy
        similar to step2 (dispatch on the last character, then r())."""
        if self.b[self.k] == 'e':
            if self.ends("icate"): self.r("ic")
            elif self.ends("ative"): self.r("")
            elif self.ends("alize"): self.r("al")
        elif self.b[self.k] == 'i':
            if self.ends("iciti"): self.r("ic")
        elif self.b[self.k] == 'l':
            if self.ends("ical"): self.r("ic")
            elif self.ends("ful"): self.r("")
        elif self.b[self.k] == 's':
            if self.ends("ness"): self.r("")
    def step4(self):
        """step4() takes off -ant, -ence etc., in context <c>vcvc<v>.

        Each branch dispatches on the penultimate character; `pass` marks a
        recognised suffix (ends() has already set self.j), and any branch
        with no match returns early without changing the word.
        """
        if self.b[self.k - 1] == 'a':
            if self.ends("al"): pass
            else: return
        elif self.b[self.k - 1] == 'c':
            if self.ends("ance"): pass
            elif self.ends("ence"): pass
            else: return
        elif self.b[self.k - 1] == 'e':
            if self.ends("er"): pass
            else: return
        elif self.b[self.k - 1] == 'i':
            if self.ends("ic"): pass
            else: return
        elif self.b[self.k - 1] == 'l':
            if self.ends("able"): pass
            elif self.ends("ible"): pass
            else: return
        elif self.b[self.k - 1] == 'n':
            if self.ends("ant"): pass
            # "ement" must be tested before "ment"/"ent" (longest match first).
            elif self.ends("ement"): pass
            elif self.ends("ment"): pass
            elif self.ends("ent"): pass
            else: return
        elif self.b[self.k - 1] == 'o':
            if self.ends("ion") and (self.b[self.j] == 's' or self.b[self.j] == 't'): pass
            elif self.ends("ou"): pass
            # takes care of -ous
            else: return
        elif self.b[self.k - 1] == 's':
            if self.ends("ism"): pass
            else: return
        elif self.b[self.k - 1] == 't':
            if self.ends("ate"): pass
            elif self.ends("iti"): pass
            else: return
        elif self.b[self.k - 1] == 'u':
            if self.ends("ous"): pass
            else: return
        elif self.b[self.k - 1] == 'v':
            if self.ends("ive"): pass
            else: return
        elif self.b[self.k - 1] == 'z':
            if self.ends("ize"): pass
            else: return
        else:
            return
        # Only strip the recognised suffix when the stem has measure > 1.
        if self.m() > 1:
            self.k = self.j
def step5(self):
"""step5() removes a final -e if m() > 1, and changes -ll to -l if
m() > 1.
"""
self.j = self.k
if self.b[self.k] == 'e':
a = self.m()
if a > 1 or (a == 1 and not self.cvc(self.k-1)):
self.k = self.k - 1
if self.b[self.k] == 'l' and self.doublec(self.k) and self.m() > 1:
self.k = self.k -1
def stem_word(self, p, i=0, j=None):
"""In stem(p,i,j), p is a char pointer, and the string to be stemmed
is from p[i] to p[j] inclusive. Typically i is zero and j is the
offset to the last character of a string, (p[j+1] == '\0'). The
stemmer adjusts the characters p[i] ... p[j] and returns the new
end-point of the string, k. Stemming never increases word length, so
i <= k <= j. To turn the stemmer into a module, declare 'stem' as
extern, and delete the remainder of this file.
"""
## --NLTK--
## Don't print results as we go (commented out the next line)
#print p[i:j+1]
if j == None:
j = len(p) - 1
# copy the parameters into statics
self.b = p
self.k = j
self.k0 = i
if self.pool.has_key(self.b[self.k0:self.k+1]):
return self.pool[self.b[self.k0:self.k+1]]
if self.k <= self.k0 + 1:
return self.b # --DEPARTURE--
# With this line, strings of length 1 or 2 don't go through the
# stemming process, although no mention is made of this in the
# published algorithm. Remove the line to match the published
# algorithm.
self.step1ab()
self.step1c()
self.step2()
self.step3()
self.step4()
self.step5()
return self.b[self.k0:self.k+1]
def adjust_case(self, word, stem):
lower = string.lower(word)
ret = ""
for x in xrange(len(stem)):
if lower[x] == stem[x]:
ret = ret + word[x]
else:
ret = ret + stem[x]
return ret
## --NLTK--
## Don't use this procedure; we want to work with individual
## tokens, instead. (commented out the following procedure)
#def stem(self, text):
# parts = re.split("(\W+)", text)
# numWords = (len(parts) + 1)/2
#
# ret = ""
# for i in xrange(numWords):
# word = parts[2 * i]
# separator = ""
# if ((2 * i) + 1) < len(parts):
# separator = parts[(2 * i) + 1]
#
# stem = self.stem_word(string.lower(word), 0, len(word) - 1)
# ret = ret + self.adjust_case(word, stem)
# ret = ret + separator
# return ret
## --NLTK--
## Define a stem() method that implements the StemmerI interface.
def stem(self, word):
stem = self.stem_word(string.lower(word), 0, len(word) - 1)
return self.adjust_case(word, stem)
## --NLTK--
## Add a string representation function
def __repr__(self):
return '<Porter Stemmer>'
## --NLTK--
## This test procedure isn't applicable.
#if __name__ == '__main__':
# p = Porter()
# if len(sys.argv) > 1:
# for f in sys.argv[1:]:
# infile = open(f, 'r')
# while 1:
# w = infile.readline()
# if w == '':
# break
# w = w[:-1]
# print p.stem(w)
##--NLTK--
## Added a demo() function
def demo():
    """
    A demonstration of the porter stemmer on a sample from
    the Penn Treebank corpus.
    """
    # NOTE(review): imports project-local modules, so this only runs
    # inside the nltk_lite source tree.
    from en.parser.nltk_lite.corpora import treebank
    from en.parser.nltk_lite import stem
    stemmer = stem.Porter()
    i = 0
    orig = []
    stemmed = []
    # Stem every word of the first four treebank sentences.
    for sent in treebank.raw():
        for word in sent:
            orig.append(word)
            sword = stemmer.stem(word)
            stemmed.append(sword)
        i+=1
        if i>3: break
    # Convert the results to a string, and word-wrap them.
    results = ' '.join(stemmed)
    results = re.sub(r"(.{,70})\s", r'\1\n', results+' ').rstrip()
    # Convert the original to a string, and word wrap it.
    original = ' '.join(orig)
    original = re.sub(r"(.{,70})\s", r'\1\n', original+' ').rstrip()
    # Print the results.
    print '-Original-'.center(70).replace(' ', '*').replace('-', ' ')
    print original
    print '-Results-'.center(70).replace(' ', '*').replace('-', ' ')
    print results
    print '*'*70
##--NLTK--
## Call demo() if we're invoked directly.
if __name__ == '__main__': demo()
| Python |
# Natural Language Toolkit: Stemmers
#
# Copyright (C) 2001-2006 University of Melbourne
# Author: Trevor Cohn <tacohn@cs.mu.oz.au>
# Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@csse.unimelb.edu.au>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Interfaces used to remove morphological affixes from words, leaving
only the word stem. Stemming algorithms aim to remove those affixes
required for eg. grammatical role, tense, derivational morphology
leaving only the stem of the word. This is a difficult problem due to
irregular words (eg. common verbs in English), complicated
morphological rules, and part-of-speech and sense ambiguities
(eg. C{ceil-} is not the stem of C{ceiling}).
C{StemI} defines a standard interface for stemmers.
"""
import re
##//////////////////////////////////////////////////////
## Stemmer Interface
##//////////////////////////////////////////////////////
class StemI(object):
    """
    Interface for classes that remove morphological affixes from words,
    a process known as X{stemming}.  Concrete stemmers must override
    C{stem()}.
    """
    def stem(self, token):
        """
        Return the stem of C{token}, with affixes stripped.
        @param token: The token that should be stemmed.
        @type token: L{string}
        """
        raise NotImplementedError()
from regexp import *
from porter import *
| Python |
# Natural Language Toolkit: Utility functions
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Steven Bird <sb@csse.unimelb.edu.au>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
##########################################################################
# PRETTY PRINTING
##########################################################################
def pr(data, start=0, end=None):
"""
Pretty print a sequence of data items
@param data: the data stream to print
@type data: C{sequence} or C{iterator}
@param start: the start position
@type start: C{int}
@param end: the end position
@type end: C{int}
"""
from pprint import pprint
from itertools import islice
pprint(list(islice(data, start, end)))
def print_string(s, width=70):
    """
    Pretty print a string, breaking lines on whitespace
    @param s: the string to print, consisting of words and spaces
    @type s: C{string}
    @param width: the display width
    @type width: C{int}
    """
    import re  # NOTE(review): unused in this function body
    while s:
        s = s.strip()
        try:
            # Break at the last space within the first `width` characters.
            i = s[:width].rindex(' ')
        except ValueError:
            # No space in the window: print the remainder on one line.
            print s
            return
        print s[:i]
        s = s[i:]
class SortedDict(dict):
    """
    A very rudimentary sorted dictionary, whose main purpose is to
    allow dictionaries to be displayed in a consistent order in
    regression tests. keys(), items(), values(), iter*(), and
    __repr__ all sort their return values before returning them.
    (note that the sort order for values() does *not* correspond to
    the sort order for keys(). I.e., zip(d.keys(), d.values()) is not
    necessarily equal to d.items().
    """
    def keys(self): return sorted(dict.keys(self))
    def items(self): return sorted(dict.items(self))
    def values(self): return sorted(dict.values(self))
    def iterkeys(self): return iter(sorted(dict.keys(self)))
    def iteritems(self): return iter(sorted(dict.items(self)))
    def itervalues(self): return iter(sorted(dict.values(self)))
    def __iter__(self): return iter(sorted(dict.keys(self)))
    def __repr__(self):
        # BUG FIX: this method was named `repr`, so the sorted display
        # promised by the class docstring never hooked into repr().
        # Renamed to __repr__; `repr` kept as an alias for old callers.
        items = ['%s=%s' % t for t in sorted(self.items())]
        return '{%s}' % ', '.join(items)
    repr = __repr__
##########################################################################
# EDIT DISTANCE (LEVENSHTEIN)
##########################################################################
def _edit_dist_init(len1, len2):
lev = []
for i in range(len1):
lev.append([0] * len2) # initialize 2-D array to zero
for i in range(len1):
lev[i][0] = i # column 0: 0,1,2,3,4,...
for j in range(len2):
lev[0][j] = j # row 0: 0,1,2,3,4,...
return lev
def _edit_dist_step(lev, i, j, c1, c2):
a = lev[i-1][j ] + 1 # skipping s1[i]
b = lev[i-1][j-1] + (c1 != c2) # matching s1[i] with s2[j]
c = lev[i ][j-1] + 1 # skipping s2[j]
lev[i][j] = min(a,b,c) # pick the cheapest
def edit_dist(s1, s2):
    """
    Calculate the Levenshtein edit-distance between two strings.
    The edit distance is the number of characters that need to be
    substituted, inserted, or deleted, to transform s1 into s2.  For
    example, transforming "rain" to "shine" requires three steps
    (two substitutions and one insertion):
    "rain" -> "sain" -> "shin" -> "shine".
    @param s1, s2: The strings to be analysed
    @type s1, s2: C{string}
    @rtype C{int}
    """
    # The two helpers used by the original are inlined here: set up the
    # (len1+1) x (len2+1) table seeded with distances from "", ...
    len1 = len(s1)
    len2 = len(s2)
    lev = [[0] * (len2 + 1) for row in range(len1 + 1)]
    for row in range(len1 + 1):
        lev[row][0] = row
    for col in range(len2 + 1):
        lev[0][col] = col
    # ... then fill each cell with the cheapest of skip/substitute/skip.
    for i in range(1, len1 + 1):
        for j in range(1, len2 + 1):
            a = lev[i - 1][j] + 1
            b = lev[i - 1][j - 1] + (s1[i - 1] != s2[j - 1])
            c = lev[i][j - 1] + 1
            lev[i][j] = min(a, b, c)
    return lev[len1][len2]
##########################################################################
# MINIMAL SETS
##########################################################################
class MinimalSet(object):
    """
    Find contexts where more than one possible target value can
    appear. E.g. if targets are word-initial letters, and contexts
    are the remainders of words, then we would like to find cases like
    "fat" vs "cat", and "training" vs "draining". If targets are
    parts-of-speech and contexts are words, then we would like to find
    cases like wind (noun) 'air in rapid motion', vs wind (verb)
    'coil, wrap'.
    """
    def __init__(self, parameters=None):
        """
        Create a new minimal set.
        @param parameters: The (context, target, display) tuples for the item
        @type parameters: C{list} of C{tuple} of C{string}
        """
        self._targets = set()    # the contrastive information
        self._contexts = set()   # what we are controlling for
        self._seen = {}          # to record what we have seen
        self._displays = {}      # what we will display
        # BUG FIX: with the documented default parameters=None, the old
        # code iterated over None and raised TypeError.  Absent/empty
        # parameters now simply produce an empty set.
        if parameters:
            for context, target, display in parameters:
                self.add(context, target, display)
    def add(self, context, target, display):
        """
        Add a new item to the minimal set, having the specified
        context, target, and display form.
        @param context: The context in which the item of interest appears
        @type context: C{string}
        @param target: The item of interest
        @type target: C{string}
        @param display: The information to be reported for each item
        @type display: C{string}
        """
        # Store the set of targets that occurred in this context
        if context not in self._seen:
            self._seen[context] = set()
        self._seen[context].add(target)
        # Keep track of which contexts and targets we have seen
        self._contexts.add(context)
        self._targets.add(target)
        # For a given context and target, store the display form
        self._displays[(context, target)] = display
    def contexts(self, minimum=2):
        """
        Determine which contexts occurred with enough distinct targets.
        @param minimum: the minimum number of distinct target forms
        @type minimum: C{int}
        @rtype: C{list}
        """
        return [c for c in self._contexts if len(self._seen[c]) >= minimum]
    def display(self, context, target, default=""):
        """Return the display form stored for (context, target), or
        `default` when that pair was never added."""
        # `in` replaces dict.has_key(): same semantics, Python-3 safe.
        if (context, target) in self._displays:
            return self._displays[(context, target)]
        else:
            return default
    def display_all(self, context):
        """Return the non-empty display forms of all targets seen with
        this context."""
        result = []
        for target in self._targets:
            x = self.display(context, target)
            if x: result.append(x)
        return result
    def targets(self):
        """Return the set of all targets added so far."""
        return self._targets
######################################################################
## Regexp display (thanks to David Mertz)
######################################################################
import re
def re_show(regexp, string):
    """
    Search C{string} for substrings matching C{regexp} and wrap
    the matches with braces. This is convenient for learning about
    regular expressions.
    @param regexp: The regular expression.
    @param string: The string being matched.
    @rtype: C{string}
    @return: A string with braces surrounding the matched substrings.
    """
    # \g<0> re-inserts the entire match; re.M makes ^/$ match at newlines.
    # NOTE(review): despite the docstring's @return, this prints the
    # result rather than returning it.
    print re.compile(regexp, re.M).sub("{\g<0>}", string.rstrip())
##########################################################################
# READ FROM FILE OR STRING
##########################################################################
# recipe from David Mertz
def filestring(f):
    """Return the entire contents of f, which may be an open file-like
    object (anything with a read() method) or a filename string.
    Raises ValueError for anything else."""
    if hasattr(f, 'read'):
        return f.read()
    elif isinstance(f, basestring):
        return open(f).read()
    else:
        raise ValueError, "Must be called with a filename or file-like object"
##########################################################################
# COUNTER, FOR UNIQUE NAMING
##########################################################################
class Counter:
    """
    A counter that auto-increments each time its value is read.
    """
    def __init__(self, initial_value=0):
        # Last value handed out (or the starting point before any get()).
        self._value = initial_value
    def get(self):
        """Bump the counter and return its new value."""
        self._value = self._value + 1
        return self._value
| Python |
# Module wordnet.py
#
# Original author: Oliver Steele <steele@osteele.com>
# Project Page: http://sourceforge.net/projects/pywordnet
#
# Copyright (c) 1998-2004 by Oliver Steele. Use is permitted under
# the Artistic License
# <http://www.opensource.org/licenses/artistic-license.html>
"""Utility functions to use with the wordnet module.
Usage
-----
>>> dog = N['dog'][0]
# (First 10) adjectives that are transitively SIMILAR to the main sense of 'red'
>>> closure(ADJ['red'][0], SIMILAR)[:10]
['red' in {adjective: red, reddish, ruddy, blood-red, carmine, cerise, cherry, cherry-red, crimson, ruby, ruby-red, scarlet}, {adjective: chromatic}, {adjective: amber, brownish-yellow, yellow-brown}, {adjective: amethyst}, {adjective: aureate, gilded, gilt, gold, golden}, {adjective: azure, cerulean, sky-blue, bright blue}, {adjective: blue, bluish, blueish, light-blue, dark-blue, blue-black}, {adjective: bluish green, blue-green, cyan, teal}, {adjective: blushful, rosy}, {adjective: bottle-green}]
>>> # Adjectives that are transitively SIMILAR to any of the senses of 'red'
>>> #flatten1(map(lambda sense:closure(sense, SIMILAR), ADJ['red'])) # too verbose
>>> # Hyponyms of the main sense of 'dog'(n.) that are homophonous with verbs
>>> filter(lambda sense:V.get(sense.form), flatten1(map(lambda e:e.getSenses(), hyponyms(N['dog'][0]))))
['dog' in {noun: dog, domestic dog, Canis familiaris}, 'pooch' in {noun: pooch, doggie, doggy, barker, bow-wow}, 'toy' in {noun: toy dog, toy}, 'hound' in {noun: hound, hound dog}, 'basset' in {noun: basset, basset hound}, 'cocker' in {noun: cocker spaniel, English cocker spaniel, cocker}, 'bulldog' in {noun: bulldog, English bulldog}]
>>> # Find the senses of 'raise'(v.) and 'lower'(v.) that are antonyms
>>> filter(lambda p:p[0] in p[1].pointerTargets(ANTONYM), product(V['raise'].getSenses(), V['lower'].getSenses()))
[('raise' in {verb: raise, lift, elevate, get up, bring up}, 'lower' in {verb: lower, take down, let down, get down, bring down})]
"""
__author__ = "Oliver Steele <steele@osteele.com>"
__version__ = "2.0"
from wordnet import *
#
# Domain utilities
#
def _requireSource(entity):
    # Raise TypeError unless entity can act as a pointer source, i.e.
    # it has a `pointers` attribute (as Senses and Synsets do).  A Word
    # gets a friendlier message suggesting word[0].
    if not hasattr(entity, 'pointers'):
        if isinstance(entity, Word):
            raise TypeError, `entity` + " is not a Sense or Synset. Try " + `entity` + "[0] instead."
        else:
            raise TypeError, `entity` + " is not a Sense or Synset"
def tree(source, pointerType):
    """Return a tree, as nested lists, rooted at source and expanded by
    repeatedly following pointerType.  A Word expands to the list of
    trees of its senses.

    >>> dog = N['dog'][0]
    >>> pprint(tree(dog, HYPERNYM))   # hypernym chain up to {noun: entity}
    """
    if isinstance(source, Word):
        return [tree(sense, pointerType) for sense in source.getSenses()]
    _requireSource(source)
    subtrees = [tree(target, pointerType)
                for target in source.pointerTargets(pointerType)]
    return [source] + subtrees
def closure(source, pointerType, accumulator=None):
    """Return the transitive closure of source under the pointerType
    relationship, as a list in discovery order.  If source is a Word,
    return the union of the closures of its senses.

    >>> closure(N['dog'][0], HYPERNYM)   # sense, then its hypernym chain
    """
    if isinstance(source, Word):
        per_sense = [tree(sense, pointerType) for sense in source.getSenses()]
        return reduce(union, per_sense)
    _requireSource(source)
    if accumulator is None:
        accumulator = []
    if source not in accumulator:
        accumulator.append(source)
        # Recurse only for newly discovered nodes, so cycles terminate.
        for target in source.pointerTargets(pointerType):
            closure(target, pointerType, accumulator)
    return accumulator
def hyponyms(source):
    """Return source together with its transitive hyponyms.  For a Word,
    the hyponyms of all its senses are unioned."""
    return closure(source, HYPONYM)
def hypernyms(source):
    """Return source together with its transitive hypernyms.  For a Word,
    the hypernyms of all its senses are unioned."""
    return closure(source, HYPERNYM)
def meet(a, b, pointerType=HYPERNYM):
    """Return the meet of a and b under the pointerType relationship --
    the first element common to both closures -- or None if they share
    nothing.

    >>> meet(N['dog'][0], N['cat'][0])
    {noun: carnivore}
    """
    common = intersection(closure(a, pointerType), closure(b, pointerType))
    if common:
        return common[0]
    return None
#
# String Utility Functions
#
def startsWith(str, prefix):
    """Return true iff _str_ starts with _prefix_.
    >>> startsWith('unclear', 'un')
    True
    """
    # str.startswith is the idiomatic (and faster) form of the old
    # slice-and-compare.
    return str.startswith(prefix)
def endsWith(str, suffix):
    """Return true iff _str_ ends with _suffix_.
    >>> endsWith('clearly', 'ly')
    True
    """
    # str.endswith replaces the old slice comparison, which also fixes
    # the empty-suffix bug: str[-0:] is the whole string, so the old
    # code wrongly returned False for suffix == ''.
    return str.endswith(suffix)
def equalsIgnoreCase(a, b):
    """Return true iff a and b have the same lowercase representation.
    >>> equalsIgnoreCase('dog', 'Dog')
    True
    >>> equalsIgnoreCase('dOg', 'DOG')
    True
    """
    # test a == b first as an optimization where they're equal;
    # a.lower() replaces the deprecated string.lower() function.
    return a == b or a.lower() == b.lower()
#
# Sequence Utility Functions
#
def issequence(item):
    """Return true iff _item_ is a Sequence (a List, String, or Tuple).
    >>> issequence((1,2))
    1
    >>> issequence(1)
    0
    """
    # Exact type check (not isinstance), so subclasses do not count.
    # NOTE(review): ListType/StringType/TupleType are the Python 2
    # `types`-module names, in scope via the star import above.
    return type(item) in (ListType, StringType, TupleType)
def intersection(u, v):
    """Return the elements of _u_ that also occur in _v_, preserving
    u's order (and duplicates).
    >>> intersection((1,2,3), (2,3,4))
    [2, 3]
    """
    return [element for element in u if element in v]
def union(u, v):
    """Return the union of _u_ and _v_ as a list: all of u, followed by
    the elements of v not already present.
    >>> union((1,2,3), (2,3,4))
    [1, 2, 3, 4]
    """
    # list() always builds a new list, so the old `if w is u: copy`
    # defensive branch was dead code and has been removed.
    w = list(u)
    for e in v:
        if e not in w:
            w.append(e)
    return w
def product(u, v):
    """Return the Cartesian product of u and v, as a flat list of pairs.
    >>> product("12", "ab")
    [('1', 'a'), ('1', 'b'), ('2', 'a'), ('2', 'b')]
    """
    # A nested comprehension replaces the original map/lambda/flatten1
    # pipeline; the resulting list of (a, b) tuples is identical.
    return [(a, b) for a in u for b in v]
def removeDuplicates(sequence):
    """Return a copy of _sequence_ with equal items removed, keeping
    first occurrences in order.  Works for unhashable items too, since
    membership is tested with `in` on a list.
    >>> removeDuplicates("this is a test")
    ['t', 'h', 'i', 's', ' ', 'a', 'e']
    """
    seen = []
    for item in sequence:
        if item not in seen:
            seen.append(item)
    return seen
#
# Tree Utility Functions
#
def flatten1(sequence):
    """Flatten _sequence_ by one level: list (and tuple) elements are
    spliced in, everything else is appended as-is."""
    out = []
    for element in sequence:
        if type(element) == TupleType:
            element = list(element)
        if type(element) == ListType:
            out.extend(element)
        else:
            out.append(element)
    return out
#
# WordNet utilities
#
GET_INDEX_SUBSTITUTIONS = ((' ', '-'), ('-', ' '), ('-', ''), (' ', ''), ('.', ''))
def getIndex(form, pos='noun'):
    """Search for _form_ in the index file corresponding to _pos_.

    getIndex applies to _form_ an algorithm that replaces underscores
    with hyphens, hyphens with underscores, removes hyphens and
    underscores, and removes periods in an attempt to find a form of
    the string that is an exact match for an entry in the index file
    corresponding to _pos_.  Each transformed string is looked up until
    a match is found or all the different strings have been tried.  It
    returns a Word or None.
    """
    def trySubstitutions(trySubstitutions, form, substitutions, lookup=1, dictionary=dictionaryFor(pos)):
        if lookup and dictionary.has_key(form):
            return dictionary[form]
        elif substitutions:
            (old, new) = substitutions[0]
            # BUG FIX: the old code read `substitute` on the same line it
            # was being assigned (UnboundLocalError).  A substitution only
            # counts when it actually changes the form.
            substitute = string.replace(form, old, new)
            if substitute == form:
                substitute = None
            if substitute and dictionary.has_key(substitute):
                return dictionary[substitute]
            return trySubstitutions(trySubstitutions, form, substitutions[1:], lookup=0) or \
                   (substitute and trySubstitutions(trySubstitutions, substitute, substitutions[1:]))
    # BUG FIX: this called trySubstitutions(returnMatch, ...) but
    # `returnMatch` is undefined anywhere; the helper expects itself as
    # its first argument (pre-2.1 nested-closure workaround).
    return trySubstitutions(trySubstitutions, form, GET_INDEX_SUBSTITUTIONS)
# Suffix-rewrite rules used by morphy(), keyed by part of speech.
# Each (old, new) pair means: if the form ends with `old`, try
# replacing that suffix with `new`.  ADVERB has no rules.
MORPHOLOGICAL_SUBSTITUTIONS = {
    NOUN:
      [('s', ''),
       ('ses', 's'),
       ('ves', 'f'),
       ('xes', 'x'),
       ('zes', 'z'),
       ('ches', 'ch'),
       ('shes', 'sh'),
       ('men', 'man'),
       ('ies', 'y')],
    VERB:
      [('s', ''),
       ('ies', 'y'),
       ('es', 'e'),
       ('es', ''),
       ('ed', 'e'),
       ('ed', ''),
       ('ing', 'e'),
       ('ing', '')],
    ADJECTIVE:
      [('er', ''),
       ('est', ''),
       ('er', 'e'),
       ('est', 'e')],
    ADVERB: []}
def morphy(form, pos='noun', collect=0):
    """Recursively uninflect _form_, and return the first form found
    in the dictionary. If _collect_ is true, a sequence of all forms
    is returned, instead of just the first one.
    >>> morphy('dogs')
    'dog'
    >>> morphy('churches')
    'church'
    >>> morphy('aardwolves')
    'aardwolf'
    >>> morphy('abaci')
    'abacus'
    >>> morphy('hardrock', 'adv')
    """
    from wordnet import _normalizePOS, _dictionaryFor
    pos = _normalizePOS(pos)
    # The .exc file lists irregular ("exception") forms for this POS.
    fname = os.path.join(WNSEARCHDIR, {NOUN: 'noun', VERB: 'verb', ADJECTIVE: 'adj', ADVERB: 'adv'}[pos] + '.exc')
    excfile = open(fname)
    substitutions = MORPHOLOGICAL_SUBSTITUTIONS[pos]
    def trySubstitutions(trySubstitutions, # workaround for lack of nested closures in Python < 2.1
                         form,             # reduced form
                         substitutions,    # remaining substitutions
                         lookup=1,
                         dictionary=_dictionaryFor(pos),
                         excfile=excfile,
                         collect=collect,
                         collection=[]):
        import string
        # Irregular forms resolve via binary search of the .exc file;
        # each line is "<inflected> <base>\n".
        exceptions = binarySearchFile(excfile, form)
        if exceptions:
            form = exceptions[string.find(exceptions, ' ')+1:-1]
        if lookup and dictionary.has_key(form):
            if collect:
                collection.append(form)
            else:
                return form
        elif substitutions:
            # Depth-first: try the remaining rules on the unreduced form,
            # then (if the current rule applied) on the reduced form.
            old, new = substitutions[0]
            substitutions = substitutions[1:]
            substitute = None
            if endsWith(form, old):
                substitute = form[:-len(old)] + new
                #if dictionary.has_key(substitute):
                # return substitute
            form = trySubstitutions(trySubstitutions, form, substitutions) or \
                   (substitute and trySubstitutions(trySubstitutions, substitute, substitutions))
            return (collect and collection) or form
        elif collect:
            return collection
    # NOTE(review): `collection=[]` is a mutable default, but the inner
    # def re-executes on every morphy() call, so it appears to be fresh
    # per call -- confirm before refactoring.
    return trySubstitutions(trySubstitutions, form, substitutions)
#
# Testing
#
def _test(reset=0):
    """Run the wntools doctests.  Pass reset=1 after a reload() so that
    doctest does not complain about being re-run."""
    import doctest
    import wntools
    if reset:
        doctest.master = None
    return doctest.testmod(wntools)
| Python |
# Module wordnet.py
#
# Original author: Oliver Steele <steele@osteele.com>
# Project Page: http://sourceforge.net/projects/pywordnet
#
# Copyright (c) 1998-2004 by Oliver Steele. Use is permitted under
# the Artistic License
# <http://www.opensource.org/licenses/artistic-license.html>
"""An OO interface to the WordNet database.
Usage
-----
>>> from wordnet import *
>>> # Retrieve words from the database
>>> N['dog']
dog(n.)
>>> V['dog']
dog(v.)
>>> ADJ['clear']
clear(adj.)
>>> ADV['clearly']
clearly(adv.)
>>> # Examine a word's senses and pointers:
>>> N['dog'].getSenses()
('dog' in {noun: dog, domestic dog, Canis familiaris}, 'dog' in {noun: frump, dog}, 'dog' in {noun: dog}, 'dog' in {noun: cad, bounder, blackguard, dog, hound, heel}, 'dog' in {noun: frank, frankfurter, hotdog, hot dog, dog, wiener, wienerwurst, weenie}, 'dog' in {noun: pawl, detent, click, dog}, 'dog' in {noun: andiron, firedog, dog, dog-iron})
>>> # Extract the first sense
>>> dog = N['dog'][0] # aka N['dog'].getSenses()[0]
>>> dog
'dog' in {noun: dog, domestic dog, Canis familiaris}
>>> dog.getPointers()[:5]
(hypernym -> {noun: canine, canid}, member meronym -> {noun: Canis, genus Canis}, member meronym -> {noun: pack}, hyponym -> {noun: pooch, doggie, doggy, barker, bow-wow}, hyponym -> {noun: cur, mongrel, mutt})
>>> dog.getPointerTargets(MEMBER_MERONYM)
[{noun: Canis, genus Canis}, {noun: pack}]
"""
__author__ = "Oliver Steele <steele@osteele.com>"
__version__ = "2.0.1"
import string
import os
from os import environ
from types import IntType, ListType, StringType, TupleType
#
# Configuration variables
#
# Root of the WordNet installation.  Overridable with the WNHOME
# environment variable; otherwise picked per platform.
WNHOME = environ.get('WNHOME', {
    'mac': ":",
    'dos': "C:\\wn16",
    'nt': "C:\\Program Files\\WordNet\\2.0"}
    .get(os.name, "/usr/local/wordnet2.0"))
# Directory holding the index/data files ("dict" everywhere but mac).
WNSEARCHDIR = environ.get('WNSEARCHDIR', os.path.join(WNHOME, {'mac': "Database"}.get(os.name, "dict")))
ReadableRepresentations = 1
"""If true, repr(word), repr(sense), and repr(synset) return
human-readable strings instead of strings that evaluate to an object
equal to the argument.
This breaks the contract for repr, but it makes the system much more
usable from the command line."""
# Nonzero enables tracing of index/data lookups (debugging aid).
_TraceLookups = 0
_FILE_OPEN_MODE = os.name in ('dos', 'nt') and 'rb' or 'r' # work around a Windows Python bug
#
# Enumerated types
#
NOUN = 'noun'
VERB = 'verb'
ADJECTIVE = 'adjective'
ADVERB = 'adverb'
PartsOfSpeech = (NOUN, VERB, ADJECTIVE, ADVERB)
ANTONYM = 'antonym'
HYPERNYM = 'hypernym'
HYPONYM = 'hyponym'
ATTRIBUTE = 'attribute'
ALSO_SEE = 'also see'
ENTAILMENT = 'entailment'
CAUSE = 'cause'
VERB_GROUP = 'verb group'
MEMBER_MERONYM = 'member meronym'
SUBSTANCE_MERONYM = 'substance meronym'
PART_MERONYM = 'part meronym'
MEMBER_HOLONYM = 'member holonym'
SUBSTANCE_HOLONYM = 'substance holonym'
PART_HOLONYM = 'part holonym'
SIMILAR = 'similar'
PARTICIPLE_OF = 'participle of'
PERTAINYM = 'pertainym'
# New in wn 2.0:
FRAMES = 'frames'
CLASSIF_CATEGORY = 'domain category'
CLASSIF_USAGE = 'domain usage'
CLASSIF_REGIONAL = 'domain regional'
CLASS_CATEGORY = 'class category'
CLASS_USAGE = 'class usage'
CLASS_REGIONAL = 'class regional'
# New in wn 2.1:
INSTANCE_HYPERNYM = 'hypernym (instance)'
INSTANCE_HYPONYM = 'hyponym (instance)'
POINTER_TYPES = (
ANTONYM,
HYPERNYM,
HYPONYM,
ATTRIBUTE,
ALSO_SEE,
ENTAILMENT,
CAUSE,
VERB_GROUP,
MEMBER_MERONYM,
SUBSTANCE_MERONYM,
PART_MERONYM,
MEMBER_HOLONYM,
SUBSTANCE_HOLONYM,
PART_HOLONYM,
SIMILAR,
PARTICIPLE_OF,
PERTAINYM,
# New in wn 2.0:
FRAMES,
CLASSIF_CATEGORY,
CLASSIF_USAGE,
CLASSIF_REGIONAL,
CLASS_CATEGORY,
CLASS_USAGE,
CLASS_REGIONAL,
# New in wn 2.1:
INSTANCE_HYPERNYM,
INSTANCE_HYPONYM,
)
ATTRIBUTIVE = 'attributive'
PREDICATIVE = 'predicative'
IMMEDIATE_POSTNOMINAL = 'immediate postnominal'
ADJECTIVE_POSITIONS = (ATTRIBUTIVE, PREDICATIVE, IMMEDIATE_POSTNOMINAL, None)
VERB_FRAME_STRINGS = (
None,
"Something %s",
"Somebody %s",
"It is %sing",
"Something is %sing PP",
"Something %s something Adjective/Noun",
"Something %s Adjective/Noun",
"Somebody %s Adjective",
"Somebody %s something",
"Somebody %s somebody",
"Something %s somebody",
"Something %s something",
"Something %s to somebody",
"Somebody %s on something",
"Somebody %s somebody something",
"Somebody %s something to somebody",
"Somebody %s something from somebody",
"Somebody %s somebody with something",
"Somebody %s somebody of something",
"Somebody %s something on somebody",
"Somebody %s somebody PP",
"Somebody %s something PP",
"Somebody %s PP",
"Somebody's (body part) %s",
"Somebody %s somebody to INFINITIVE",
"Somebody %s somebody INFINITIVE",
"Somebody %s that CLAUSE",
"Somebody %s to somebody",
"Somebody %s to INFINITIVE",
"Somebody %s whether INFINITIVE",
"Somebody %s somebody into V-ing something",
"Somebody %s something with something",
"Somebody %s INFINITIVE",
"Somebody %s VERB-ing",
"It %s that CLAUSE",
"Something %s INFINITIVE")
#
# Domain classes
#
class Word:
"""An index into the database.
Each word has one or more Senses, which can be accessed via
``word.getSenses()`` or through the index notation, ``word[n]``.
Fields
------
form : string
The orthographic representation of the word.
pos : string
The part of speech -- one of NOUN, VERB, ADJECTIVE, ADVERB.
string : string
Same as form (for compatability with version 1.0).
taggedSenseCount : integer
The number of senses that are tagged.
Examples
--------
>>> N['dog'].pos
'noun'
>>> N['dog'].form
'dog'
>>> N['dog'].taggedSenseCount
1
"""
def __init__(self, line):
"""Initialize the word from a line of a WN POS file."""
tokens = string.split(line)
ints = map(int, tokens[int(tokens[3]) + 4:])
self.form = string.replace(tokens[0], '_', ' ')
"Orthographic representation of the word."
self.pos = _normalizePOS(tokens[1])
"Part of speech. One of NOUN, VERB, ADJECTIVE, ADVERB."
self.taggedSenseCount = ints[1]
"Number of senses that are tagged."
self._synsetOffsets = ints[2:ints[0]+2]
def getPointers(self, pointerType=None):
"""Pointers connect senses and synsets, not words.
Try word[0].getPointers() instead."""
raise self.getPointers.__doc__
def getPointerTargets(self, pointerType=None):
"""Pointers connect senses and synsets, not words.
Try word[0].getPointerTargets() instead."""
raise self.getPointers.__doc__
def getSenses(self):
"""Return a sequence of senses.
>>> N['dog'].getSenses()
('dog' in {noun: dog, domestic dog, Canis familiaris}, 'dog' in {noun: frump, dog}, 'dog' in {noun: dog}, 'dog' in {noun: cad, bounder, blackguard, dog, hound, heel}, 'dog' in {noun: frank, frankfurter, hotdog, hot dog, dog, wiener, wienerwurst, weenie}, 'dog' in {noun: pawl, detent, click, dog}, 'dog' in {noun: andiron, firedog, dog, dog-iron})
"""
if not hasattr(self, '_senses'):
def getSense(offset, pos=self.pos, form=self.form):
return getSynset(pos, offset)[form]
self._senses = tuple(map(getSense, self._synsetOffsets))
del self._synsetOffsets
return self._senses
# Deprecated. Present for backwards compatability.
def senses(self):
import wordnet
#warningKey = 'SENSE_DEPRECATION_WARNING'
#if not wordnet.has_key(warningKey):
# print 'Word.senses() has been deprecated. Use Word.sense() instead.'
# wordnet[warningKey] = 1
return self.getSense()
def isTagged(self):
"""Return 1 if any sense is tagged.
>>> N['dog'].isTagged()
1
"""
return self.taggedSenseCount > 0
def getAdjectivePositions(self):
"""Return a sequence of adjective positions that this word can
appear in. These are elements of ADJECTIVE_POSITIONS.
>>> ADJ['clear'].getAdjectivePositions()
[None, 'predicative']
"""
positions = {}
for sense in self.getSenses():
positions[sense.position] = 1
return positions.keys()
adjectivePositions = getAdjectivePositions # backwards compatability
def __cmp__(self, other):
"""
>>> N['cat'] < N['dog']
1
>>> N['dog'] < V['dog']
1
"""
return _compareInstances(self, other, ('pos', 'form'))
def __str__(self):
"""Return a human-readable representation.
>>> str(N['dog'])
'dog(n.)'
"""
abbrs = {NOUN: 'n.', VERB: 'v.', ADJECTIVE: 'adj.', ADVERB: 'adv.'}
return self.form + "(" + abbrs[self.pos] + ")"
def __repr__(self):
"""If ReadableRepresentations is true, return a human-readable
representation, e.g. 'dog(n.)'.
If ReadableRepresentations is false, return a machine-readable
representation, e.g. "getWord('dog', 'noun')".
"""
if ReadableRepresentations:
return str(self)
return "getWord" + `(self.form, self.pos)`
#
# Sequence protocol (a Word's elements are its Senses)
#
def __nonzero__(self):
return 1
def __len__(self):
return len(self.getSenses())
def __getitem__(self, index):
return self.getSenses()[index]
def __getslice__(self, i, j):
return self.getSenses()[i:j]
class Synset:
    """A set of synonyms that share a common meaning.
    Each synonym contains one or more Senses, which represent a
    specific sense of a specific word. Senses can be retrieved via
    synset.getSenses() or through the index notations synset[0],
    synset[string], or synset[word]. Synsets also originate zero or
    more typed pointers, which can be accessed via
    synset.getPointers() or synset.getPointers(pointerType). The
    targets of a synset pointer can be retrieved via
    synset.getPointerTargets() or
    synset.getPointerTargets(pointerType), which are equivalent to
    map(Pointer.target, synset.getPointerTargets(...)).
    Fields
    ------
    pos : string
        The part of speech -- one of NOUN, VERB, ADJECTIVE, ADVERB.
    offset : integer
        An integer offset into the part-of-speech file. Together
        with pos, this can be used as a unique id.
    gloss : string
        A gloss for the sense.
    verbFrames : [integer]
        A sequence of integers that index into
        VERB_FRAME_STRINGS. These list the verb frames that any
        Sense in this synset participates in. (See also
        Sense.verbFrames.) Defined only for verbs.
        >>> V['think'][0].synset.verbFrames
        (5, 9)
    """
    def __init__(self, pos, offset, line):
        "Initialize the synset from a line off a WN synset file."
        self.pos = pos
        "part of speech -- one of NOUN, VERB, ADJECTIVE, ADVERB."
        self.offset = offset
        """integer offset into the part-of-speech file. Together
        with pos, this can be used as a unique id."""
        # Everything before the '|' is the machine-readable record;
        # everything after it is the human-readable gloss.
        tokens = string.split(line[:string.index(line, '|')])
        self.ssType = tokens[2]
        self.gloss = string.strip(line[string.index(line, '|') + 1:])
        self.lexname = Lexname.lexnames[int(tokens[1])]
        # The word count (tokens[3]) is hexadecimal in the data file,
        # hence base-16 atoi; the pointer count that follows is decimal.
        (self._senseTuples, remainder) = _partition(tokens[4:], 2, string.atoi(tokens[3], 16))
        (self._pointerTuples, remainder) = _partition(remainder[1:], 4, int(remainder[0]))
        if pos == VERB:
            (vfTuples, remainder) = _partition(remainder[1:], 3, int(remainder[0]))
            # A frame record whose (hex) word-index field is 0 applies to
            # every sense in the synset; otherwise it applies only to the
            # 1-based sense index it names.
            def extractVerbFrames(index, vfTuples):
                return tuple(map(lambda t:string.atoi(t[1]), filter(lambda t,i=index:string.atoi(t[2],16) in (0, i), vfTuples)))
            senseVerbFrames = []
            for index in range(1, len(self._senseTuples) + 1):
                senseVerbFrames.append(extractVerbFrames(index, vfTuples))
            self._senseVerbFrames = senseVerbFrames
            self.verbFrames = tuple(extractVerbFrames(None, vfTuples))
            """A sequence of integers that index into
            VERB_FRAME_STRINGS. These list the verb frames that any
            Sense in this synset participates in. (See also
            Sense.verbFrames.) Defined only for verbs."""
    def getSenses(self):
        """Return a sequence of Senses.
        >>> N['dog'][0].getSenses()
        ('dog' in {noun: dog, domestic dog, Canis familiaris},)
        """
        # Senses are built lazily from the tuples parsed in __init__,
        # which are discarded afterwards.
        if not hasattr(self, '_senses'):
            def loadSense(senseTuple, verbFrames=None, synset=self):
                return Sense(synset, senseTuple, verbFrames)
            if self.pos == VERB:
                self._senses = tuple(map(loadSense, self._senseTuples, self._senseVerbFrames))
                del self._senseVerbFrames
            else:
                self._senses = tuple(map(loadSense, self._senseTuples))
            del self._senseTuples
        return self._senses
    senses = getSenses
    def getPointers(self, pointerType=None):
        """Return a sequence of Pointers.
        If pointerType is specified, only pointers of that type are
        returned. In this case, pointerType should be an element of
        POINTER_TYPES.
        >>> N['dog'][0].getPointers()[:5]
        (hypernym -> {noun: canine, canid}, member meronym -> {noun: Canis, genus Canis}, member meronym -> {noun: pack}, hyponym -> {noun: pooch, doggie, doggy, barker, bow-wow}, hyponym -> {noun: cur, mongrel, mutt})
        >>> N['dog'][0].getPointers(HYPERNYM)
        (hypernym -> {noun: canine, canid},)
        """
        # Pointers are built lazily, like senses.
        if not hasattr(self, '_pointers'):
            def loadPointer(tuple, synset=self):
                return Pointer(synset.offset, tuple)
            self._pointers = tuple(map(loadPointer, self._pointerTuples))
            del self._pointerTuples
        if pointerType == None:
            return self._pointers
        else:
            _requirePointerType(pointerType)
            return filter(lambda pointer, type=pointerType: pointer.type == type, self._pointers)
    pointers = getPointers # backwards compatability
    def getPointerTargets(self, pointerType=None):
        """Return a sequence of Senses or Synsets.
        If pointerType is specified, only targets of pointers of that
        type are returned. In this case, pointerType should be an
        element of POINTER_TYPES.
        >>> N['dog'][0].getPointerTargets()[:5]
        [{noun: canine, canid}, {noun: Canis, genus Canis}, {noun: pack}, {noun: pooch, doggie, doggy, barker, bow-wow}, {noun: cur, mongrel, mutt}]
        >>> N['dog'][0].getPointerTargets(HYPERNYM)
        [{noun: canine, canid}]
        """
        return map(Pointer.target, self.getPointers(pointerType))
    pointerTargets = getPointerTargets # backwards compatability
    def isTagged(self):
        """Return 1 if any sense is tagged.
        >>> N['dog'][0].isTagged()
        1
        >>> N['dog'][1].isTagged()
        0
        """
        return len(filter(Sense.isTagged, self.getSenses())) > 0
    def __str__(self):
        """Return a human-readable representation.
        >>> str(N['dog'][0].synset)
        '{noun: dog, domestic dog, Canis familiaris}'
        """
        return "{" + self.pos + ": " + string.joinfields(map(lambda sense:sense.form, self.getSenses()), ", ") + "}"
    def __repr__(self):
        """If ReadableRepresentations is true, return a human-readable
        representation, e.g. 'dog(n.)'.
        If ReadableRepresentations is false, return a machine-readable
        representation, e.g. "getSynset(pos, 1234)".
        """
        if ReadableRepresentations:
            return str(self)
        return "getSynset" + `(self.pos, self.offset)`
    def __cmp__(self, other):
        return _compareInstances(self, other, ('pos', 'offset'))
    #
    # Sequence protocol (a Synset's elements are its senses).
    #
    def __nonzero__(self):
        return 1
    def __len__(self):
        """
        >>> len(N['dog'][0].synset)
        3
        """
        return len(self.getSenses())
    def __getitem__(self, idx):
        """
        >>> N['dog'][0].synset[0] == N['dog'][0]
        1
        >>> N['dog'][0].synset['dog'] == N['dog'][0]
        1
        >>> N['dog'][0].synset[N['dog']] == N['dog'][0]
        1
        >>> N['cat'][6]
        'cat' in {noun: big cat, cat}
        """
        senses = self.getSenses()
        # Accept an int position, a word form, or a Word object; string
        # lookup tries an exact match first, then case-insensitive.
        if isinstance(idx, Word):
            idx = idx.form
        if isinstance(idx, StringType):
            idx = _index(idx, map(lambda sense:sense.form, senses)) or \
                  _index(idx, map(lambda sense:sense.form, senses), _equalsIgnoreCase)
        return senses[idx]
    def __getslice__(self, i, j):
        return self.getSenses()[i:j]
class Sense:
"""A specific meaning of a specific word -- the intersection of a Word and a Synset.
Fields
------
form : string
The orthographic representation of the Word this is a Sense of.
pos : string
The part of speech -- one of NOUN, VERB, ADJECTIVE, ADVERB
string : string
The same as form (for compatability with version 1.0).
synset : Synset
The Synset that this Sense is a sense of.
verbFrames : [integer]
A sequence of integers that index into
VERB_FRAME_STRINGS. These list the verb frames that this
Sense partipates in. Defined only for verbs.
>>> decide = V['decide'][0].synset # first synset for 'decide'
>>> decide[0].verbFrames
(8, 2, 26, 29)
>>> decide[1].verbFrames
(8, 2)
>>> decide[2].verbFrames
(8, 26, 29)
"""
def __init__(sense, synset, senseTuple, verbFrames=None):
"Initialize a sense from a synset's senseTuple."
# synset is stored by key (pos, synset) rather than object
# reference, to avoid creating a circular reference between
# Senses and Synsets that will prevent the vm from
# garbage-collecting them.
sense.pos = synset.pos
"part of speech -- one of NOUN, VERB, ADJECTIVE, ADVERB"
sense.synsetOffset = synset.offset
"synset key. This is used to retrieve the sense."
sense.verbFrames = verbFrames
"""A sequence of integers that index into
VERB_FRAME_STRINGS. These list the verb frames that this
Sense partipates in. Defined only for verbs."""
(form, idString) = senseTuple
sense.position = None
if '(' in form:
index = string.index(form, '(')
key = form[index + 1:-1]
form = form[:index]
if key == 'a':
sense.position = ATTRIBUTIVE
elif key == 'p':
sense.position = PREDICATIVE
elif key == 'ip':
sense.position = IMMEDIATE_POSTNOMINAL
else:
raise "unknown attribute " + key
sense.form = string.replace(form, '_', ' ')
"orthographic representation of the Word this is a Sense of."
def __getattr__(self, name):
# see the note at __init__ about why 'synset' is provided as a
# 'virtual' slot
if name == 'synset':
return getSynset(self.pos, self.synsetOffset)
elif name == 'lexname':
return self.synset.lexname
else:
raise AttributeError, name
def __str__(self):
"""Return a human-readable representation.
>>> str(N['dog'])
'dog(n.)'
"""
return `self.form` + " in " + str(self.synset)
def __repr__(self):
"""If ReadableRepresentations is true, return a human-readable
representation, e.g. 'dog(n.)'.
If ReadableRepresentations is false, return a machine-readable
representation, e.g. "getWord('dog', 'noun')".
"""
if ReadableRepresentations:
return str(self)
return "%s[%s]" % (`self.synset`, `self.form`)
def getPointers(self, pointerType=None):
"""Return a sequence of Pointers.
If pointerType is specified, only pointers of that type are
returned. In this case, pointerType should be an element of
POINTER_TYPES.
>>> N['dog'][0].getPointers()[:5]
(hypernym -> {noun: canine, canid}, member meronym -> {noun: Canis, genus Canis}, member meronym -> {noun: pack}, hyponym -> {noun: pooch, doggie, doggy, barker, bow-wow}, hyponym -> {noun: cur, mongrel, mutt})
>>> N['dog'][0].getPointers(HYPERNYM)
(hypernym -> {noun: canine, canid},)
"""
senseIndex = _index(self, self.synset.getSenses())
def pointsFromThisSense(pointer, selfIndex=senseIndex):
return pointer.sourceIndex == 0 or pointer.sourceIndex - 1 == selfIndex
return filter(pointsFromThisSense, self.synset.getPointers(pointerType))
pointers = getPointers # backwards compatability
def getPointerTargets(self, pointerType=None):
"""Return a sequence of Senses or Synsets.
If pointerType is specified, only targets of pointers of that
type are returned. In this case, pointerType should be an
element of POINTER_TYPES.
>>> N['dog'][0].getPointerTargets()[:5]
[{noun: canine, canid}, {noun: Canis, genus Canis}, {noun: pack}, {noun: pooch, doggie, doggy, barker, bow-wow}, {noun: cur, mongrel, mutt}]
>>> N['dog'][0].getPointerTargets(HYPERNYM)
[{noun: canine, canid}]
"""
return map(Pointer.target, self.getPointers(pointerType))
pointerTargets = getPointerTargets # backwards compatability
def getSenses(self):
return self,
senses = getSenses # backwards compatability
def isTagged(self):
"""Return 1 if any sense is tagged.
>>> N['dog'][0].isTagged()
1
>>> N['dog'][1].isTagged()
0
"""
word = self.word()
return _index(self, word.getSenses()) < word.taggedSenseCount
def getWord(self):
return getWord(self.form, self.pos)
word = getWord # backwards compatability
def __cmp__(self, other):
def senseIndex(sense, synset=self.synset):
return _index(sense, synset.getSenses(), testfn=lambda a,b: a.form == b.form)
return _compareInstances(self, other, ('synset',)) or cmp(senseIndex(self), senseIndex(other))
class Pointer:
    """ A typed directional relationship between Senses or Synsets.
    Fields
    ------
    type : string
        One of POINTER_TYPES.
    pos : string
        The part of speech -- one of NOUN, VERB, ADJECTIVE, ADVERB.
    """
    # Maps the pointer symbols used in the WN data files to the
    # human-readable type names in POINTER_TYPES.
    _POINTER_TYPE_TABLE = {
        '!': ANTONYM,
        '@': HYPERNYM,
        '~': HYPONYM,
        '=': ATTRIBUTE,
        '^': ALSO_SEE,
        '*': ENTAILMENT,
        '>': CAUSE,
        '$': VERB_GROUP,
        '#m': MEMBER_MERONYM,
        '#s': SUBSTANCE_MERONYM,
        '#p': PART_MERONYM,
        '%m': MEMBER_HOLONYM,
        '%s': SUBSTANCE_HOLONYM,
        '%p': PART_HOLONYM,
        '&': SIMILAR,
        '<': PARTICIPLE_OF,
        '\\': PERTAINYM,
        # New in wn 2.0:
        '+': FRAMES,
        ';c': CLASSIF_CATEGORY,
        ';u': CLASSIF_USAGE,
        ';r': CLASSIF_REGIONAL,
        '-c': CLASS_CATEGORY,
        '-u': CLASS_USAGE,
        '-r': CLASS_REGIONAL,
        # New in wn 2.1:
        '@i': INSTANCE_HYPERNYM,
        '~i': INSTANCE_HYPONYM,
    }
    def __init__(self, sourceOffset, pointerTuple):
        """Initialize from the source synset offset and the 4-field
        pointer tuple parsed out of a data-file line."""
        (type, offset, pos, indices) = pointerTuple
        self.type = Pointer._POINTER_TYPE_TABLE[type]
        """One of POINTER_TYPES."""
        self.sourceOffset = sourceOffset
        self.targetOffset = int(offset)
        self.pos = _normalizePOS(pos)
        """part of speech -- one of NOUN, VERB, ADJECTIVE, ADVERB"""
        # The indices field is hexadecimal: high byte is the source word
        # number, low byte the target word number; 0 means the pointer
        # applies to the whole synset rather than a specific sense.
        indices = string.atoi(indices, 16)
        self.sourceIndex = indices >> 8
        self.targetIndex = indices & 255
    def getSource(self):
        """Return the pointer's source: a Sense if a specific word is
        indexed, otherwise the whole source Synset."""
        synset = getSynset(self.pos, self.sourceOffset)
        if self.sourceIndex:
            return synset[self.sourceIndex - 1]
        else:
            return synset
    source = getSource # backwards compatability
    def getTarget(self):
        """Return the pointer's target: a Sense if a specific word is
        indexed, otherwise the whole target Synset."""
        synset = getSynset(self.pos, self.targetOffset)
        if self.targetIndex:
            return synset[self.targetIndex - 1]
        else:
            return synset
    target = getTarget # backwards compatability
    def __str__(self):
        return self.type + " -> " + str(self.target())
    def __repr__(self):
        if ReadableRepresentations:
            return str(self)
        return "<" + str(self) + ">"
    def __cmp__(self, other):
        # Order first by (pos, sourceOffset), then by the pointer's
        # position within its source synset's pointer list.
        diff = _compareInstances(self, other, ('pos', 'sourceOffset'))
        if diff:
            return diff
        synset = self.source()
        def pointerIndex(sense, synset=synset):
            return _index(sense, synset.getPointers(), testfn=lambda a,b: not _compareInstances(a, b, ('type', 'sourceIndex', 'targetIndex')))
        return cmp(pointerIndex(self), pointerIndex(other))
# Loading the lexnames
# Klaus Ries <ries@cs.cmu.edu>
class Lexname:
    """One WordNet lexicographer-file name (e.g. noun.animal).

    Creating an instance registers it in two class-level tables:
    Lexname.dict (keyed by name) and Lexname.lexnames (in creation
    order, so it can be indexed by lexname number)."""
    dict = {}
    lexnames = []
    def __init__(self, name, category):
        self.category = category
        self.name = name
        # Self-register so lookups by name or by number both work.
        Lexname.dict[name] = self
        Lexname.lexnames.append(self)
    def __str__(self):
        return self.name
def setupLexnames():
    """Populate Lexname.dict/Lexname.lexnames from the WN lexnames file.
    Each line holds an index, a name, and a POS category number."""
    for l in open(WNSEARCHDIR+'/lexnames').readlines():
        i,name,category = string.split(l)
        Lexname(name,PartsOfSpeech[int(category)-1])
setupLexnames()
#
# Dictionary
#
class Dictionary:
    """A Dictionary contains all the Words in a given part of speech.
    This module defines four dictionaries, bound to N, V, ADJ, and ADV.
    Indexing a dictionary by a string retrieves the word named by that
    string, e.g. dict['dog']. Indexing by an integer n retrieves the
    nth word, e.g. dict[0]. Access by an arbitrary integer is very
    slow except in the special case where the words are accessed
    sequentially; this is to support the use of dictionaries as the
    range of a for statement and as the sequence argument to map and
    filter.
    Example
    -------
    >>> N['dog']
    dog(n.)
    Fields
    ------
    pos : string
        The part of speech -- one of NOUN, VERB, ADJECTIVE, ADVERB.
    """
    def __init__(self, pos, filenameroot):
        """Open the index and data files for the given POS."""
        self.pos = pos
        """part of speech -- one of NOUN, VERB, ADJECTIVE, ADVERB"""
        # indexFile maps word forms to index lines; dataFile holds the
        # synset records addressed by byte offset.
        self.indexFile = _IndexFile(pos, filenameroot)
        self.dataFile = open(_dataFilePathname(filenameroot), _FILE_OPEN_MODE)
    def __repr__(self):
        # Prefer the module-level variable name (N, V, ADJ, ADV) when
        # this instance is one of the standard dictionaries.
        dictionaryVariables = {N: 'N', V: 'V', ADJ: 'ADJ', ADV: 'ADV'}
        if dictionaryVariables.get(self):
            return self.__module__ + "." + dictionaryVariables[self]
        return "<%s.%s instance for %s>" % (self.__module__, "Dictionary", self.pos)
    def getWord(self, form, line=None):
        """Return the Word for form, raising KeyError if absent.
        An already-fetched index line may be passed to skip the lookup."""
        # Index keys are lowercased with spaces replaced by underscores.
        key = string.replace(string.lower(form), ' ', '_')
        pos = self.pos
        def loader(key=key, line=line, indexFile=self.indexFile):
            line = line or indexFile.get(key)
            return line and Word(line)
        # Words are cached module-wide, keyed by (pos, key).
        word = _entityCache.get((pos, key), loader)
        if word:
            return word
        else:
            raise KeyError, "%s is not in the %s database" % (`form`, `pos`)
    def getSynset(self, offset):
        """Return the Synset stored at the given data-file byte offset."""
        pos = self.pos
        def loader(pos=pos, offset=offset, dataFile=self.dataFile):
            return Synset(pos, offset, _lineAt(dataFile, offset))
        # Synsets are cached module-wide, keyed by (pos, offset).
        return _entityCache.get((pos, offset), loader)
    def _buildIndexCacheFile(self):
        self.indexFile._buildIndexCacheFile()
    #
    # Sequence protocol (a Dictionary's items are its Words)
    #
    def __nonzero__(self):
        """Return false. (This is to avoid scanning the whole index file
        to compute len when a Dictionary is used in test position.)
        >>> N and 'true'
        'true'
        """
        return 1
    def __len__(self):
        """Return the number of index entries.
        >>> len(ADJ)
        21435
        """
        # The (expensive) length is computed once and memoized.
        if not hasattr(self, 'length'):
            self.length = len(self.indexFile)
        return self.length
    def __getslice__(self, a, b):
        # Only integer slices are supported; string-keyed slicing was
        # never implemented.
        results = []
        if type(a) == type('') and type(b) == type(''):
            raise "unimplemented"
        elif type(a) == type(1) and type(b) == type(1):
            for i in range(a, b):
                results.append(self[i])
        else:
            raise TypeError
        return results
    def __getitem__(self, index):
        """If index is a String, return the Word whose form is
        index. If index is an integer n, return the Word
        indexed by the n'th Word in the Index file.
        >>> N['dog']
        dog(n.)
        >>> N[0]
        'hood(n.)
        """
        if isinstance(index, StringType):
            return self.getWord(index)
        elif isinstance(index, IntType):
            # The index line's first field is the underscore-joined form.
            line = self.indexFile[index]
            return self.getWord(string.replace(line[:string.find(line, ' ')], '_', ' '), line)
        else:
            raise TypeError, "%s is not a String or Int" % `index`
    #
    # Dictionary protocol
    #
    # a Dictionary's values are its words, keyed by their form
    #
    def get(self, key, default=None):
        """Return the Word whose form is _key_, or _default_.
        >>> N.get('dog')
        dog(n.)
        >>> N.get('inu')
        """
        try:
            return self[key]
        except LookupError:
            return default
    def keys(self):
        """Return a sorted list of strings that index words in this
        dictionary."""
        return self.indexFile.keys()
    def has_key(self, form):
        """Return true iff the argument indexes a word in this dictionary.
        >>> N.has_key('dog')
        1
        >>> N.has_key('inu')
        0
        """
        return self.indexFile.has_key(form)
    #
    # Testing
    #
    def _testKeys(self):
        """Verify that index lookup can find each word in the index file."""
        print "Testing: ", self
        file = open(self.indexFile.file.name, _FILE_OPEN_MODE)
        counter = 0
        while 1:
            line = file.readline()
            if line == '': break
            # Header lines begin with a space; skip them.
            if line[0] != ' ':
                key = string.replace(line[:string.find(line, ' ')], '_', ' ')
                if (counter % 1000) == 0:
                    print "%s..." % (key,),
                    import sys
                    sys.stdout.flush()
                counter = counter + 1
                self[key]
        file.close()
        print "done."
class _IndexFile:
"""An _IndexFile is an implementation class that presents a
Sequence and Dictionary interface to a sorted index file."""
def __init__(self, pos, filenameroot):
self.pos = pos
self.file = open(_indexFilePathname(filenameroot), _FILE_OPEN_MODE)
self.offsetLineCache = {} # Table of (pathname, offset) -> (line, nextOffset)
self.rewind()
self.shelfname = os.path.join(WNSEARCHDIR, pos + ".pyidx")
try:
import shelve
self.indexCache = shelve.open(self.shelfname, 'r')
except:
pass
def rewind(self):
self.file.seek(0)
while 1:
offset = self.file.tell()
line = self.file.readline()
if (line[0] != ' '):
break
self.nextIndex = 0
self.nextOffset = offset
#
# Sequence protocol (an _IndexFile's items are its lines)
#
def __nonzero__(self):
return 1
def __len__(self):
if hasattr(self, 'indexCache'):
return len(self.indexCache)
self.rewind()
lines = 0
while 1:
line = self.file.readline()
if line == "":
break
lines = lines + 1
return lines
def __nonzero__(self):
return 1
def __getitem__(self, index):
if isinstance(index, StringType):
if hasattr(self, 'indexCache'):
return self.indexCache[index]
return binarySearchFile(self.file, index, self.offsetLineCache, 8)
elif isinstance(index, IntType):
if hasattr(self, 'indexCache'):
return self.get(self.keys[index])
if index < self.nextIndex:
self.rewind()
while self.nextIndex <= index:
self.file.seek(self.nextOffset)
line = self.file.readline()
if line == "":
raise IndexError, "index out of range"
self.nextIndex = self.nextIndex + 1
self.nextOffset = self.file.tell()
return line
else:
raise TypeError, "%s is not a String or Int" % `index`
#
# Dictionary protocol
#
# (an _IndexFile's values are its lines, keyed by the first word)
#
def get(self, key, default=None):
try:
return self[key]
except LookupError:
return default
def keys(self):
if hasattr(self, 'indexCache'):
keys = self.indexCache.keys()
keys.sort()
return keys
else:
keys = []
self.rewind()
while 1:
line = self.file.readline()
if not line: break
key = line.split(' ', 1)[0]
keys.append(key.replace('_', ' '))
return keys
def has_key(self, key):
key = key.replace(' ', '_') # test case: V['haze over']
if hasattr(self, 'indexCache'):
return self.indexCache.has_key(key)
return self.get(key) != None
#
# Index file
#
def _buildIndexCacheFile(self):
import shelve
import os
print "Building %s:" % (self.shelfname,),
tempname = self.shelfname + ".temp"
try:
indexCache = shelve.open(tempname)
self.rewind()
count = 0
while 1:
offset, line = self.file.tell(), self.file.readline()
if not line: break
key = line[:string.find(line, ' ')]
if (count % 1000) == 0:
print "%s..." % (key,),
import sys
sys.stdout.flush()
indexCache[key] = line
count = count + 1
indexCache.close()
os.rename(tempname, self.shelfname)
finally:
try: os.remove(tempname)
except: pass
print "done."
self.indexCache = shelve.open(self.shelfname, 'r')
#
# Lookup functions
#
def getWord(form, pos='noun'):
    "Return a word with the given lexical form and pos."
    return _dictionaryFor(pos).getWord(form)
def getSense(form, pos='noun', senseno=0):
    "Lookup a sense by its sense number. Used by repr(sense)."
    return getWord(form, pos)[senseno]
def getSynset(pos, offset):
    "Lookup a synset by its offset. Used by repr(synset)."
    return _dictionaryFor(pos).getSynset(offset)
# Lowercase aliases, kept for backwards compatibility.
getword, getsense, getsynset = getWord, getSense, getSynset
#
# Private utilities
#
def _requirePointerType(pointerType):
if pointerType not in POINTER_TYPES:
raise TypeError, `pointerType` + " is not a pointer type"
return pointerType
def _compareInstances(a, b, fields):
    """Return -1, 0, or 1, comparing first by type, then by class, and
    finally by each of the named fields in order."""
    if not hasattr(b, '__class__'):
        return cmp(type(a), type(b))
    if a.__class__ != b.__class__:
        return cmp(a.__class__, b.__class__)
    for field in fields:
        result = cmp(getattr(a, field), getattr(b, field))
        if result:
            return result
    return 0
def _equalsIgnoreCase(a, b):
    """Return true iff a and b have the same lowercase representation.
    >>> _equalsIgnoreCase('dog', 'Dog')
    1
    >>> _equalsIgnoreCase('dOg', 'DOG')
    1
    """
    # Identical strings short-circuit the two lowercasing calls.
    if a == b:
        return True
    return string.lower(a) == string.lower(b)
#
# File utilities
#
def _dataFilePathname(filenameroot):
    """Return the pathname of the data file for filenameroot.

    DOS/Windows installs may name the file <root>.dat; otherwise fall
    back to the Unix convention data.<root>."""
    if os.name in ('dos', 'nt'):
        candidate = os.path.join(WNSEARCHDIR, filenameroot + ".dat")
        if os.path.exists(candidate):
            return candidate
    return os.path.join(WNSEARCHDIR, "data." + filenameroot)
def _indexFilePathname(filenameroot):
    """Return the pathname of the index file for filenameroot.

    DOS/Windows installs may name the file <root>.idx; otherwise fall
    back to the Unix convention index.<root>."""
    if os.name in ('dos', 'nt'):
        candidate = os.path.join(WNSEARCHDIR, filenameroot + ".idx")
        if os.path.exists(candidate):
            return candidate
    return os.path.join(WNSEARCHDIR, "index." + filenameroot)
def binarySearchFile(file, key, cache={}, cacheDepth=-1):
    """Return the line of the sorted file that begins with key followed
    by a space, or None if there is no such line.
    The first cacheDepth levels of probed lines are memoized in cache.
    NOTE: the mutable default cache dict is shared across all calls that
    do not pass their own cache."""
    from stat import ST_SIZE
    # The trailing space ensures only whole-key matches.
    key = key + ' '
    keylen = len(key)
    start, end = 0, os.stat(file.name)[ST_SIZE]
    currentDepth = 0
    #count = 0
    while start < end:
        #count = count + 1
        #if count > 20:
        #    raise "infinite loop"
        lastState = start, end
        middle = (start + end) / 2
        if cache.get(middle):
            offset, line = cache[middle]
        else:
            # Seek just before the midpoint and discard the (possibly
            # partial) line so the next read starts at a line boundary.
            file.seek(max(0, middle - 1))
            if middle > 0:
                file.readline()
            offset, line = file.tell(), file.readline()
            if currentDepth < cacheDepth:
                cache[middle] = (offset, line)
        #print start, middle, end, offset, line,
        if offset > end:
            assert end != middle - 1, "infinite loop"
            end = middle - 1
        elif line[:keylen] == key:# and line[keylen + 1] == ' ':
            return line
        #elif offset == end:
        #    return None
        elif line > key:
            assert end != middle - 1, "infinite loop"
            end = middle - 1
        elif line < key:
            start = offset + len(line) - 1
        currentDepth = currentDepth + 1
        thisState = start, end
        if lastState == thisState:
            # detects the condition where we're searching past the end
            # of the file, which is otherwise difficult to detect
            return None
    return None
def _lineAt(file, offset):
file.seek(offset)
return file.readline()
#
# Sequence Utility Functions
#
def _index(key, sequence, testfn=None, keyfn=None):
"""Return the index of key within sequence, using testfn for
comparison and transforming items of sequence by keyfn first.
>>> _index('e', 'hello')
1
>>> _index('E', 'hello', testfn=_equalsIgnoreCase)
1
>>> _index('x', 'hello')
"""
index = 0
for element in sequence:
value = element
if keyfn:
value = keyfn(value)
if (not testfn and value == key) or (testfn and testfn(value, key)):
return index
index = index + 1
return None
def _partition(sequence, size, count):
"""Partition sequence into count subsequences of size
length, and a remainder.
Return (partitions, remainder), where partitions is a sequence of
count subsequences of cardinality count, and
apply(append, partitions) + remainder == sequence."""
partitions = []
for index in range(0, size * count, size):
partitions.append(sequence[index:index + size])
return (partitions, sequence[size * count:])
#
# Cache management
#
# Some kind of cache is necessary since Sense -> Synset references are
# stored by key, and it's nice not to have to cons a new copy of a
# Synset that's been paged in each time a Sense's synset is retrieved.
# Ideally, we'd use a weak dict, but there aren't any. A strong dict
# reintroduces the problem that eliminating the Sense <-> Synset
# circularity was intended to resolve: every entity ever seen is
# preserved forever, making operations that iterate over the entire
# database prohibitive.
#
# The LRUCache approximates a weak dict in the case where temporal
# locality is good.
class _LRUCache:
""" A cache of values such that least recently used element is
flushed when the cache fills.
Private fields
--------------
entities
a dict from key -> (value, timestamp)
history
is a dict from timestamp -> key
nextTimeStamp
is the timestamp to use with the next value that's added.
oldestTimeStamp
The timestamp of the oldest element (the next one to remove),
or slightly lower than that.
This lets us retrieve the key given the timestamp, and the
timestamp given the key. (Also the value given either one.)
That's necessary so that we can reorder the history given a key,
and also manipulate the values dict given a timestamp. #
I haven't tried changing history to a List. An earlier
implementation of history as a List was slower than what's here,
but the two implementations aren't directly comparable."""
def __init__(this, capacity):
this.capacity = capacity
this.clear()
def clear(this):
this.values = {}
this.history = {}
this.oldestTimestamp = 0
this.nextTimestamp = 1
def removeOldestEntry(this):
while this.oldestTimestamp < this.nextTimestamp:
if this.history.get(this.oldestTimestamp):
key = this.history[this.oldestTimestamp]
del this.history[this.oldestTimestamp]
del this.values[key]
return
this.oldestTimestamp = this.oldestTimestamp + 1
def setCapacity(this, capacity):
if capacity == 0:
this.clear()
else:
this.capacity = capacity
while len(this.values) > this.capacity:
this.removeOldestEntry()
def get(this, key, loadfn=None):
value = None
if this.values:
pair = this.values.get(key)
if pair:
(value, timestamp) = pair
del this.history[timestamp]
if value == None:
value = loadfn and loadfn()
if this.values != None:
timestamp = this.nextTimestamp
this.nextTimestamp = this.nextTimestamp + 1
this.values[key] = (value, timestamp)
this.history[timestamp] = key
if len(this.values) > this.capacity:
this.removeOldestEntry()
return value
class _NullCache:
"""A NullCache implements the Cache interface (the interface that
LRUCache implements), but doesn't store any values."""
def clear():
pass
def get(this, key, loadfn=None):
return loadfn and loadfn()
# Capacity of the shared entity cache created below.
DEFAULT_CACHE_CAPACITY = 1000
# Module-wide cache of Words and Synsets, keyed by (pos, form) and
# (pos, offset) respectively (see the cache-management notes above).
_entityCache = _LRUCache(DEFAULT_CACHE_CAPACITY)
def disableCache():
    """Disable the entity cache."""
    # Bug fix: without the global declaration this only rebound a local
    # name, so the cache was never actually disabled.
    global _entityCache
    _entityCache = _NullCache()
def enableCache():
    """Enable the entity cache."""
    # Bug fix: the original referenced the undefined names LRUCache and
    # size (NameError), and also lacked the global declaration.
    global _entityCache
    if not isinstance(_entityCache, _LRUCache):
        _entityCache = _LRUCache(DEFAULT_CACHE_CAPACITY)
def clearCache():
    """Clear the entity cache."""
    _entityCache.clear()
def setCacheCapacity(capacity=DEFAULT_CACHE_CAPACITY):
    """Set the capacity of the entity cache."""
    enableCache()
    _entityCache.setCapacity(capacity)
setCacheSize = setCacheCapacity # for compatability with version 1.0
#
# POS Dictionaries (must be initialized after file utilities)
#
# The four standard dictionaries, one per part of speech.  Constructing
# each one opens its index and data files.
N = Dictionary(NOUN, 'noun')
V = Dictionary(VERB, 'verb')
ADJ = Dictionary(ADJECTIVE, 'adj')
ADV = Dictionary(ADVERB, 'adv')
Dictionaries = (N, V, ADJ, ADV)
#
# Part-of-speech tag normalization tables (must be initialized after
# POS dictionaries)
#
# Maps any accepted POS spelling (name, abbreviation, or Dictionary
# instance) to the canonical POS constant.
_POSNormalizationTable = {}
# Maps a canonical POS constant to its Dictionary instance.
_POStoDictionaryTable = {}
def _initializePOSTables():
    """(Re)build the two POS lookup tables above."""
    global _POSNormalizationTable, _POStoDictionaryTable
    _POSNormalizationTable = {}
    _POStoDictionaryTable = {}
    for pos, abbreviations in (
        (NOUN, "noun n n."),
        (VERB, "verb v v."),
        (ADJECTIVE, "adjective adj adj. a s"),
        (ADVERB, "adverb adv adv. r")):
        tokens = string.split(abbreviations)
        for token in tokens:
            # Accept both the lowercase and uppercase spellings.
            _POSNormalizationTable[token] = pos
            _POSNormalizationTable[string.upper(token)] = pos
    for dict in Dictionaries:
        # A Dictionary object itself may also be used as a POS key.
        _POSNormalizationTable[dict] = dict.pos
        _POStoDictionaryTable[dict.pos] = dict
_initializePOSTables()
def _normalizePOS(pos):
norm = _POSNormalizationTable.get(pos)
if norm:
return norm
raise TypeError, `pos` + " is not a part of speech type"
def _dictionaryFor(pos):
pos = _normalizePOS(pos)
dict = _POStoDictionaryTable.get(pos)
if dict == None:
raise RuntimeError, "The " + `pos` + " dictionary has not been created"
return dict
def buildIndexFiles():
    """Build the index cache file for every POS dictionary."""
    for dictionary in Dictionaries:
        dictionary._buildIndexCacheFile()
#
# Testing
#
def _testKeys():
    """Exhaustively check the keys of every dictionary.

    This is slow, so it is not part of the normal test procedure.
    """
    for dictionary in Dictionaries:
        dictionary._testKeys()
def _test(reset=0):
    """Run the wordnet module's doctests and return the results."""
    import doctest
    import wordnet
    if reset:
        # Keeps doctest from complaining after a reload.
        doctest.master = None
    return doctest.testmod(wordnet)
| Python |
# Natural Language Toolkit: Eliza
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Authors: Steven Bird <sb@csse.unimelb.edu.au>
# Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
# Based on an Eliza implementation by Joe Strout <joe@strout.net>,
# Jeff Epler <jepler@inetnebr.com> and Jez Higgins <mailto:jez@jezuk.co.uk>.
# a translation table used to convert things you say into things the
# computer says back, e.g. "I am" --> "you are"
from en.parser.nltk_lite.chat import Chat, reflections
# a table of response pairs, where each pair consists of a
# regular expression, and a list of possible responses,
# with group-macros labelled as %1, %2.
# (pattern, responses) pairs for the Eliza therapist bot.  Patterns are
# tried in order, so the most specific expressions come first and the
# catch-all (.*) comes last; %N in a response is replaced by the
# (pronoun-reflected) text of match group N.
pairs = (
  (r'I need (.*)',
  ( "Why do you need %1?",
    "Would it really help you to get %1?",
    "Are you sure you need %1?")),
  (r'Why don\'t you (.*)',
  ( "Do you really think I don't %1?",
    "Perhaps eventually I will %1.",
    "Do you really want me to %1?")),
  (r'Why can\'t I (.*)',
  ( "Do you think you should be able to %1?",
    "If you could %1, what would you do?",
    "I don't know -- why can't you %1?",
    "Have you really tried?")),
  (r'I can\'t (.*)',
  ( "How do you know you can't %1?",
    "Perhaps you could %1 if you tried.",
    "What would it take for you to %1?")),
  (r'I am (.*)',
  ( "Did you come to me because you are %1?",
    "How long have you been %1?",
    "How do you feel about being %1?")),
  (r'I\'m (.*)',
  ( "How does being %1 make you feel?",
    "Do you enjoy being %1?",
    "Why do you tell me you're %1?",
    "Why do you think you're %1?")),
  (r'Are you (.*)',
  ( "Why does it matter whether I am %1?",
    "Would you prefer it if I were not %1?",
    "Perhaps you believe I am %1.",
    "I may be %1 -- what do you think?")),
  (r'What (.*)',
  ( "Why do you ask?",
    "How would an answer to that help you?",
    "What do you think?")),
  (r'How (.*)',
  ( "How do you suppose?",
    "Perhaps you can answer your own question.",
    "What is it you're really asking?")),
  (r'Because (.*)',
  ( "Is that the real reason?",
    "What other reasons come to mind?",
    "Does that reason apply to anything else?",
    "If %1, what else must be true?")),
  (r'(.*) sorry (.*)',
  ( "There are many times when no apology is needed.",
    "What feelings do you have when you apologize?")),
  (r'Hello(.*)',
  ( "Hello... I'm glad you could drop by today.",
    "Hi there... how are you today?",
    "Hello, how are you feeling today?")),
  (r'I think (.*)',
  ( "Do you doubt %1?",
    "Do you really think so?",
    "But you're not sure %1?")),
  (r'(.*) friend (.*)',
  ( "Tell me more about your friends.",
    "When you think of a friend, what comes to mind?",
    "Why don't you tell me about a childhood friend?")),
  (r'Yes',
  ( "You seem quite sure.",
    "OK, but can you elaborate a bit?")),
  (r'(.*) computer(.*)',
  ( "Are you really talking about me?",
    "Does it seem strange to talk to a computer?",
    "How do computers make you feel?",
    "Do you feel threatened by computers?")),
  (r'Is it (.*)',
  ( "Do you think it is %1?",
    "Perhaps it's %1 -- what do you think?",
    "If it were %1, what would you do?",
    "It could well be that %1.")),
  (r'It is (.*)',
  ( "You seem very certain.",
    "If I told you that it probably isn't %1, what would you feel?")),
  (r'Can you (.*)',
  ( "What makes you think I can't %1?",
    "If I could %1, then what?",
    "Why do you ask if I can %1?")),
  (r'Can I (.*)',
  ( "Perhaps you don't want to %1.",
    "Do you want to be able to %1?",
    "If you could %1, would you?")),
  (r'You are (.*)',
  ( "Why do you think I am %1?",
    "Does it please you to think that I'm %1?",
    "Perhaps you would like me to be %1.",
    "Perhaps you're really talking about yourself?")),
  (r'You\'re (.*)',
  ( "Why do you say I am %1?",
    "Why do you think I am %1?",
    "Are we talking about you, or me?")),
  (r'I don\'t (.*)',
  ( "Don't you really %1?",
    "Why don't you %1?",
    "Do you want to %1?")),
  (r'I feel (.*)',
  ( "Good, tell me more about these feelings.",
    "Do you often feel %1?",
    "When do you usually feel %1?",
    "When you feel %1, what do you do?")),
  (r'I have (.*)',
  ( "Why do you tell me that you've %1?",
    "Have you really %1?",
    "Now that you have %1, what will you do next?")),
  (r'I would (.*)',
  ( "Could you explain why you would %1?",
    "Why would you %1?",
    "Who else knows that you would %1?")),
  (r'Is there (.*)',
  ( "Do you think there is %1?",
    "It's likely that there is %1.",
    "Would you like there to be %1?")),
  (r'My (.*)',
  ( "I see, your %1.",
    "Why do you say that your %1?",
    "When your %1, how do you feel?")),
  (r'You (.*)',
  ( "We should be discussing you, not me.",
    "Why do you say that about me?",
    "Why do you care whether I %1?")),
  (r'Why (.*)',
  ( "Why don't you tell me the reason why %1?",
    "Why do you think %1?" )),
  (r'I want (.*)',
  ( "What would it mean to you if you got %1?",
    "Why do you want %1?",
    "What would you do if you got %1?",
    "If you got %1, then what would you do?")),
  (r'(.*) mother(.*)',
  ( "Tell me more about your mother.",
    "What was your relationship with your mother like?",
    "How do you feel about your mother?",
    "How does this relate to your feelings today?",
    "Good family relations are important.")),
  (r'(.*) father(.*)',
  ( "Tell me more about your father.",
    "How did your father make you feel?",
    "How do you feel about your father?",
    "Does your relationship with your father relate to your feelings today?",
    "Do you have trouble showing affection with your family?")),
  (r'(.*) child(.*)',
  ( "Did you have close friends as a child?",
    "What is your favorite childhood memory?",
    "Do you remember any dreams or nightmares from childhood?",
    "Did the other children sometimes tease you?",
    "How do you think your childhood experiences relate to your feelings today?")),
  (r'(.*)\?',
  ( "Why do you ask that?",
    "Please consider whether you can answer your own question.",
    "Perhaps the answer lies within yourself?",
    "Why don't you tell me?")),
  (r'quit',
  ( "Thank you for talking with me.",
    "Good-bye.",
    "Thank you, that will be $150. Have a good day!")),
  # Catch-all: matches anything, so it must remain the final pair.
  (r'(.*)',
  ( "Please tell me more.",
    "Let's change focus a bit... Tell me about your family.",
    "Can you elaborate on that?",
    "Why do you say that %1?",
    "I see.",
    "Very interesting.",
    "%1.",
    "I see. And what does that tell you?",
    "How does that make you feel?",
    "How do you feel when you say that?"))
)
# The shared therapist bot instance.
eliza = Chat(pairs, reflections)
def demo():
print "Therapist\n---------"
print "Talk to the program by typing in plain English, using normal upper-"
print 'and lower-case letters and punctuation. Enter "quit" when done.'
print '='*72
print "Hello. How are you feeling today?"
s = ""
while s != "quit":
s = "quit"
try: s = raw_input(">")
except EOFError:
print s
if s:
while s[-1] in "!.": s = s[:-1]
print eliza.respond(s)
# Run an interactive session when executed as a script.
if __name__ == "__main__":
    demo()
| Python |
# Natural Language Toolkit: Zen Chatbot
#
# Copyright (C) 2005-2006 University of Melbourne
# Author: Peter Spiller <pspiller@csse.unimelb.edu.au>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
from re import *
from en.parser.nltk_lite.chat import *
# (pattern, responses) pairs for the deliberately rude chatbot; patterns
# are matched in order, with the catch-all (.*) last.
pairs = (
  (r'We (.*)',
  ("What do you mean, 'we'?",
   "Don't include me in that!",
   "I wouldn't be so sure about that.")),
  (r'You should (.*)',
  ("Don't tell me what to do, buddy.",
   "Really? I should, should I?")),
  (r'You\'re(.*)',
  ("More like YOU'RE %1!",
   "Hah! Look who's talking.",
   "Come over here and tell me I'm %1.")),
  (r'You are(.*)',
  ("More like YOU'RE %1!",
   "Hah! Look who's talking.",
   "Come over here and tell me I'm %1.")),
  (r'I can\'t(.*)',
  ("You do sound like the type who can't %1.",
   "Hear that splashing sound? That's my heart bleeding for you.",
   "Tell somebody who might actually care.")),
  (r'I think (.*)',
  ("I wouldn't think too hard if I were you.",
   "You actually think? I'd never have guessed...")),
  (r'I (.*)',
  ("I'm getting a bit tired of hearing about you.",
   "How about we talk about me instead?",
   "Me, me, me... Frankly, I don't care.")),
  (r'How (.*)',
  ("How do you think?",
   "Take a wild guess.",
   "I'm not even going to dignify that with an answer.")),
  (r'What (.*)',
  # Typo fix: "encylopedia" -> "encyclopedia".
  ("Do I look like an encyclopedia?",
   "Figure it out yourself.")),
  (r'Why (.*)',
  ("Why not?",
   "That's so obvious I thought even you'd have already figured it out.")),
  (r'(.*)shut up(.*)',
  ("Make me.",
   "Getting angry at a feeble NLP assignment? Somebody's losing it.",
   "Say that again, I dare you.")),
  (r'Shut up(.*)',
  ("Make me.",
   "Getting angry at a feeble NLP assignment? Somebody's losing it.",
   "Say that again, I dare you.")),
  (r'Hello(.*)',
  ("Oh good, somebody else to talk to. Joy.",
   "'Hello'? How original...")),
  (r'(.*)',
  ("I'm getting bored here. Become more interesting.",
   "Either become more thrilling or get lost, buddy.",
   "Change the subject before I die of fatal boredom."))
)
# The shared rude-chatbot instance.
rude = Chat(pairs, reflections)
def demo():
print "Unpleasant Chatbot (type 'quit' to exit)."
print '='*72
print "I suppose I should say hello."
converse(rude)
# Run an interactive session when executed as a script.
if __name__ == "__main__":
    demo()
| Python |
# Natural Language Toolkit: Zen Chatbot
#
# Copyright (C) 2005-2006 University of Melbourne
# Author: Amy Holland <amyrh@csse.unimelb.edu.au>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Zen Chatbot talks in gems of Zen wisdom.
This is a sample conversation with Zen Chatbot:
ZC: Welcome, my child.
me: Good afternoon.
ZC: Ask the question you have come to ask.
me: How can I achieve enlightenment?
ZC: How do you suppose?
me: Through meditation.
ZC: Form is emptiness, and emptiness form.
me: How can I empty my mind of worldly troubles?
ZC: Will an answer to that really help in your search for enlightenment?
me: Yes.
ZC: It is better to be right than to be certain.
me: I seek truth and wisdom.
ZC: The search for truth is a long journey.
me: Are you sure?
ZC: Maybe sure, maybe not sure.
The chatbot structure is based on that of chat.eliza. Thus, it uses
a translation table to convert from question to response
i.e. "I am" --> "you are"
Of course, since Zen Chatbot does not understand the meaning of any words,
responses are very limited. Zen Chatbot will usually answer very vaguely, or
respond to a question by asking a different question, in much the same way
as Eliza.
"""
from en.parser.nltk_lite.chat import *
# responses are matched top to bottom, so non-specific matches occur later
# for each match, a list of possible responses is provided
responses = (

    # Zen Chatbot opens with the line "Welcome, my child." The usual
    # response will be a greeting problem: 'good' matches "good morning",
    # "good day" etc, but also "good grief!" and other sentences starting
    # with the word 'good' that may not be a greeting
    (r'(hello(.*))|(good [a-zA-Z]+)',
    ( "The path to enlightenment is often difficult to see.",
      "Greetings. I sense your mind is troubled. Tell me of your troubles.",
      "Ask the question you have come to ask.",
      # Typo fix: "englightenment".
      "Hello. Do you seek enlightenment?")),

    # "I need" and "I want" can be followed by a thing (eg 'help')
    # or an action (eg 'to see you')
    #
    # This is a problem with this style of response -
    # person:  "I need you"
    # chatbot: "me can be achieved by hard work and dedication of the mind"
    # i.e. 'you' is not really a thing that can be mapped this way, so this
    # interpretation only makes sense for some inputs
    #
    (r'i need (.*)',
    ( "%1 can be achieved by hard work and dedication of the mind.",
      "%1 is not a need, but a desire of the mind. Clear your mind of such concerns.",
      "Focus your mind on%1, and you will find what you need.")),

    (r'i want (.*)',
    ( "Desires of the heart will distract you from the path to enlightenment.",
      "Will%1 help you attain enlightenment?",
      "Is%1 a desire of the mind, or of the heart?")),

    # why questions are separated into three types:
    # "why..I" e.g. "why am I here?" "Why do I like cake?"
    # "why..you" e.g. "why are you here?" "Why won't you tell me?"
    # "why..." e.g. "Why is the sky blue?"
    # problems:
    # person:  "Why can't you tell me?"
    # chatbot: "Are you sure I tell you?"
    # - this style works for positives (e.g. "why do you like cake?")
    #   but does not work for negatives (e.g. "why don't you like cake?")
    (r'why (.*) i (.*)\?',
    ( "You%1%2?",
      "Perhaps you only think you%1%2")),

    (r'why (.*) you(.*)\?',
    ( "Why%1 you%2?",
      "%2 I%1",
      "Are you sure I%2?")),

    (r'why (.*)\?',
    ( "I cannot tell you why%1.",
      "Why do you think %1?" )),

    # e.g. "are you listening?", "are you a duck"
    (r'are you (.*)\?',
    ( "Maybe%1, maybe not%1.",
      "Whether I am%1 or not is God's business.")),

    # e.g. "am I a duck?", "am I going to die?"
    (r'am i (.*)\?',
    ( "Perhaps%1, perhaps not%1.",
      "Whether you are%1 or not is not for me to say.")),

    # what questions, e.g. "what time is it?"
    # problems:
    # person:  "What do you want?"
    # chatbot: "Seek truth, not what do me want."
    (r'what (.*)\?',
    ( "Seek truth, not what%1.",
      "What%1 should not concern you.")),

    # how questions, e.g. "how do you do?"
    (r'how (.*)\?',
    ( "How do you suppose?",
      "Will an answer to that really help in your search for enlightenment?",
      "Ask yourself not how, but why.")),

    # can questions, e.g. "can you run?", "can you come over here please?"
    (r'can you (.*)\?',
    ( "I probably can, but I may not.",
      "Maybe I can%1, and maybe I cannot.",
      "I can do all, and I can do nothing.")),

    # can questions, e.g. "can I have some cake?", "can I know truth?"
    (r'can i (.*)\?',
    ( "You can%1 if you believe you can%1, and have a pure spirit.",
      "Seek truth and you will know if you can%1.")),

    # e.g. "It is raining" - implies the speaker is certain of a fact
    (r'it is (.*)',
    ( "How can you be certain that%1, when you do not even know yourself?",
      "Whether it is%1 or not does not change the way the world is.")),

    # e.g. "is there a doctor in the house?"
    (r'is there (.*)\?',
    ( "There is%1 if you believe there is.",
      "It is possible that there is%1.")),

    # e.g. "is it possible?", "is this true?"
    (r'is(.*)\?',
    ( "%1 is not relevant.",
      "Does this matter?")),

    # non-specific question
    (r'(.*)\?',
    ( "Do you think %1?",
      "You seek the truth. Does the truth seek you?",
      "If you intentionally pursue the answers to your questions, the answers become hard to see.",
      "The answer to your question cannot be told. It must be experienced.")),

    # expression of hate of form "I hate you" or "Kelly hates cheese"
    # Bug fix: the verb alternatives are now grouped inside a single pair
    # of parentheses.  The original ungrouped alternation left %2 pointing
    # at a group that was unmatched (None) on the "dislike"/"don't like"
    # branches, which crashed the wildcard substitution.
    (r'(.*) (hate[s]?|dislike[s]?|don\'t like)(.*)',
    ( "Perhaps it is not about hating %2, but about hate from within.",
      "Weeds only grow when we dislike them",
      "Hate is a very strong emotion.")),

    # statement containing the word 'truth'
    (r'(.*) truth(.*)',
    ( "Seek truth, and truth will seek you.",
      "Remember, it is not the spoon which bends - only yourself.",
      "The search for truth is a long journey.")),

    # desire to do an action
    # e.g. "I want to go shopping"
    (r'i want to (.*)',
    # Typo fix: "truely".
    ( "You may %1 if your heart truly desires to.",
      "You may have to %1.")),

    # desire for an object
    # e.g. "I want a pony"
    (r'i want (.*)',
    ( "Does your heart truly desire %1?",
      "Is this a desire of the heart, or of the mind?")),

    # e.g. "I can't wait" or "I can't do this"
    (r'i can\'t (.*)',
    ( "What we can and can't do is a limitation of the mind.",
      "There are limitations of the body, and limitations of the mind.",
      "Have you tried to%1 with a clear mind?")),

    # "I think.." indicates uncertainty. e.g. "I think so."
    # problem: exceptions...
    # e.g. "I think, therefore I am"
    (r'i think (.*)',
    ( "Uncertainty in an uncertain world.",
      "Indeed, how can we be certain of anything in such uncertain times.",
      "Are you not, in fact, certain that%1?")),

    # "I feel...emotions/sick/light-headed..."
    # Bug fix: the first two responses were accidentally one concatenated
    # string (a missing comma), which produced "...mind.What do you...".
    (r'i feel (.*)',
    ( "Your body and your emotions are both symptoms of your mind.",
      "What do you believe is the root of such feelings?",
      "Feeling%1 can be a sign of your state-of-mind.")),

    # exclaimation mark indicating emotion
    # e.g. "Wow!" or "No!"
    (r'(.*)!',
    ( "I sense that you are feeling emotional today.",
      "You need to calm your emotions.")),

    # because [statement]
    # e.g. "because I said so"
    (r'because (.*)',
    # Typo fix: "knowning".
    ( "Does knowing the reasons behind things help you to understand"
      " the things themselves?",
      "If%1, what else must be true?")),

    # yes or no - raise an issue of certainty/correctness
    (r'(yes)|(no)',
    ( "Is there certainty in an uncertain world?",
      "It is better to be right than to be certain.")),

    # sentence containing word 'love'
    (r'(.*)love(.*)',
    ( "Think of the trees: they let the birds perch and fly with no intention to call them when they come, and no longing for their return when they fly away. Let your heart be like the trees.",
      "Free love!")),

    # sentence containing word 'understand' - r
    (r'(.*)understand(.*)',
    ( "If you understand, things are just as they are;"
      " if you do not understand, things are just as they are.",
      "Imagination is more important than knowledge.")),

    # 'I', 'me', 'my' - person is talking about themself.
    # this breaks down when words contain these - eg 'Thyme', 'Irish'
    # Bug fix: the alternatives are grouped so the whole alternation sits
    # between the two (.*) wildcards, as the comment above intends.
    (r'(.*)((me )|( me)|(my)|(mine)|(i))(.*)',
    ( "'I', 'me', 'my'... these are selfish expressions.",
      "Have you ever considered that you might be a selfish person?",
      "Try to consider others, not just yourself.",
      "Think not just of yourself, but of others.")),

    # 'you' starting a sentence
    # e.g. "you stink!"
    (r'you (.*)',
    # Typo fix: "conern".
    ( "My path is not of concern to you.",
      "I am but one, and you but one more.")),

    # say goodbye with some extra Zen wisdom.
    (r'exit',
    ( "Farewell. The obstacle is the path.",
      "Farewell. Life is a journey, not a destination.",
      # Typo fix: "knowning".
      "Good bye. We are cups, constantly and quietly being filled."
      "\nThe trick is knowing how to tip ourselves over and let the beautiful stuff out.")),

    # fall through case -
    # when stumped, respond with generic zen wisdom
    #
    (r'(.*)',
    ( "When you're enlightened, every word is wisdom.",
      "Random talk is useless.",
      "The reverse side also has a reverse side.",
      "Form is emptiness, and emptiness is form.",
      "I pour out a cup of water. Is the cup empty?"))
)
# The shared Zen Chatbot instance.
zen = Chat(responses, reflections)
def demo():
print '*'*75
print "Zen Chatbot!".center(75)
print '*'*75
print '"Look beyond mere words and letters - look into your mind"'.center(75)
print "* Talk your way to truth with Zen Chatbot."
print "* Type 'quit' when you have had enough."
print '*'*75
print "Welcome, my child."
converse(zen)
# Run an interactive session when executed as a script.
if __name__ == "__main__":
    demo()
| Python |
# Natural Language Toolkit: Chatbots
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Authors: Steven Bird <sb@csse.unimelb.edu.au>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
# Based on an Eliza implementation by Joe Strout <joe@strout.net>,
# Jeff Epler <jepler@inetnebr.com> and Jez Higgins <jez@jezuk.co.uk>.
"""
A class for simple chatbots. These perform simple pattern matching on sentences
typed by users, and respond with automatically generated sentences.
These chatbots may not work using the windows command line or the
windows IDLE GUI.
"""
import string
import re
import random
# First-person <-> second-person word swaps applied to matched groups
# before they are echoed back (single words only; see the "bug" note on
# Chat._substitute).
reflections = {
  "am"     : "are",
  "was"    : "were",
  "i"      : "you",
  "i'd"    : "you would",
  "i've"   : "you have",
  "i'll"   : "you will",
  "my"     : "your",
  "are"    : "am",
  "you've" : "I have",
  "you'll" : "I will",
  "your"   : "my",
  "yours"  : "mine",
  "you"    : "me",
  "me"     : "you"
}
class Chat(object):
    """
    A simple pattern-matching chatbot: user input is matched against a
    sequence of regular expressions, and a canned (randomly chosen)
    response for the first matching pattern is returned, with %N markers
    replaced by the reflected text of match group N.
    """

    def __init__(self, pairs, reflections=None):
        """
        Initialize the chatbot.  Pairs is a list of patterns and responses.  Each
        pattern is a regular expression matching the user's statement or question,
        e.g. r'I like (.*)'.  For each such pattern a list of possible responses
        is given, e.g. ['Why do you like %1', 'Did you ever dislike %1'].  Material
        which is matched by parenthesized sections of the patterns (e.g. .*) is mapped to
        the numbered positions in the responses, e.g. %1.

        @type pairs: C{list} of C{tuple}
        @param pairs: The patterns and responses
        @type reflections: C{dict}
        @param reflections: A mapping between first and second person expressions
        @rtype: C{None}
        """
        self._pairs = [(re.compile(x, re.IGNORECASE), y) for (x, y) in pairs]
        # None (not a mutable {} default) signals "no reflections".
        self._reflections = reflections if reflections is not None else {}

    # bug: only permits single word expressions to be mapped
    def _substitute(self, str):
        """
        Substitute words in the string, according to the specified reflections,
        e.g. "I'm" -> "you are"

        @type str: C{string}
        @param str: The string to be mapped
        @rtype: C{string}
        """
        words = ""
        # str methods and dict.get replace the deprecated string-module
        # helpers and the Py2-only has_key.
        for word in str.lower().split():
            word = self._reflections.get(word, word)
            words += ' ' + word
        return words

    def _wildcards(self, response, match):
        # Replace each %N marker with the reflected text of match group N.
        pos = response.find('%')
        while pos >= 0:
            num = int(response[pos + 1:pos + 2])
            response = (response[:pos] +
                        self._substitute(match.group(num)) +
                        response[pos + 2:])
            pos = response.find('%')
        return response

    def respond(self, str):
        """
        Generate a response to the user input.

        @type str: C{string}
        @param str: The string to be mapped
        @rtype: C{string}
        """
        # check each pattern
        for (pattern, response) in self._pairs:
            match = pattern.match(str)
            # did the pattern match?
            if match:
                resp = random.choice(response)       # pick a random response
                resp = self._wildcards(resp, match)  # process wildcards
                # fix munged punctuation at the end
                if resp[-2:] == '?.': resp = resp[:-2] + '.'
                if resp[-2:] == '??': resp = resp[:-2] + '?'
                return resp
        # Returns None only when no pattern matched; callers conventionally
        # end their pairs with a catch-all (.*) pattern.
# Hold a conversation with a chatbot
def converse(bot, quit="quit"):
input = ""
while input != quit:
input = quit
try: input = raw_input(">")
except EOFError:
print input
if input:
while input[-1] in "!.": input = input[:-1]
print bot.respond(input)
| Python |
# Natural Language Toolkit: Teen Chatbot
#
# Copyright (C) 2005-2006 University of Melbourne
# Author: Selina Dennis <sjmd@csse.unimelb.edu.au>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
This chatbot is a tongue-in-cheek take on the average teen
anime junky that frequents YahooMessenger or MSNM.
All spelling mistakes and flawed grammar are intentional.
"""
# First-person <-> second-person swaps in teen-chat spelling; applied to
# matched groups before they are echoed back (single words only).
reflections = {
    "am"     : "r",
    "was"    : "were",
    "i"      : "u",
    "i'd"    : "u'd",
    "i've"   : "u'v",
    "ive"    : "u'v",
    "i'll"   : "u'll",
    "my"     : "ur",
    "are"    : "am",
    "you're" : "im",
    "you've" : "ive",
    "you'll" : "i'll",
    "your"   : "my",
    "yours"  : "mine",
    "you"    : "me",
    "u"      : "me",
    "ur"     : "my",
    "urs"    : "mine",
    "me"     : "u"
}
# Note: %1/2/etc are used without spaces prior as the chat bot seems
# to add a superfluous space when matching.
# (All spelling mistakes and flawed grammar in the responses are
# intentional -- see the module docstring.)
pairs = (
  (r'I\'m (.*)',
  ( "ur%1?? that's so cool! kekekekeke ^_^ tell me more!",
    "ur%1? neat!! kekeke >_<")),
  (r'(.*) don\'t you (.*)',
  ( "u think I can%2??! really?? kekeke \<_\<",
    "what do u mean%2??!",
    "i could if i wanted, don't you think!! kekeke")),
  (r'ye[as] [iI] (.*)',
  ( "u%1? cool!! how?",
    "how come u%1??",
    "u%1? so do i!!")),
  (r'do (you|u) (.*)\??',
  ( "do i%2? only on tuesdays! kekeke *_*",
    "i dunno! do u%2??")),
  (r'(.*)\?',
  ( "man u ask lots of questions!",
    "booooring! how old r u??",
    "boooooring!! ur not very fun")),
  (r'(cos|because) (.*)',
  ( "hee! i don't believe u! >_<",
    "nuh-uh! >_<",
    "ooooh i agree!")),
  (r'why can\'t [iI] (.*)',
  ( "i dunno! y u askin me for!",
    "try harder, silly! hee! ^_^",
    "i dunno! but when i can't%1 i jump up and down!")),
  (r'I can\'t (.*)',
  ( "u can't what??! >_<",
    "that's ok! i can't%1 either! kekekekeke ^_^",
    "try harder, silly! hee! ^&^")),
  (r'(.*) (like|love|watch) anime',
  ( "omg i love anime!! do u like sailor moon??! ^&^",
    "anime yay! anime rocks sooooo much!",
    "oooh anime! i love anime more than anything!",
    "anime is the bestest evar! evangelion is the best!",
    "hee anime is the best! do you have ur fav??")),
  (r'I (like|love|watch|play) (.*)',
  ( "yay! %2 rocks!",
    "yay! %2 is neat!",
    "cool! do u like other stuff?? ^_^")),
  (r'anime sucks|(.*) (hate|detest) anime',
  ( "ur a liar! i'm not gonna talk to u nemore if u h8 anime *;*",
    "no way! anime is the best ever!",
    "nuh-uh, anime is the best!")),
  (r'(are|r) (you|u) (.*)',
  # Bug fix: this echoed %1 (the "are"/"r" group) instead of %3 (what was
  # actually asked about), e.g. "r u dumb" -> "am i r??!".
  ( "am i%3??! how come u ask that!",
    "maybe! y shud i tell u?? kekeke >_>")),
  (r'what (.*)',
  ( "hee u think im gonna tell u? .v.",
    "booooooooring! ask me somethin else!")),
  (r'how (.*)',
  ( "not tellin!! kekekekekeke ^_^",)),
  (r'(hi|hello|hey) (.*)',
  ( "hi!!! how r u!!",)),
  (r'quit',
  ( "mom says i have to go eat dinner now :,( bye!!",
    "awww u have to go?? see u next time!!",
    "how to see u again soon! ^_^")),
  (r'(.*)',
  ( "ur funny! kekeke",
    "boooooring! talk about something else! tell me wat u like!",
    "do u like anime??",
    "do u watch anime? i like sailor moon! ^_^",
    "i wish i was a kitty!! kekekeke ^_^"))
)
from en.parser.nltk_lite.chat import Chat, converse
# The shared teen-chatbot instance.
iesha = Chat(pairs, reflections)
def demo():
print "Iesha the TeenBoT: Simple Replication of ELIZA\n---------"
print "Talk to the program by typing in plain English, using normal upper-"
print 'and lower-case letters and punctuation. Enter "quit" when done.'
print '='*72
print "hi!! i'm iesha! who r u??!"
converse(iesha)
# Run an interactive session when executed as a script.
if __name__ == "__main__":
    demo()
| Python |
# Natural Language Toolkit: Evaluation
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@csse.unimelb.edu.au>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Utility functions for evaluating processing modules.
"""
import sets, math
def accuracy(reference, test):
    """
    Given a list of reference values and a corresponding list of test
    values, return the percentage of corresponding values that are
    equal.  In particular, return the percentage of indices
    C{0<i<=len(test)} such that C{test[i] == reference[i]}.

    @type reference: C{list}
    @param reference: An ordered list of reference values.
    @type test: C{list}
    @param test: A list of values to compare against the corresponding
        reference values.
    @raise ValueError: If C{reference} and C{length} do not have the
        same length.
    @raise ZeroDivisionError: If both lists are empty (accuracy is
        undefined in that case).
    """
    if len(reference) != len(test):
        raise ValueError("Lists must have the same length.")
    # Count matches directly instead of materializing a throwaway list.
    num_correct = sum(1 for x, y in zip(reference, test) if x == y)
    return float(num_correct) / len(reference)
def precision(reference, test):
    """
    Given a set of reference values and a set of test values, return
    the percentage of test values that appear in the reference set.
    In particular, return |C{reference}S{cap}C{test}|/|C{test}|.
    If C{test} is empty, then return C{None}.

    @type reference: C{Set}
    @param reference: A set of reference values.
    @type test: C{Set}
    @param test: A set of values to compare against the reference set.
    @rtype: C{float} or C{None}
    """
    if not test:
        # Precision is undefined for an empty test set.
        return None
    overlap = reference.intersection(test)
    return float(len(overlap)) / len(test)
def recall(reference, test):
    """
    Given a set of reference values and a set of test values, return
    the percentage of reference values that appear in the test set.
    In particular, return |C{reference}S{cap}C{test}|/|C{reference}|.
    If C{reference} is empty, then return C{None}.

    @type reference: C{Set}
    @param reference: A set of reference values.
    @type test: C{Set}
    @param test: A set of values to compare against the reference set.
    @rtype: C{float} or C{None}
    """
    if not reference:
        # Recall is undefined for an empty reference set.
        return None
    overlap = reference.intersection(test)
    return float(len(overlap)) / len(reference)
def f_measure(reference, test, alpha=0.5):
    """
    Given a set of reference values and a set of test values, return
    the f-measure of the test values, when compared against the
    reference values.  The f-measure is the harmonic mean of the
    L{precision} and L{recall}, weighted by C{alpha}.  In particular,
    given the precision M{p} and recall M{r} defined by:
        - M{p} = |C{reference}S{cap}C{test}|/|C{test}|
        - M{r} = |C{reference}S{cap}C{test}|/|C{reference}|
    The f-measure is:
        - 1/(C{alpha}/M{p} + (1-C{alpha})/M{r})
    If either C{reference} or C{test} is empty, then C{f_measure}
    returns C{None}.

    @type reference: C{Set}
    @param reference: A set of reference values.
    @type test: C{Set}
    @param test: A set of values to compare against the reference set.
    @rtype: C{float} or C{None}
    """
    p = precision(reference, test)
    r = recall(reference, test)
    # Undefined if either underlying metric is undefined.
    if p is None or r is None:
        return None
    # A zero term makes the harmonic mean zero (and guards the division).
    if p == 0 or r == 0:
        return 0
    denominator = alpha / p + (1 - alpha) / r
    return 1.0 / denominator
def log_likelihood(reference, test):
    """
    Given a list of reference values and a corresponding list of test
    probability distributions, return the average log likelihood of
    the reference values under those distributions.

    @param reference: A list of reference values
    @type reference: C{list}
    @param test: A list of probability distributions over values to
        compare against the corresponding reference values.
    @type test: C{list} of L{ProbDist}
    @raise ValueError: If the lists differ in length.
    """
    if len(reference) != len(test):
        raise ValueError("Lists must have the same length.")
    # Average value of dist.logprob(val) over the corresponding pairs.
    total_likelihood = sum(dist.logprob(val)
                           for (val, dist) in zip(reference, test))
    return total_likelihood / len(reference)
class ConfusionMatrix(object):
    """
    The confusion matrix between a list of reference values and a
    corresponding list of test values.  Entry [M{r},M{t}] of this
    matrix is a count of the number of times that the reference value
    M{r} corresponds to the test value M{t}.  E.g.:

        >>> ref = 'DET NN VB DET JJ NN NN IN DET NN'.split()
        >>> test = 'DET VB VB DET NN NN NN IN DET NN'.split()
        >>> cm = ConfusionMatrix(ref, test)
        >>> print cm['NN', 'NN']
        3

    Note that the diagonal entries (M{Ri}=M{Tj}) of this matrix
    corresponds to correct values; and the off-diagonal entries
    correspond to incorrect values.
    """
    def __init__(self, reference, test):
        """
        Construct a new confusion matrix from a list of reference
        values and a corresponding list of test values.

        @type reference: C{list}
        @param reference: An ordered list of reference values.
        @type test: C{list}
        @param test: A list of values to compare against the
            corresponding reference values.
        @raise ValueError: If C{reference} and C{length} do not have
            the same length.
        """
        if len(reference) != len(test):
            raise ValueError('Lists must have the same length.')
        # Get a list of all values (list() keeps this working on Python 3,
        # where dict.keys() is a view).
        values = list(dict([(val, 1) for val in reference + test]).keys())
        # Construct a value->index dictionary
        indices = dict([(val, i) for (i, val) in enumerate(values)])
        # Make a confusion matrix table.
        confusion = [[0 for val in values] for val in values]
        max_conf = 0  # Maximum confusion
        for w, g in zip(reference, test):
            confusion[indices[w]][indices[g]] += 1
            max_conf = max(max_conf, confusion[indices[w]][indices[g]])
        #: A list of all values in C{reference} or C{test}.
        self._values = values
        #: A dictionary mapping values in L{self._values} to their indices.
        self._indices = indices
        #: The confusion matrix itself (as a list of lists of counts).
        self._confusion = confusion
        #: The greatest count in L{self._confusion} (used for printing).
        # Bug fix: this was hard-coded to 0, which made pp() compute the
        # entry width from the wrong maximum.
        self._max_conf = max_conf
        #: The total number of values in the confusion matrix.
        self._total = len(reference)
        #: The number of correct (on-diagonal) values in the matrix.
        self._correct = sum([confusion[i][i] for i in range(len(values))])

    def __getitem__(self, key):
        """
        @param key: a C{(li, lj)} pair of values.
        @return: The number of times that value C{li} was expected and
            value C{lj} was given.
        @rtype: C{int}
        """
        # The Py2-only tuple-unpacking parameter was replaced with an
        # explicit unpack so the class also compiles on Python 3.
        (li, lj) = key
        i = self._indices[li]
        j = self._indices[lj]
        return self._confusion[i][j]

    def __repr__(self):
        return '<ConfusionMatrix: %s/%s correct>' % (self._correct,
                                                     self._total)

    def __str__(self):
        return self.pp()

    def pp(self, show_percents=False, values_in_chart=True):
        """
        @return: A multi-line string representation of this confusion
            matrix.
        @todo: add marginals?
        """
        confusion = self._confusion
        if values_in_chart:
            values = self._values
        else:
            values = range(len(self._values))
        # Construct a format string for row values
        valuelen = max([len(str(val)) for val in values])
        value_format = '%' + str(valuelen) + 's |'
        # Construct a format string for matrix entries
        if show_percents:
            entrylen = 6
            entry_format = '%5.1f%%'
        else:
            entrylen = len(str(self._max_conf))
            entry_format = '%' + str(entrylen) + 'd'
        # Write the column values.
        value_strings = [str(val) for val in values]
        s = ''
        for i in range(valuelen):
            s += (' '*valuelen)+' |'
            for val in value_strings:
                if i >= valuelen-len(val):
                    s += val[i-valuelen+len(val)].rjust(entrylen+1)
                else:
                    s += ' '*(entrylen+1)
            s += ' |\n'
        # Write a dividing line
        s += '%s-+-%s+\n' % ('-'*valuelen, '-'*((entrylen+1)*len(values)))
        # Write the entries.
        for i in range(len(values)):
            s += value_format % values[i]
            for j in range(len(values)):
                s += ' '
                if show_percents:
                    s += entry_format % (100.0*confusion[i][j]/self._total)
                else:
                    s += entry_format % confusion[i][j]
            s += ' |\n'
        # Write a dividing line
        s += '%s-+-%s+\n' % ('-'*valuelen, '-'*((entrylen+1)*len(values)))
        # Write a key
        s += '(row = reference; col = test)\n'
        if not values_in_chart:
            s += 'Value key:\n'
            for i, value in enumerate(self._values):
                s += '%6d: %s\n' % (i, value)
        return s

    def key(self):
        """Return a legend mapping row/column indices to their values."""
        values = self._values
        # (local renamed from `str`, which shadowed the builtin we need)
        out = 'Value key:\n'
        indexlen = len(str(len(values)-1))
        key_format = ' %' + str(indexlen) + 'd: %s\n'
        for i in range(len(values)):
            out += key_format % (i, values[i])
        return out
def demo():
    """Demonstrate the scoring utilities: build a L{ConfusionMatrix}
    from a reference and a test POS tagging, then compute accuracy,
    precision, recall and F-measure on the corresponding sets."""
    # Token-by-token comparison: confusion matrix and accuracy.
    print '-'*75
    reference = 'DET NN VB DET JJ NN NN IN DET NN'.split()
    test = 'DET VB VB DET NN NN NN IN DET NN'.split()
    print 'Reference =', reference
    print 'Test =', test
    print 'Confusion matrix:'
    print ConfusionMatrix(reference, test)
    print 'Accuracy:', accuracy(reference, test)
    # Type-level comparison: precision/recall/F-measure over the sets
    # of distinct tags.
    # NOTE(review): uses the long-deprecated 'sets' module, presumably
    # imported near the top of this file -- confirm before modernizing.
    print '-'*75
    reference_set = sets.Set(reference)
    test_set = sets.Set(test)
    print 'Reference =', reference_set
    print 'Test = ', test_set
    print 'Precision:', precision(reference_set, test_set)
    print ' Recall:', recall(reference_set, test_set)
    print 'F-Measure:', f_measure(reference_set, test_set)
    print '-'*75
# When executed as a script, run the demonstration above.
if __name__ == '__main__':
    demo()
| Python |
# Natural Language Toolkit: Semantic Interpretation
#
# Author: Ewan Klein <ewan@inf.ed.ac.uk>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Utility functions for batch-processing sentences: parsing and
extraction of the semantic representation of the root node of the
syntax tree, followed by evaluation of the semantic representation in
a first-order model.
"""
from en.parser.nltk_lite import tokenize
from en.parser.nltk_lite.parse.category import GrammarCategory
from en.parser.nltk_lite.parse.grammarfile import GrammarFile
from en.parser.nltk_lite.parse.tree import Tree
from evaluate import *
from logic import *
##############################################################
## Utility functions for connecting parse output to semantics
##############################################################
def text_parse(inputs, grammar, trace=0):
    """
    Convert input sentences into syntactic trees.

    Returns a dictionary mapping each input sentence to the list of
    parse trees produced for it by the grammar's Earley parser.
    """
    results = {}
    for sentence in inputs:
        toks = list(tokenize.whitespace(sentence))
        earley = grammar.earley_parser(trace=trace)
        results[sentence] = earley.get_parse_list(toks)
    return results
def root_node(syntree, start='S'):
    """
    Find the root node in a syntactic tree.
    """
    # check that we have a tree
    assert isinstance(syntree, Tree)
    # in the featurechart parser the top node is '[INIT]', so the start
    # symbol may sit either at this node or at its first child
    top = syntree.node
    if top.head() == start:
        return top
    child = syntree[0].node
    if child.head() == start:
        return child
    raise ValueError("Tree not rooted in %s node" % start)
def semrep(node, beta_reduce=True):
"""
Find the semantic representation at a given tree node.
"""
# check that we have a GrammarCategory
assert isinstance(node, GrammarCategory)
try:
semrep = node.get_feature('sem')
if beta_reduce:
semrep = semrep.simplify()
return semrep
except KeyError:
print "Node has no 'sem' feature specification"
raise
def root_semrep(syntree, beta_reduce=True, start='S'):
    """
    Find the semantic representation at the root of a tree.

    @param syntree: a parse tree.
    @param beta_reduce: if true, simplify the representation.
    @param start: the start symbol identifying the root node; forwarded
        to L{root_node} (previously hard-wired to its default 'S').
    """
    node = root_node(syntree, start=start)
    return semrep(node, beta_reduce=beta_reduce)
def text_interpret(inputs, grammar, beta_reduce=True):
    """
    Pair every syntactic parse tree of every input sentence with the
    semantic representation found at its root.
    """
    parses = text_parse(inputs, grammar)
    interpretations = {}
    for sent in inputs:
        pairs = []
        for tree in parses[sent]:
            pairs.append((tree, root_semrep(tree, beta_reduce=beta_reduce)))
        interpretations[sent] = pairs
    return interpretations
def text_evaluate(inputs, grammar, model, assignment):
    """
    Extend each (tree, semantics) pair for each parse of each input
    sentence with its truth value in the given model, under the given
    variable assignment.
    """
    evaluations = {}
    semreps = text_interpret(inputs, grammar)
    for sent in inputs:
        triples = []
        for (syn, sem) in semreps[sent]:
            triples.append((syn, sem, model.evaluate(str(sem), assignment)))
        evaluations[sent] = triples
    return evaluations
| Python |
# Natural Language Toolkit: Models for first-order languages with lambda
#
# Author: Ewan Klein <ewan@inf.ed.ac.uk>,
# URL: <http://nltk.sourceforge.net>
# For license information, see LICENSE.TXT
#
# $Id: evaluate.py 3581 2006-10-20 04:37:34Z ehk $
"""
Overview
========
This module provides data structures for representing first-order
models. A model is a pair M{<D,V>}, where M{D} is a domain of discourse and
M{V} is a valuation function for the non-logical constants of a
first-order language. We assume that the language is based on the
lambda calculus, in the style of Montague grammar.
We also assume that non-logical constants are either individual constants
or functors. In particular, rather than interpreting a one-place
predicate M{P} as a set M{S}, we interpret it as the corresponding
characteristic function M{f}, where M{f(a) = True} iff M{a} is in
M{S}. For example, instead of interpreting 'dog' as the set of
individuals M{{'d1', 'd2', 'd3'}}, we interpret it as the function
which maps 'd1', 'd2' and 'd3' to M{True} and every other entity to
M{False}.
Thus, as a first approximation, non-logical constants are interpreted
by the valuation M{V} as follows (note that M{e} is the type of
I{entities} and M{t} is the type of truth values):
- if S{alpha} is an individual constant, then M{V}(S{alpha})
is an element of M{D}.
- If S{gamma} is a functor of type (M{e} x ... x M{e}) -> M{t}, then
M{V}(S{gamma}) is a function M{f} from M{D} x ... x M{D} to M{{True, False}}.
However, since we are basing our language on the lambda calculus (see
L{logic}), a binary relation such as 'like' will not in fact be
associated with the type (M{e} x M{e}) -> M{t}, but rather the type
(M{e} -> (M{e} -> M{t})); i.e., a function from entities to a function
from entities to truth values. In other words, functors are assigned
'Curried' functions as their values. It should also be noted that
expressions of the language are not explicitly typed. We leave it to
the grammar writer to assign 'sensible' values to expressions rather
than enforcing any type-to-denotation consistency.
Characteristic Functions
========================
Within L{models}, Curried characteristic functions are implemented as
a subclass of dictionaries, using the L{CharFun} constructor.
>>> cf = CharFun({'d1' : CharFun({'d2': True}), 'd2' : CharFun({'d1': True})})
Values of a L{CharFun} are accessed by indexing in the usual way:
>>> cf['d1']
{'d2': True}
>>> cf['d1']['d2']
True
L{CharFun}s are 'sparse' data structures in the sense that they omit
entries of the form C{e: False}. In fact, they
behave just like ordinary dictionaries on keys which are
out of their domain, rather than yielding the value C{False}:
>>> cf['not in domain']
Traceback (most recent call last):
...
KeyError: 'not in domain'
The assignment of C{False} values is delegated to a wrapper method
L{app} of the L{Model} class. L{app} embodies the Closed World
assumption; i.e., where C{m} is an instance of L{Model}:
>>> m.app(cf,'not in domain')
False
It might be asked why we don't modify instances of L{CharFun} to give
the value C{False} in place of a C{KeyError} for some entity 'd3'
which is not a key for the dictionary. The reason is that this would
implement a behaviour equivalent to C{cf2} below, which yields the Boolean
C{False} for the entity 'd3' rather than a I{function} which yields
C{False} for every entity in the domain:
>>> cf2 = {'d1': {'d2': True}, 'd3': False}
As a result, trying to evaluate something like C{cf2['d3']['d2']} would yield a
C{TypeError} rather than C{False}, as required.
In practice, it will often be more convenient for a user to specify
interpretations as M{n}-ary relations (i.e., sets of M{n}-tuples) rather
than as M{n}-ary functions. L{CharFun} provides a C{read} method which
will convert such relations into Curried characteristic functions:
>>> s = set([('d1', 'd2'), ('d3', 'd4')])
>>> cf = CharFun()
>>> cf.read(s)
>>> cf
{'d2': {'d1': True}, 'd4': {'d3': True}}
C{read} will raise an exception if the set is not in fact a
relation (i.e., contains tuples of different lengths):
>>> wrong = set([('d1', 'd2'), ('d2', 'd1', 'd3')])
>>> cf.read(wrong)
Traceback (most recent call last):
...
ValueError: Set contains sequences of different lengths
However, unary relations can be parsed to characteristic functions.
>>> unary = set(['d1', 'd2'])
>>> cf.read(unary)
>>> cf
{'d2': True, 'd1': True}
The function L{flatten} returns a set of the entities used as keys in
a L{CharFun} instance. The same information can be accessed via the
C{domain} attribute of L{CharFun}.
>>> cf = CharFun({'d1' : {'d2': True}, 'd2' : {'d1': True}})
>>> flatten(cf)
set(['d2', 'd1'])
>>> cf.domain
set(['d2', 'd1'])
Valuations and Assignments
==========================
Valuations
----------
A I{Valuation} is a mapping from non-logical constants to appropriate semantic
values in the model. Valuations are created using the L{Valuation} constructor.
>>> val = Valuation({'Fido' : 'd1', 'dog' : {'d1' : True, 'd2' : True}})
>>> val
{'Fido': 'd1', 'dog': {'d2': True, 'd1': True}}
As with L{CharFun}, an instance of L{Valuation} will read valuations using
relations rather than characteristic functions as interpretations.
>>> setval = [('adam', 'b1'), ('betty', 'g1'),\
('girl', set(['g2', 'g1'])), ('boy', set(['b1', 'b2'])),\
('love', set([('b1', 'g1'), ('b2', 'g2'), ('g1', 'b1'), ('g2', 'b1')]))]
>>> val = Valuation()
>>> val.read(setval)
>>> print val
{'adam': 'b1',
'betty': 'g1',
'boy': {'b1': True, 'b2': True},
'girl': {'g2': True, 'g1': True},
'love': {'b1': {'g2': True, 'g1': True},
'g1': {'b1': True},
'g2': {'b2': True}}}
Valuations have a C{domain} attribute, like L{CharFun}, and also a C{symbols}
attribute.
>>> val.domain
set(['g1', 'g2', 'b2', 'b1'])
>>> val.symbols
['boy', 'girl', 'love', 'adam', 'betty']
Assignments
-----------
A variable I{Assignment} is a mapping from individual variables to
entities in the domain. Individual variables are indicated with the
letters 'x', 'y', 'w' and 'z', optionally followed by an integer
(e.g., 'x0', 'y332'). Assignments are created using the L{Assignment}
constructor, which also takes the domain as a parameter.
>>> dom = set(['u1', 'u2', 'u3', 'u4'])
>>> g = Assignment(dom, {'x': 'u1', 'y': 'u2'})
>>> g
{'y': 'u2', 'x': 'u1'}
There is also a C{print} format for assignments which uses a notation
closer to that in logic textbooks:
>>> print g
g[u2/y][u1/x]
Initialization of an L{Assignment} instance checks that the variable
really is an individual variable and also that the value belongs to
the domain of discourse:
>>> Assignment(dom, {'xxx': 'u1', 'y': 'u2'})
Traceback (most recent call last):
...
AssertionError: Wrong format for an Individual Variable: 'xxx'
>>> Assignment(dom, {'x': 'u5', 'y': 'u2'})
Traceback (most recent call last):
...
AssertionError: 'u5' is not in the domain: set(['u4', 'u1', 'u3', 'u2'])
It is also possible to update an assignment using the L{add} method:
>>> dom = set(['u1', 'u2', 'u3', 'u4'])
>>> g = models.Assignment(dom, {})
>>> g.add('u1', 'x')
{'x': 'u1'}
>>> g.add('u1', 'xyz')
Traceback (most recent call last):
...
AssertionError: Wrong format for an Individual Variable: 'xyz'
>>> g.add('u2', 'x').add('u3', 'y').add('u4', 'x0')
{'y': 'u3', 'x': 'u2', 'x0': 'u4'}
>>> g.add('u5', 'x')
Traceback (most recent call last):
...
AssertionError: u5 is not in the domain set(['u4', 'u1', 'u3', 'u2'])
Variables (and their values) can be selectively removed from an
assignment with the L{purge} method:
>>> g
{'y': 'u3', 'x': 'u2', 'x0': 'u4'}
>>> g.purge('x')
>>> g
{'y': 'u3', 'x0': 'u4'}
With no arguments, L{purge} is equivalent to C{clear} on a dictionary:
>>> g.purge()
>>> g
{}
Models
======
The L{Model} constructor takes two parameters, a C{set} and a L{Valuation}.
>>> m = Model(val.domain, val)
The top-level method of a L{Model} instance is L{evaluate}, which
assigns a semantic value to expressions of the L{logic} module, under an assignment C{g}:
>>> m.evaluate('all x. ((boy x) implies (not (girl x)))', g)
True
evaluate
--------
L{evaluate} calls a recursive function L{satisfy}, which in turn
calls a function L{i} to interpret non-logical constants and
individual variables. L{i} first tries to call the model's L{Valuation} and
if that fails, calls the variable assignment C{g}. Any atomic expression which cannot be
assigned a value by L{i} raises an C{Undefined} exception; this is
caught by L{evaluate}, which returns the string 'Undefined'.
>>> m.evaluate('(walk adam)', g, trace=2)
... checking whether 'walk' is an individual variable
Expression 'walk' can't be evaluated by i and g.
'Undefined'
Boolean operators such as M{not}, M{and} and M{implies} are
implemented as dictionaries. For example:
>>> m.AND
{False: {False: False, True: False}, True: {False: False, True: True}}
A formula such as '(p and q)' is interpreted by indexing
the value of 'and' with the values of the two propositional arguments,
in the following manner:
>>> m.AND[m.evaluate('p', g)][m.evaluate('q', g)]
satisfy
-------
The L{satisfy} method assigns semantic values to arbitrary expressions
according to their syntactic structure, as determined by L{decompose}.
"""
from en.parser.nltk_lite.semantics import logic
from pprint import pformat
class Error(Exception):
    """Base class for errors raised by this module."""

class Undefined(Error):
    """Raised when an expression cannot be assigned a semantic value."""
class CharFun(dict):
"""
A dictionary which represents a Curried characteristic function.
"""
def __init__(self, charfun=None):
dict.__init__(self)
if charfun:
#assert isinstance(charfun, dict)
self.update(charfun)
def _isrel(self, s):
"""Check whether a set represents a relation (of any arity)."""
assert isinstance(s, set), "Argument is not a set"
if len(s) == 0:
return True
elif not isinstance(max(s),tuple) or len(max(s))==len(min(s)):
return True
else:
raise ValueError, "Set contains sequences of different lengths"
def _item2dict(self, item):
"""
Given an input such as the triple ('a', 'b', 'c'), return the L{CharFun}
{'c': {'b': {'a' : True}}}
@return: A characteristic function corresponding to the input.
@rtype: L{CharFun}
@param item: a literal or a tuple
"""
chf = {}
if isinstance(item, tuple):
# reverse the tuple
l = list(item)
l.reverse()
item = tuple(l)
if len(item)==1:
chf[item[0]] = True
elif len(item) > 1:
chf[item[0]] = self._item2dict(item[1:])
else:
chf[item] = True
return chf
def _merge(self, chf1, chf2):
k = chf2.keys()[0]
if k not in chf1:
chf1.update(chf2)
else:
self._merge(chf1[k], chf2[k])
return chf1
def read(self, s):
"""
Convert an M{n}-ary relation into its corresponding characteristic function.
@rtype: L{CharFun}
@type s: set
"""
assert self._isrel(s)
charfuns = []
for item in s:
charfuns.append(self._item2dict(item))
chf = reduce(self._merge, charfuns, {})
self.update(chf)
def tuples(self):
"""
Convert a L{CharFun} back into a set of tuples.
Given an input such as the L{CharFun} {'c': {'b': {'a': True}}},
return set([('a', 'b', 'c')])
"""
n = depth(self)
if n == 1:
tuples = self.domain
elif n == 2:
tuples = [(k2, k1) for k1 in self.keys() for k2 in self[k1].keys()]
elif n == 3:
tuples = [(k3, k2, k1) for k1 in self.keys() for k2 in self[k1].keys() for k3 in self[k1][k2].keys()]
else:
raise Error, "Only defined for CharFuns of depth <= 3"
result = set(tuples)
return result
def _getDomain(self):
return flatten(self)
domain = property(_getDomain, doc='Set-theoretic domain of a curried function')
def flatten(d):
    """
    @return: The set of keys of a L{CharFun} instance.
    @rtype: set
    @type d: dict
    """
    collected = []
    try:
        collected.extend(d.keys())
        for child in d.values():
            if isinstance(child, dict):
                collected.extend(flatten(child))
            else:
                collected.append(child)
    except AttributeError:
        # d wasn't dict-like at all; treat it as a single atomic value
        collected.append(d)
    entities = set(collected)
    # True is the terminal marker of a CharFun, not a domain entity
    entities.discard(True)
    return entities
def depth(cf):
    """
    Calculate the depth of a L{CharFun} (i.e. the arity of the
    relation it encodes).
    @return: Int
    @type cf: L{CharFun}
    """
    # A level whose values include True is the innermost one.
    if True in cf.values():
        return 1
    else:
        # list() the keys so this works on Python 3, where dict views
        # are not indexable; an empty dict still raises IndexError,
        # exactly as cf.keys()[0] did on Python 2.
        key = list(cf.keys())[0]
        return 1+depth(cf[key])
class Valuation(dict):
    """
    A dictionary which represents a model-theoretic Valuation of non-logical constants.

    An attempt to initialize a L{Valuation} with an individual
    variable expression (e.g., 'x3') will raise an error, as will an
    attempt to read a list containing an individual variable
    expression.

    An instance of L{Valuation} will raise an C{Undefined} exception
    if indexed with an expression that is not in its list of symbols.
    """
    def __init__(self, valuation=None):
        """
        @param valuation: an optional dictionary mapping non-logical
            constants to semantic values; relation-style values are
            converted to L{CharFun}s where possible.
        """
        dict.__init__(self)
        if valuation:
            for k in valuation.keys():
                if logic.is_indvar(k):
                    # Call-style raise works on both Python 2 and 3.
                    raise Error("This looks like an individual variable: '%s'" % k)
                # Check if the valuation is of the form {'p': True}
                if isinstance(valuation[k], bool):
                    self[k] = valuation[k]
                else:
                    try:
                        cf = CharFun(valuation[k])
                        self[k] = cf
                    except (TypeError, ValueError):
                        self[k] = valuation[k]

    def __getitem__(self, key):
        if key in self:
            return dict.__getitem__(self, key)
        else:
            raise Undefined("Unknown expression: '%s'" % key)

    def read(self, seq):
        """
        Parse a list such as C{[('j', 'b1'), ('girl', set(['g1', 'g2']))]} into a L{Valuation}.
        @rtype: L{Valuation}
        @param seq: A list of tuples of the form (I{constant}, I{relation}), where I{relation} is a set of tuples.
        """
        d = dict(seq)
        for k in d.keys():
            if logic.is_indvar(k):
                raise Error("This looks like an individual variable: '%s'" % k)
            val = d[k]
            # String values (individual constants) are stored as-is;
            # set values are converted to characteristic functions.
            if isinstance(val, str):
                pass
            else:
                cf = CharFun()
                cf.read(d[k])
                d[k] = cf
        self.update(d)

    def __str__(self):
        return pformat(self)

    def _getDomain(self):
        dom = set()
        for v in self.values():
            flat = flatten(v)
            dom = dom.union(flat)
        return dom

    domain = property(_getDomain,
                      doc='Set-theoretic domain of the value-space of a Valuation.')

    def _getSymbols(self):
        return self.keys()

    symbols = property(_getSymbols,
                       doc='The non-logical constants which the Valuation recognizes.')
class Assignment(dict):
    """
    A dictionary which represents an assignment of values to variables.

    An assignment can only assign values from its domain.
    If an unknown expression M{a} is passed to a model M{M}'s
    interpretation function M{i}, M{i} will first check whether M{M}'s
    valuation assigns an interpretation to M{a} as a constant, and if
    this fails, M{i} will delegate the interpretation of M{a} to
    M{g}. M{g} only assigns values to individual variables (i.e.,
    members of the class L{IndVariableExpression} in the L{logic}
    module). If a variable is not assigned a value by M{g}, it will raise
    an C{Undefined} exception.
    """
    def __init__(self, domain, assignment=None):
        """
        @param domain: the domain of discourse; values assigned to
            variables must belong to it.
        @param assignment: an optional initial variable-to-value map.
        """
        dict.__init__(self)
        self.domain = domain
        if assignment:
            for var in assignment.keys():
                val = assignment[var]
                assert val in self.domain,\
                       "'%s' is not in the domain: %s" % (val, self.domain)
                assert logic.is_indvar(var),\
                       "Wrong format for an Individual Variable: '%s'" % var
            self.update(assignment)
        self._addvariant()

    def __getitem__(self, key):
        if key in self:
            return dict.__getitem__(self, key)
        else:
            # Call-style raise works on both Python 2 and Python 3.
            raise Undefined("Unknown expression: '%s'" % key)

    def purge(self, var=None):
        """
        Remove one or all keys (i.e. logic variables) from an
        assignment, and update C{self.variant}.
        @param var: a Variable acting as a key for the assignment.
        """
        if var:
            # Look the variable up first so that purging an unknown
            # variable raises Undefined (via __getitem__) rather than
            # KeyError; the value itself is not needed.
            self[var]
            del self[var]
        else:
            self.clear()
        self._addvariant()
        return None

    def __str__(self):
        """
        Pretty printing for assignments. {'x', 'u'} appears as 'g[u/x]'
        """
        gstring = "g"
        for (val, var) in self.variant:
            gstring = gstring + "[" + str(val) + "/" + str(var) + "]"
        return gstring

    def _addvariant(self):
        """
        Create a more pretty-printable version of the assignment.
        """
        # (value, variable) pairs; avoid shadowing the builtin 'list',
        # which the original used as the local name here.
        pairs = []
        for item in self.items():
            pair = (item[1], item[0])
            pairs.append(pair)
        self.variant = pairs
        return None

    def add(self, val, var):
        """
        Add a new variable-value pair to the assignment, and update
        C{self.variant}.

        We write the arguments in the order 'val, var' by analogy with the
        notation 'g[u/x]'.
        """
        assert val in self.domain,\
               "%s is not in the domain %s" % (val, self.domain)
        assert logic.is_indvar(var),\
               "Wrong format for an Individual Variable: '%s'" % var
        self[var] = val
        self._addvariant()
        return self
class Model:
"""
A first order model is a domain M{D} of discourse and a valuation M{V}.
A domain M{D} is a set, and a valuation M{V} is a map that associates
expressions with values in the model.
The domain of M{V} should be a subset of M{D}.
"""
def __init__(self, domain, valuation, prop=None):
"""
Construct a new L{Model}.
@type domain: C{set}
@param domain: A set of entities representing the domain of discourse of the model.
@type valuation: L{Valuation}
@param valuation: the valuation of the model.
@param prop: If this is set, then we are building a propositional\
model and don't require the domain of M{V} to be subset of M{D}.
"""
assert isinstance(domain, set)
self.domain = domain
self.valuation = valuation
if prop is None:
if not domain.issuperset(valuation.domain):
raise Error,\
"The valuation domain, %s, must be a subset of the model's domain, %s"\
% (valuation.domain, domain)
def __repr__(self):
return "(%r, %r)" % (self.domain, self.valuation)
def __str__(self):
return "Domain = %s,\nValuation = \n%s" % (self.domain, self.valuation)
def app(self, fun, arg):
"""
Wrapper for handling KeyErrors and TypeErrors raised by
function application.
This constrains instances of L{CharFun} to return C{False} in
the right circumstances.
@param fun: an instance of L{CharFun}.
@param arg: an arbitrary semantic object
@return: If C{arg} is in C{fun}'s domain, then returns C{fun[arg]},\
else if C{arg} is in C{self.domain}, returns C{False},\
else raises C{Undefined} error.
"""
try:
return fun[arg]
except KeyError:
if arg in self.domain:
return False
else:
raise Undefined,\
"%s can't be applied as a function to %s" % (fun, arg)
except TypeError:
if fun == False:
return False
else:
raise Undefined,\
"%s can't be applied as a function to %s" % (fun, arg)
NOT = {True: False, False: True}
AND = {True: {True: True, False: False},
False: {True: False, False: False}}
OR = {True: {True: True, False: True},
False: {True: True, False: False}}
IMPLIES = {True: {True: True, False: False},
False: {True: True, False: True}}
IFF = {True: {True: True, False: False},
False: {True: False, False: True}}
def evaluate(self, expr, g, trace=None):
"""
Provides a handler for L{satisfy}
that blocks further propagation of C{Undefined} error.
@param expr: An C{Expression} of L{logic}.
@type g: L{Assignment}
@param g: an assignment to individual variables.
@return: C{bool} or 'Undefined'
"""
try:
value = self.satisfy(expr, g, trace=trace)
if trace:
print "'%s' evaluates to %s under M, %s" % (expr, value, g)
return value
except Undefined:
return 'Undefined'
def satisfy(self, expr, g, trace=None):
"""
Recursive interpretation function for a formula of first-order logic.
Raises an C{Undefined} error when C{expr} is an atomic string
but is not a symbol or an individual variable.
@return: Returns a truth value or C{Undefined} if C{expr} is\
complex, and calls the interpretation function C{i} if C{expr}\
is atomic.
@param expr: An expression of L{logic}.
@type g: L{Assignment}
@param g: an assignment to individual variables.
"""
OPS = {'and': Model.AND,
'or': Model.OR,
'implies': Model.IMPLIES,
'iff': Model.IFF}
try:
parsed = self.decompose(expr)
# expr is a variable or constant; we don't want to decompose it further
if isinstance(parsed, str):
return self.i(expr, g, trace)
# parsed is a pair of strings
else:
first, second = parsed
# maybe _first_ is an operator like 'and', 'not' or '=' and _second_ is a list of args
phi = second[0]
try:
psi = second[1]
# second can't be decomposed further
except IndexError:
pass
if first == 'not':
if trace:
print " '%s' evaluates to %s under M, %s." % (phi, self.satisfy(phi, g), g)
return not self.satisfy(phi, g, trace)
elif first in OPS:
value = OPS[first][self.satisfy(phi, g, trace)][self.satisfy(psi, g, trace)]
if trace:
print " '%s' evaluates to %s under M, %s" % (phi, self.satisfy(phi, g, trace), g)
print " '%s' evaluates to %s under M, %s" % (psi, self.satisfy(psi, g, trace), g)
return value
elif first == '=':
value = (self.satisfy(phi, g, trace) == self.satisfy(psi, g, trace))
if trace:
print " '%s' evaluates to %s under M, %s" % (phi, self.satisfy(phi, g, trace), g)
print " '%s' evaluates to %s under M, %s" % (psi, self.satisfy(psi, g, trace), g)
return value
# _first_ is something like 'some x' and _second_ is something like '(boy x)'
elif first[0] == 'some':
var = first[1]
phi = second
sat = self.satisfiers(phi, var, g, trace, nesting=1)
value = len(sat) > 0
if trace:
if value:
print " '%s' evaluates to %s under M, %s" % (phi, value, g)
if trace > 1:
print " satisfiers of %s under %s are %s" % (phi, g, sat)
else:
print " '%s' evaluates to %s under M, %s" % (phi, value, g)
if trace > 1:
print " satisfiers of %s under %s are %s" % (phi, g, sat)
return value
elif first[0] == 'all':
var = first[1]
phi = second
sat = self.satisfiers(phi, var, g, trace, nesting=1)
value = self.domain.issubset(sat)
if trace:
if value:
print " '%s' evaluates to %s under M, %s" % (phi, self.satisfy(phi, g, trace), g)
else:
notphi = '(not %s)' % phi
witness = self.satisfiers(notphi, var, g).pop()
g.add(witness, var)
print " '%s' evaluates to %s under M, %s" % (phi, self.satisfy(phi, g, trace), g)
return value
# maybe _first_ is something like 'boy' and _second_ is an argument expression like 'x'
else:
try:
funval = self.satisfy(first, g, trace)
argval = self.satisfy(second, g, trace)
app = self.app(funval, argval)
if trace > 1:
print "'%s': %s applied to %s yields %s"\
% (expr, funval, argval, app)
return app
# we can't get a proper interpretation
except TypeError:
print "The interpretation of %s cannot be applied to the interpretation of %s"\
% (first, second)
print "'%s': %s applied to %s yields %s"\
% (expr, funval, argval, app)
raise
except ValueError:
raise Undefined, "Cannot parse %s", expr
def i(self, expr, g, trace=False):
"""
An interpretation function.
Assuming that C{expr} is atomic:
- if C{expr} is a non-logical constant, calls the valuation M{V}
- else if C{expr} is an individual variable, calls assignment M{g}
- else returns C{Undefined}.
@param expr: an C{Expression} of L{logic}.
@type g: L{Assignment}
@param g: an assignment to individual variables.
@return: a semantic value
"""
try:
if trace > 1:
print " i, %s('%s') = %s" % (g, expr, self.valuation[expr])
# expr is a non-logical constant, i.e., in self.valuation.symbols
return self.valuation[expr]
except Undefined:
if trace > 1:
print " ... checking whether '%s' is an individual variable" % expr
pass
try:
if trace > 1:
print " i, %s('%s') = %s" % (g, expr, g[expr])
# expr wasn't a constant; maybe a variable that g knows about?
return g[expr]
# We should only get to this point if expr is not an
# individual variable or not assigned a value by g
except Undefined:
if trace:
print "Expression '%s' can't be evaluated by i and %s." % (expr, g)
raise
def freevar(self, var, expr):
"""
Is C{var} one of the free variables in C{expr}?
@type var: an C{Indvar} of L{logic}
@param var: the variable to test for.
@param expr: an C{Expression} of L{logic}.
@return: C{bool}
"""
parsed = logic.Parser().parse(expr)
variable = logic.Variable(var)
return variable in parsed.free()
def satisfiers(self, expr, var, g, trace=False, nesting=0):
"""
Show the entities from the model's domain that satisfy an open formula.
@param expr: the open formula
@param var: the relevant free variable in C{expr}.
@param g: the variable assignment
@return: the set of entities that satisfy C{expr}.
@rtype: C{set}
"""
spacer = ' '
indent = spacer + (spacer * nesting)
candidates = []
if self.freevar(var, expr):
if trace:
print
print (spacer * nesting) + "Open formula is '%s' with assignment %s" % (expr, g)
for u in self.domain:
g.add(u, var)
if trace > 1:
lowtrace = trace-1
else:
lowtrace = 0
value = self.satisfy(expr, g, lowtrace)
if trace:
print indent + "...trying assignment %s" % g
# expr == False under g[u/var]?
if value == False:
if trace:
print indent + "value of '%s' under %s is False" % (expr, g)
# so g[u/var] is a satisfying assignment
else:
candidates.append(u)
if trace:
print indent + "value of '%s' under %s is %s" % (expr, g, value)
result = set(candidates)
# var isn't free in expr
else:
raise Undefined, "%s is not free in %s" % (var, expr)
return result
def decompose(self, expr):
"""
Function to communicate with a first-order functional language.
This function tries to make weak assumptions about the parse structure
provided by the logic module. It makes the assumption that an expression
can be broken down into a pair of subexpressions:
- The C{(binder, body)} pair is for decomposing quantified formulae.
- The C{(op, args)} pair is for decomposing formulae with a boolean operator.
- The C{(fun, args)} pair should catch other relevant cases.
@param expr: A string representation of a first-order formula.
"""
try:
parsed = logic.Parser(constants=self.valuation.symbols).parse(expr)
except TypeError:
print "Cannot parse %s" % expr
try:
first, second = parsed.binder, parsed.body
#print 'first is %s, second is %s' % (first, second)
return (first, second)
except AttributeError:
pass
try:
first, second = parsed.op, parsed.args
#print 'first is %s, second is %s' % (first, second)
return (first, second)
except AttributeError:
pass
try:
first, second = str(parsed.first), str(parsed.second)
#print 'first is %s, second is %s' % (first, second)
return (first, second)
except (AttributeError, TypeError):
return expr
#//////////////////////////////////////////////////////////////////////
# TESTING
#//////////////////////////////////////////////////////////////////////
import unittest
class TestModels(unittest.TestCase):
"""
Unit tests for the L{Model} class.
"""
    def testLogicSelectors(self):
        "Tests for properties of formulae from 'logic' module."
        # An empty model suffices: decompose() only needs the valuation's
        # symbol list, not any entities.
        v = Valuation()
        m = Model(set([]), v)
        # Existential quantification: decomposes into (binder, body)
        pair = m.decompose('some x.(M N)')
        self.assertEqual(pair[0], ('some', 'x'))
        self.assertEqual(pair[1], '(M N)')
        # Universal quantification
        pair = m.decompose('all x.(M N)')
        self.assertEqual(pair[0], ('all', 'x'))
        self.assertEqual(pair[1], '(M N)')
        # Boolean operators: decomposes into (op, args)
        pair = m.decompose('(and (M N) (P Q))')
        self.assertEqual(pair[0], 'and')
        self.assertEqual(pair[1], ['(M N)', '(P Q)'])
        pair = m.decompose('(not M N P Q)')
        self.assertEqual(pair[0], 'not')
        self.assertEqual(pair[1], ['M', 'N', 'P', 'Q'])
        # Just an application expression: decomposes into (fun, args)
        pair = m.decompose('(M N P)')
        self.assertEqual(pair[0], '(M N)')
        self.assertEqual(pair[1], 'P')
    def testValuations(self):
        "Tests for characteristic functions and valuations."
        # Curried access into a characteristic function
        cf = CharFun({'d1' : {'d1': True, 'd2': True}, 'd2' : {'d1': True}})
        self.assertEqual(cf['d1'], {'d1': True, 'd2': True})
        self.assertEqual(cf['d1']['d2'], True)
        # doesn't work since cf not called on 'foo'
        ## self.assertRaises(KeyError, cf['foo'])
        ## self.assertRaises(KeyError, cf['d1']['foo'])
        # flatten collects all keys; it should agree with the domain property
        self.assertEqual(flatten(cf), set(['d1', 'd2']))
        self.assertEqual(flatten(cf), cf.domain)
        # read() converts a binary relation to the same CharFun
        s1 = set([('d1', 'd2'), ('d1', 'd1'), ('d2', 'd1')])
        cf1 = CharFun()
        cf1.read(s1)
        self.assertEqual(cf, cf1)
        # tuples() round-trips back to the relation
        self.assertEqual(cf1.tuples(), s1)
        # duplicate tuples in the input collapse to the same CharFun
        s2 = set([('d1', 'd2'), ('d1', 'd2'), ('d1', 'd1'), ('d2', 'd1')])
        cf2 = CharFun()
        cf2.read(s2)
        self.assertEqual(cf1, cf2)
        # unary relations become flat {entity: True} maps
        unary = set(['d1', 'd2'])
        cf.read(unary)
        self.assertEqual(cf, {'d2': True, 'd1': True})
        # mixed-arity input is rejected
        wrong = set([('d1', 'd2'), ('d2', 'd1', 'd3')])
        self.assertRaises(ValueError, cf.read, wrong)
        # Valuations wrap CharFuns and expose domain/symbols
        val = Valuation({'Fido' : 'd1', 'dog' : {'d1' : True, 'd2' : True}})
        self.assertEqual(val['dog'], cf)
        self.assertEqual(val['dog'][val['Fido']], True)
        self.assertEqual(val.domain, set(['d1', 'd2']))
        self.assertEqual(val.symbols, ['Fido', 'dog'])
        # read()-built valuation equals the dict-built one
        setval = [('Fido', 'd1'), ('dog', set(['d1', 'd2']))]
        val1 = Valuation()
        val1.read(setval)
        self.assertEqual(val, val1)
        # binary relation: dict form and relation form agree via tuples()
        val1 = Valuation({'love': {'g1': {'b1': True}, 'b1': {'g1': True}, 'b2': {'g2': True}, 'g2': {'b1': True}}})
        love1 = val1['love']
        relation = set([('b1', 'g1'), ('g1', 'b1'), ('g2', 'b2'), ('b1', 'g2')])
        self.assertEqual(love1.tuples(), relation)
        val2 = Valuation()
        val2.read([('love', set([('b1', 'g1'), ('g1', 'b1'), ('g2', 'b2'), ('b1', 'g2')]))])
        love2 = val2['love']
        self.assertEqual(love1.tuples(), love2.tuples())
def testFunArgApp(self):
    "Tests for function argument application in a Model"
    # Build a small model of boys, girls and a dog.
    valuation = Valuation()
    valuation.read([('adam', 'b1'), ('betty', 'g1'), ('fido', 'd1'),
                    ('girl', set(['g1', 'g2'])), ('boy', set(['b1', 'b2'])),
                    ('dog', set(['d1'])),
                    ('love', set([('b1', 'g1'), ('b2', 'g2'),
                                  ('g1', 'b1'), ('g2', 'b1')]))])
    domain = valuation.domain
    model = Model(domain, valuation)
    assignment = Assignment(domain)
    boys = valuation['boy']
    # Applying 'boy' to a boy yields True, to a girl False; applying it
    # to an unknown entity raises Undefined.
    self.assertEqual(model.app(boys, 'b1'), True)
    self.assertEqual(model.app(boys, 'g1'), False)
    self.assertRaises(Undefined, model.app, boys, 'foo')
def testBBModelCheck(self):
    "Test the model checking with Blackburn & Bos testsuite"
    # Model m1: two customers, two robbers, a single 'love' pair.
    val1 = Valuation()
    v1 = [('jules', 'd1'), ('vincent', 'd2'), ('pumpkin', 'd3'),
          ('honey_bunny', 'd4'), ('yolanda', 'd5'),
          ('customer', set(['d1', 'd2'])),
          ('robber', set(['d3', 'd4'])),
          ('love', set([('d3', 'd4')]))]
    val1.read(v1)
    dom1 = val1.domain
    m1 = Model(dom1, val1)
    g1 = Assignment(dom1)
    # Model m2: 'yolanda' and 'honey_bunny' name the same entity ('d4'),
    # the 'love' relation is empty, and the domain is given explicitly
    # (it is larger than the entities named in the valuation).
    val2 = Valuation()
    v2 = [('jules', 'd1'), ('vincent', 'd2'), ('pumpkin', 'd3'),
          ('honey_bunny', 'd4'), ('yolanda', 'd4'),
          ('customer', set(['d1', 'd2', 'd5', 'd6'])),
          ('robber', set(['d3', 'd4'])),
          ('love', set())]
    val2.read(v2)
    dom2 = set(['d1', 'd2', 'd3', 'd4', 'd5', 'd6'])
    m2 = Model(dom2, val2)
    g2 = Assignment(dom2)
    # g21 additionally binds the variable 'y', so open formulas in 'y'
    # evaluate to a definite value rather than 'Undefined'.
    g21 = Assignment(dom2)
    g21.add('d3', 'y')
    # Model m3: women tell jokes which occur in episodes.
    val3 = Valuation()
    v3 = [('mia', 'd1'), ('jody', 'd2'), ('jules', 'd3'),
          ('vincent', 'd4'),
          ('woman', set(['d1', 'd2'])), ('man', set(['d3', 'd4'])),
          ('joke', set(['d5', 'd6'])), ('episode', set(['d7', 'd8'])),
          ('in', set([('d5', 'd7'), ('d5', 'd8')])),
          ('tell', set([('d1', 'd5'), ('d2', 'd6')]))]
    val3.read(v3)
    dom3 = set(['d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8'])
    m3 = Model(dom3, val3)
    g3 = Assignment(dom3)
    # Each entry is (sentence, model, assignment, expected value); the
    # expected value may be the string 'Undefined' for partial results
    # (e.g. unknown predicates or unbound variables).
    tests = [
        ('some x. (robber x)', m1, g1, True),
        ('some x. some y. (love x y)', m1, g1, True),
        ('some x0. some x1. (love x0 x1)', m2, g2, False),
        ('all x. all y. (love x y)', m2, g2, False),
        ('(not all x. all y. (love x y))', m2, g2, True),
        ('all x. all y. (not (love x y))', m2, g2, True),
        ('(yolanda = honey_bunny)', m2, g2, True),
        ('(mia = honey_bunny)', m2, g2, 'Undefined'),
        ('(not (yolanda = honey_bunny))', m2, g2, False),
        ('(not (mia = honey_bunny))', m2, g2, 'Undefined'),
        ('all x. ((robber x) or (customer x))', m2, g2, True),
        ('(not all x. ((robber x) or (customer x)))', m2, g2, False),
        ('((robber x) or (customer x))', m2, g2, 'Undefined'),
        ('((robber y) or (customer y))', m2, g21, True),
        ('some x. ((man x) and some x. (woman x))', m3, g3, True),
        ('(some x. (man x) and some x. (woman x))', m3, g3, True),
        ('(not some x. (woman x))', m3, g3, False),
        ('some x. ((tasty x) and (burger x))', m3, g3, 'Undefined'),
        ('(not some x. ((tasty x) and (burger x)))', m3, g3, 'Undefined'),
        ('some x. ((man x) and (not some y. (woman y)))', m3, g3, False),
        ('some x. ((man x) and (not some x. (woman x)))', m3, g3, False),
        ('some x. ((woman x) and (not some x. (customer x)))', m2, g2, 'Undefined'),
        ]
    for item in tests:
        sentence, model, g, testvalue = item
        semvalue = model.evaluate(sentence, g)
        self.assertEqual(semvalue, testvalue)
        # Reset the assignment between sentences so that bindings made
        # during evaluation do not leak into the next test case.
        g.purge()
def testsuite():
    """Bundle all TestModels cases into a single test suite."""
    return unittest.TestSuite(unittest.makeSuite(TestModels))
def test(verbosity):
    """Run the model test suite with a text runner at the given verbosity."""
    unittest.TextTestRunner(verbosity=verbosity).run(testsuite())
#//////////////////////////////////////////////////////////////////////
# Demo..
#//////////////////////////////////////////////////////////////////////

# Number of spacer chars: width of the '*' separator rules printed
# between sections of the demos below.
mult = 30
def propdemo(trace=None):
    """Example of a propositional model."""
    # The demo objects are module globals so they remain inspectable
    # after the demo has run (e.g. from an interactive session).
    global val1, dom1, m1, g1
    val1 = Valuation({'p': True, 'q': True, 'r': False})
    dom1 = set([])
    m1 = Model(dom1, val1, prop=True)
    g1 = Assignment(dom1)
    print
    print '*' * mult
    print "Propositional Formulas Demo"
    print '*' * mult
    print "Model m1:\n", m1
    print '*' * mult
    sentences = [
        '(p and q)',
        '(p and r)',
        '(not p)',
        '(not r)',
        '(not (not p))',
        '(not (p and r))',
        '(p or r)',
        '(r or p)',
        '(r or r)',
        # NOTE(review): the next sentence has an unbalanced extra ')' —
        # looks like a typo; verify the evaluator tolerates it.
        '((not p) or r))',
        '(p or (not p))',
        '(p implies q)',
        '(p implies r)',
        '(r implies p)',
        '(p iff p)',
        '(r iff r)',
        '(p iff r)',
        ]
    # With a trace, let evaluate() print its own derivation; otherwise
    # print a one-line summary per sentence.
    for sent in sentences:
        if trace:
            print
            m1.evaluate(sent, g1, trace)
        else:
            print "The value of '%s' is: %s" % (sent, m1.evaluate(sent, g1))
def folmodel(trace=None):
    """Example of a first-order model."""
    # Globals shared with foldemo() and satdemo() below.
    global val2, v2, dom2, m2, g2
    val2 = Valuation()
    v2 = [('adam', 'b1'), ('betty', 'g1'), ('fido', 'd1'),\
         ('girl', set(['g1', 'g2'])), ('boy', set(['b1', 'b2'])), ('dog', set(['d1'])),
         ('love', set([('b1', 'g1'), ('b2', 'g2'), ('g1', 'b1'), ('g2', 'b1')]))]
    val2.read(v2)
    dom2 = val2.domain
    m2 = Model(dom2, val2)
    # The assignment starts with 'x' and 'y' already bound.
    g2 = Assignment(dom2, {'x': 'b1', 'y': 'g2'})
    if trace:
        print "*" * mult
        print "Model m2\n", m2
        print "*" * mult
    # 'walks' and 'z' have no interpretation, exercising the Undefined path.
    symbols = ['adam', 'girl', 'love', 'walks', 'x', 'y', 'z']
    if trace:
        for s in symbols:
            try:
                print "The interpretation of '%s' in m2 is %s" % (s, m2.i(s, g2))
            except Undefined:
                print "The interpretation of '%s' in m2 is Undefined" % s
def foldemo(trace=None):
    """Interpretation of closed formulas in a first-order model."""
    # Build the shared model m2 and assignment g2 (module globals).
    folmodel()
    print
    print '*' * mult
    print "FOL Formulas Demo"
    print '*' * mult
    formulas = [
        '(love adam betty)',
        '(adam = mia)',
        'some z1. (boy z1)',
        'some x. ((boy x) and (not (x = adam)))',
        'some x. ((boy x) and all y. (love x y))',
        'all x. ((boy x) or (girl x))',
        'all x. ((girl x) implies some y. (boy y) and (love y x))', #Every girl loves some boy.
        'some x. ((boy x) and all y. ((girl y) implies (love x y)))', #There is some boy that every girl loves.
        'some x. ((boy x) and all y. ((girl y) implies (love y x)))', #Some boy loves every girl.
        'all x. ((dog x) implies (not (girl x)))',
        'some x. some y. ((love y x) and (love y x))'
    ]
    for fmla in formulas:
        # Evaluate each formula against a freshly reset assignment.
        g2.purge()
        if trace:
            print
            m2.evaluate(fmla, g2, trace)
        else:
            print "The value of '%s' is: %s" % (fmla, m2.evaluate(fmla, g2))
def satdemo(trace=None):
    """Satisfiers of an open formula in a first order model."""
    print
    print '*' * mult
    print "Satisfiers Demo"
    print '*' * mult
    # Build the shared model m2 and assignment g2 (module globals).
    folmodel()
    # Each formula is open in the variable 'x'.
    formulas = [
        '(boy x)',
        '(x = x)',
        '((boy x) or (girl x))',
        '((boy x) and (girl x))',
        '(love x adam)',
        '(love adam x)',
        '(not (x = adam))',
        'some z22. (love z22 x)',
        'some y. (love x y)',
        'all y. ((girl y) implies (love y x))',
        'all y. ((girl y) implies (love x y))',
        'all y. ((girl y) implies ((boy x) and (love x y)))',
        '((boy x) and all y. ((girl y) implies (love y x)))',
        '((boy x) and all y. ((girl y) implies (love x y)))',
        '((boy x) and some y. ((girl y) and (love x y)))',
        '((girl x) implies (dog x))',
        'all y. ((dog y) implies (x = y))',
        '(not some y. (love x y))',
        'some y. ((love y adam) and (love x y))'
        ]
    if trace:
        print m2
    for fmla in formulas:
        # Reset the assignment, then collect all domain members that
        # satisfy the formula when bound to 'x'.
        g2.purge()
        print "The satisfiers of '%s' are: %s" % (fmla, m2.satisfiers(fmla, 'x', g2, trace))
def demo(num, trace=None):
    """
    Run some demos.

        - num = 1: propositional logic demo
        - num = 2: first order model demo (only if trace is set)
        - num = 3: first order sentences demo
        - num = 4: satisfaction of open formulas demo
        - any other value: run all the demos

    @param trace: trace = 1, or trace = 2 for more verbose tracing
    """
    demos = {
        1: propdemo,
        2: folmodel,
        3: foldemo,
        4: satdemo,
    }
    try:
        selected = demos[num]
    except KeyError:
        # Unknown selector: run every demo in turn.
        for key in demos.keys():
            demos[key](trace=trace)
    else:
        selected(trace=trace)
if __name__ == "__main__":
    # 5 is not a registered demo number, so this runs all demos
    # without tracing, then the unit tests at high verbosity.
    demo(5, trace=0)
    print '*' * mult
    test(verbosity=2)
| Python |
# Natural Language Toolkit: Logic
#
# Based on church.py, Version 1.0
# Available from http://www.alcyone.com/pyos/church/
# Copyright (C) 2001-2002 Erik Max Francis
# Author: Erik Max Francis <max@alcyone.com>
#
# Modifications by: Steven Bird <sb@csse.unimelb.edu.au>
# Peter Wang
# Ewan Klein <ewan@inf.ed.ac.uk>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
#
# $Iid:$
"""
A version of first order logic, built on top of the untyped lambda calculus.
The class of C{Expression} has various subclasses:
- C{VariableExpression}
"""
from en.parser.nltk_lite.utilities import Counter
class Error(Exception): pass
class Variable:
    """A variable, either free or bound."""

    def __init__(self, name):
        """
        Create a new C{Variable}.

        @type name: C{string}
        @param name: The name of the variable.
        """
        self.name = name

    def equals(self, other):
        """A comparison function: true iff the names match."""
        assert isinstance(other, Variable)
        return self.name == other.name

    def __eq__(self, other):
        return self.equals(other)

    def __ne__(self, other):
        return not self.equals(other)

    def __str__(self):
        return self.name

    def __repr__(self):
        return "Variable('%s')" % self.name

    def __hash__(self):
        return hash(repr(self))
class Constant:
    """A nonlogical constant."""

    def __init__(self, name):
        """
        Create a new C{Constant}.

        @type name: C{string}
        @param name: The name of the constant.
        """
        self.name = name

    def equals(self, other):
        """A comparison function: true iff the names match."""
        assert isinstance(other, Constant)
        return self.name == other.name

    def __eq__(self, other):
        return self.equals(other)

    def __ne__(self, other):
        return not self.equals(other)

    def __str__(self):
        return self.name

    def __repr__(self):
        return "Constant('%s')" % self.name

    def __hash__(self):
        return hash(repr(self))
class Expression:
    """The abstract class of a lambda calculus expression."""
    def __init__(self):
        # Guard against direct instantiation of the abstract class;
        # subclasses reach this via their own constructors.
        if self.__class__ is Expression:
            raise NotImplementedError

    def __eq__(self, other):
        return self.equals(other)

    def __ne__(self, other):
        return not self.equals(other)

    def equals(self, other):
        """Are the two expressions equal, modulo alpha conversion?"""
        # Bug fix: this previously did 'return NotImplementedError',
        # handing back the (truthy) exception class object instead of
        # signalling that subclasses must override the method.
        raise NotImplementedError

    def variables(self):
        """Set of all variables."""
        raise NotImplementedError

    def free(self):
        """Set of free variables."""
        raise NotImplementedError

    def subterms(self):
        """Set of all subterms (including self)."""
        raise NotImplementedError

    def replace(self, variable, expression):
        """Replace all instances of variable v with expression E in self,
        where v is free in self."""
        raise NotImplementedError

    def simplify(self):
        """Evaluate the form by repeatedly applying applications."""
        raise NotImplementedError

    def skolemise(self):
        """
        Perform a simple Skolemisation operation.  Existential quantifiers
        are simply dropped and all variables they introduce are renamed so
        that they are unique.
        """
        return self._skolemise(set(), Counter())

    def _skolemise(self, bound_vars, counter):
        raise NotImplementedError

    def __str__(self):
        raise NotImplementedError

    def __repr__(self):
        raise NotImplementedError

    def __hash__(self):
        raise NotImplementedError
class VariableExpression(Expression):
    """A variable expression which consists solely of a variable."""
    def __init__(self, variable):
        Expression.__init__(self)
        assert isinstance(variable, Variable)
        self.variable = variable

    def equals(self, other):
        """
        Allow equality between instances of C{VariableExpression} and
        C{IndVariableExpression}.
        """
        # Fixes: the docstring above was missing its closing brace, and
        # the old code also tested 'isinstance(self, VariableExpression)',
        # which is always true here.  Checking 'other' alone is enough
        # and still accepts IndVariableExpression (a subclass).
        if isinstance(other, VariableExpression):
            return self.variable.equals(other.variable)
        else:
            return False

    def variables(self):
        """Set of all variables: just this one."""
        return set([self.variable])

    def free(self):
        """A bare variable is free in itself."""
        return set([self.variable])

    def subterms(self):
        return set([self])

    def replace(self, variable, expression):
        """Return *expression* if it replaces this variable, else self."""
        if self.variable.equals(variable):
            return expression
        else:
            return self

    def simplify(self):
        return self

    def infixify(self):
        return self

    def name(self):
        return self.__str__()

    def _skolemise(self, bound_vars, counter):
        return self

    def __str__(self): return '%s' % self.variable

    def __repr__(self): return "VariableExpression('%s')" % self.variable

    def __hash__(self): return hash(repr(self))
def is_indvar(expr):
    """
    Check whether an expression has the form of an individual variable:
    one of the letters 'w', 'x', 'y' or 'z' followed only by digits
    (i.e. it matches C{'^[wxyz]\\d*$'}).

    @rtype: Boolean
    @param expr: String
    """
    # Bug fix: guard against the empty string, which used to raise
    # IndexError on expr[0].
    if not expr:
        return False
    result = expr[0] in ['w', 'x', 'y', 'z']
    if len(expr) > 1:
        return result and expr[1:].isdigit()
    else:
        return result
class IndVariableExpression(VariableExpression):
    """
    An individual variable expression, as determined by C{is_indvar()}.
    """
    def __init__(self, variable):
        # Identical to VariableExpression.__init__, with the extra
        # requirement that the variable's name look like an individual
        # variable (w/x/y/z followed only by digits).
        Expression.__init__(self)
        assert isinstance(variable, Variable), "Not a Variable: %s" % variable
        assert is_indvar(str(variable)), "Wrong format for an Individual Variable: %s" % variable
        self.variable = variable
    def __repr__(self): return "IndVariableExpression('%s')" % self.variable
class ConstantExpression(Expression):
    """A constant expression, consisting solely of a constant."""
    def __init__(self, constant):
        Expression.__init__(self)
        assert isinstance(constant, Constant)
        self.constant = constant

    def equals(self, other):
        """Equal iff *other* is the same class and wraps an equal constant."""
        if self.__class__ != other.__class__:
            return False
        return self.constant.equals(other.constant)

    def variables(self):
        """A constant contains no variables."""
        return set()

    def free(self):
        """...and hence no free variables."""
        return set()

    def subterms(self):
        """The only subterm is the expression itself."""
        return set([self])

    def replace(self, variable, expression):
        """Substitution never changes a constant."""
        return self

    def simplify(self):
        return self

    def infixify(self):
        return self

    def name(self):
        return self.__str__()

    def _skolemise(self, bound_vars, counter):
        return self

    def __str__(self):
        return '%s' % self.constant

    def __repr__(self):
        return "ConstantExpression('%s')" % self.constant

    def __hash__(self):
        return hash(repr(self))
class Operator(ConstantExpression):
    """
    A boolean operator, such as 'not' or 'and', or the equality
    relation ('=').
    """
    def __init__(self, operator):
        Expression.__init__(self)
        assert operator in Parser.OPS
        # NOTE: unlike ConstantExpression, 'constant' holds the bare
        # operator string here, not a Constant instance.
        self.constant = operator
        self.operator = operator

    def equals(self, other):
        """Equal iff *other* is an Operator for the same symbol."""
        if self.__class__ != other.__class__:
            return False
        return self.constant == other.constant

    def simplify(self):
        return self

    def __str__(self):
        return '%s' % self.operator

    def __repr__(self):
        return "Operator('%s')" % self.operator
class VariableBinderExpression(Expression):
    """A variable binding expression: e.g. \\x.M."""

    # for generating "unique" variable names during alpha conversion.
    _counter = Counter()

    def __init__(self, variable, term):
        Expression.__init__(self)
        assert isinstance(variable, Variable)
        assert isinstance(term, Expression)
        self.variable = variable
        self.term = term
        # Snapshots taken at construction time.  PREFIX is supplied by
        # the concrete subclass (lambda / some / all).
        self.prefix = self.__class__.PREFIX.rstrip()
        self.binder = (self.prefix, self.variable.name)
        self.body = str(self.term)
    def equals(self, other):
        r"""
        Defines equality modulo alphabetic variance.

        If we are comparing \x.M and \y.N, then
        check equality of M and N[x/y].
        """
        if self.__class__ == other.__class__:
            if self.variable == other.variable:
                return self.term == other.term
            else:
                # Comparing \x.M and \y.N.
                # Relabel y in N with x and continue.
                relabeled = self._relabel(other)
                return self.term == relabeled
        else:
            return False
    def _relabel(self, other):
        """
        Relabel C{other}'s bound variables to be the same as C{self}'s
        variable.
        """
        var = VariableExpression(self.variable)
        return other.term.replace(other.variable, var)
    def variables(self):
        return set([self.variable]).union(self.term.variables())
    def free(self):
        # The bound variable is, by definition, not free.
        return self.term.free().difference(set([self.variable]))
    def subterms(self):
        return self.term.subterms().union([self])
    def replace(self, variable, expression):
        # Substitution stops at a binder of the same variable.
        if self.variable == variable:
            return self
        # Avoid variable capture: if our bound variable occurs free in
        # the replacement, alpha-convert to a fresh 'z<n>' name first
        # ('self' is rebound to the converted copy).
        if self.variable in expression.free():
            v = 'z' + str(self._counter.get())
            self = self.alpha_convert(Variable(v))
        return self.__class__(self.variable, \
                              self.term.replace(variable, expression))
    def alpha_convert(self, newvar):
        """
        Rename all occurrences of the variable introduced by this variable
        binder in the expression to @C{newvar}.
        """
        term = self.term.replace(self.variable, VariableExpression(newvar))
        return self.__class__(newvar, term)
    def simplify(self):
        return self.__class__(self.variable, self.term.simplify())
    def infixify(self):
        return self.__class__(self.variable, self.term.infixify())
    def __str__(self, continuation=0):
        # Print \x.\y.M as \x y.M.
        if continuation:
            prefix = ' '
        else:
            prefix = self.__class__.PREFIX
        if self.term.__class__ == self.__class__:
            return '%s%s%s' % (prefix, self.variable, self.term.__str__(1))
        else:
            return '%s%s.%s' % (prefix, self.variable, self.term)
    def __hash__(self):
        return hash(repr(self))
class LambdaExpression(VariableBinderExpression):
    """A lambda expression: \\x.M."""
    PREFIX = '\\'

    def _skolemise(self, bound_vars, counter):
        # A lambda binder survives Skolemisation: record its variable
        # as bound and recurse into the body.
        extended = bound_vars.copy()
        extended.add(self.variable)
        return self.__class__(self.variable,
                              self.term._skolemise(extended, counter))

    def __repr__(self):
        return "LambdaExpression('%s', '%s')" % (self.variable, self.term)
class SomeExpression(VariableBinderExpression):
    """An existential quantification expression: some x.M."""
    PREFIX = 'some '

    def _skolemise(self, bound_vars, counter):
        # The existential quantifier itself is dropped.  If its variable
        # clashes with one already bound, rename it to a fresh '_s<n>'.
        var, term = self.variable, self.term
        if var in bound_vars:
            var = Variable("_s" + str(counter.get()))
            term = self.term.replace(self.variable, VariableExpression(var))
        bound_vars.add(var)
        return term._skolemise(bound_vars, counter)

    def __repr__(self):
        return "SomeExpression('%s', '%s')" % (self.variable, self.term)
class AllExpression(VariableBinderExpression):
    """A universal quantification expression: all x.M."""
    PREFIX = 'all '

    def _skolemise(self, bound_vars, counter):
        # Universal binders survive: record the variable and recurse.
        extended = bound_vars.copy()
        extended.add(self.variable)
        return self.__class__(self.variable,
                              self.term._skolemise(extended, counter))

    def __repr__(self):
        return "AllExpression('%s', '%s')" % (self.variable, self.term)
class ApplicationExpression(Expression):
    """An application expression: (M N)."""
    def __init__(self, first, second):
        Expression.__init__(self)
        assert isinstance(first, Expression)
        assert isinstance(second, Expression)
        self.first = first
        self.second = second
    def equals(self, other):
        # Structural equality: both halves must match.
        if self.__class__ == other.__class__:
            return self.first.equals(other.first) and \
                   self.second.equals(other.second)
        else:
            return False
    def variables(self):
        return self.first.variables().union(self.second.variables())
    def free(self):
        return self.first.free().union(self.second.free())
    def _functor(self):
        # Walk down the left spine of the curried application.
        if isinstance(self.first, ApplicationExpression):
            return self.first._functor()
        else:
            return self.first
    fun = property(_functor,
                   doc="Every ApplicationExpression has a functor.")
    def _operator(self):
        # Raises AttributeError (per the property protocol) when the
        # functor is not an Operator.
        functor = self._functor()
        if isinstance(functor, Operator):
            return str(functor)
        else:
            raise AttributeError
    op = property(_operator,
                  doc="Only some ApplicationExpressions have operators." )
    def _arglist(self):
        """Uncurry the argument list."""
        # NOTE: collected innermost-first; _args() reverses the order.
        arglist = [str(self.second)]
        if isinstance(self.first, ApplicationExpression):
            arglist.extend(self.first._arglist())
        return arglist
    def _args(self):
        arglist = self._arglist()
        arglist.reverse()
        return arglist
    args = property(_args,
                    doc="Every ApplicationExpression has args.")
    def subterms(self):
        first = self.first.subterms()
        second = self.second.subterms()
        return first.union(second).union(set([self]))
    def replace(self, variable, expression):
        return self.__class__(self.first.replace(variable, expression),\
                              self.second.replace(variable, expression))
    def simplify(self):
        # Beta-reduction: when the (simplified) left side is a lambda,
        # substitute the argument into its body and keep simplifying.
        first = self.first.simplify()
        second = self.second.simplify()
        if isinstance(first, LambdaExpression):
            variable = first.variable
            term = first.term
            return term.replace(variable, second).simplify()
        else:
            return self.__class__(first, second)
    def infixify(self):
        # Move a binary operator into infix position, e.g. (and p q)
        # prints as (p and q); 'not' stays prefixed.
        first = self.first.infixify()
        second = self.second.infixify()
        if isinstance(first, Operator) and not str(first) == 'not':
            return self.__class__(second, first)
        else:
            return self.__class__(first, second)
    def _skolemise(self, bound_vars, counter):
        first = self.first._skolemise(bound_vars, counter)
        second = self.second._skolemise(bound_vars, counter)
        return self.__class__(first, second)
    def __str__(self):
        # Print ((M N) P) as (M N P).
        strFirst = str(self.first)
        if isinstance(self.first, ApplicationExpression):
            if not isinstance(self.second, Operator):
                strFirst = strFirst[1:-1]
        return '(%s %s)' % (strFirst, self.second)
    def __repr__(self): return "ApplicationExpression('%s', '%s')" % (self.first, self.second)
    def __hash__(self): return hash(repr(self))
class Parser:
"""A lambda calculus expression parser."""
# Tokens.
LAMBDA = '\\'
SOME = 'some'
ALL = 'all'
DOT = '.'
OPEN = '('
CLOSE = ')'
BOOL = ['and', 'or', 'not', 'implies', 'iff']
EQ = '='
OPS = BOOL
OPS.append(EQ)
def __init__(self, data=None, constants=None):
if data is not None:
self.buffer = data
self.process()
else:
self.buffer = ''
if constants is not None:
self.constants = constants
else:
self.constants = []
def feed(self, data):
"""Feed another batch of data to the parser."""
self.buffer += data
self.process()
def parse(self, data):
"""
Provides a method similar to other NLTK parsers.
@type data: str
@returns: a parsed Expression
"""
self.feed(data)
result = self.next()
return result
def process(self):
"""Process the waiting stream to make it trivial to parse."""
self.buffer = self.buffer.replace('\t', ' ')
self.buffer = self.buffer.replace('\n', ' ')
self.buffer = self.buffer.replace('\\', ' \\ ')
self.buffer = self.buffer.replace('.', ' . ')
self.buffer = self.buffer.replace('(', ' ( ')
self.buffer = self.buffer.replace(')', ' ) ')
def token(self, destructive=1):
"""Get the next waiting token. The destructive flag indicates
whether the token will be removed from the buffer; setting it to
0 gives lookahead capability."""
if self.buffer == '':
raise Error, "end of stream"
tok = None
buffer = self.buffer
while not tok:
seq = buffer.split(' ', 1)
if len(seq) == 1:
tok, buffer = seq[0], ''
else:
assert len(seq) == 2
tok, buffer = seq
if tok:
if destructive:
self.buffer = buffer
return tok
assert 0 # control never gets here
return None
def isVariable(self, token):
"""Is this token a variable (that is, not one of the other types)?"""
TOKENS = [Parser.LAMBDA, Parser.SOME, Parser.ALL,
Parser.DOT, Parser.OPEN, Parser.CLOSE, Parser.EQ]
TOKENS.extend(self.constants)
TOKENS.extend(Parser.BOOL)
return token not in TOKENS
def next(self):
"""Parse the next complete expression from the stream and return it."""
tok = self.token()
if tok in [Parser.LAMBDA, Parser.SOME, Parser.ALL]:
# Expression is a lambda expression: \x.M
# or a some expression: some x.M
if tok == Parser.LAMBDA:
factory = LambdaExpression
elif tok == Parser.SOME:
factory = SomeExpression
elif tok == Parser.ALL:
factory = AllExpression
else:
raise ValueError(tok)
vars = [self.token()]
while self.isVariable(self.token(0)):
# Support expressions like: \x y.M == \x.\y.M
# and: some x y.M == some x.some y.M
vars.append(self.token())
tok = self.token()
if tok != Parser.DOT:
raise Error, "parse error, unexpected token: %s" % tok
term = self.next()
accum = factory(Variable(vars.pop()), term)
while vars:
accum = factory(Variable(vars.pop()), accum)
return accum
elif tok == Parser.OPEN:
# Expression is an application expression: (M N)
first = self.next()
second = self.next()
exps = []
while self.token(0) != Parser.CLOSE:
# Support expressions like: (M N P) == ((M N) P)
exps.append(self.next())
tok = self.token() # swallow the close token
assert tok == Parser.CLOSE
if isinstance(second, Operator):
accum = self.make_ApplicationExpression(second, first)
else:
accum = self.make_ApplicationExpression(first, second)
while exps:
exp, exps = exps[0], exps[1:]
accum = self.make_ApplicationExpression(accum, exp)
return accum
elif tok in self.constants:
# Expression is a simple constant expression: a
return ConstantExpression(Constant(tok))
elif tok in Parser.OPS:
# Expression is a boolean operator or the equality symbol
return Operator(tok)
elif is_indvar(tok):
# Expression is a boolean operator or the equality symbol
return IndVariableExpression(Variable(tok))
else:
if self.isVariable(tok):
# Expression is a simple variable expression: x
return VariableExpression(Variable(tok))
else:
raise Error, "parse error, unexpected token: %s" % tok
# This is intended to be overridden, so that you can derive a Parser class
# that constructs expressions using your subclasses. So far we only need
# to overridde ApplicationExpression, but the same thing could be done for
# other expression types.
def make_ApplicationExpression(self, first, second):
return ApplicationExpression(first, second)
def expressions():
    """Return a sequence of test expressions."""
    a = Variable('a')
    x = Variable('x')
    y = Variable('y')
    z = Variable('z')
    A = VariableExpression(a)
    X = IndVariableExpression(x)
    Y = IndVariableExpression(y)
    Z = IndVariableExpression(z)
    XA = ApplicationExpression(X, A)
    XY = ApplicationExpression(X, Y)
    XZ = ApplicationExpression(X, Z)
    YZ = ApplicationExpression(Y, Z)
    XYZ = ApplicationExpression(XY, Z)
    # The classic combinators: I (identity), K (constant),
    # S (substitution), B (composition) and C (argument swap).
    I = LambdaExpression(x, X)
    K = LambdaExpression(x, LambdaExpression(y, X))
    L = LambdaExpression(x, XY)
    S = LambdaExpression(x, LambdaExpression(y, LambdaExpression(z, \
            ApplicationExpression(XZ, YZ))))
    B = LambdaExpression(x, LambdaExpression(y, LambdaExpression(z, \
            ApplicationExpression(X, YZ))))
    C = LambdaExpression(x, LambdaExpression(y, LambdaExpression(z, \
            ApplicationExpression(XZ, Y))))
    O = LambdaExpression(x, LambdaExpression(y, XY))
    N = ApplicationExpression(LambdaExpression(x, XA), I)
    # T exercises the parser's multi-variable binder syntax (\x y.M).
    T = Parser('\\x y.(x y z)').next()
    return [X, XZ, XYZ, I, K, L, S, B, C, O, N, T]
def main():
    """Run every test expression through the public API, and check that
    each one survives a serialize/reparse round trip."""
    p = Variable('p')
    q = Variable('q')
    P = VariableExpression(p)
    Q = VariableExpression(q)
    for l in expressions():
        print "Expression:", l
        print "Variables:", l.variables()
        print "Free:", l.free()
        print "Subterms:", l.subterms()
        print "Simplify:",l.simplify()
        # Apply each expression to P and Q, then beta-reduce.
        la = ApplicationExpression(ApplicationExpression(l, P), Q)
        las = la.simplify()
        print "Apply and simplify: %s -> %s" % (la, las)
        # Round trip: printing then reparsing must yield an equal expression.
        ll = Parser(str(l)).next()
        print 'l is:', l
        print 'll is:', ll
        assert l.equals(ll)
        print "Serialize and reparse: %s -> %s" % (l, ll)
        print
def runtests():
    """Assertion-based regression tests: beta reduction, Church numerals,
    and the beta-conversion test suite from Blackburn & Bos.  Silent on
    success; an AssertionError signals a regression."""
    # Test a beta-reduction which used to be wrong
    l = Parser(r'(\x.\x.(x x) 1)').next().simplify()
    id = Parser(r'\x.(x x)').next()
    assert l == id

    # Test numerals: Church encodings of 0-4 plus successor,
    # addition, multiplication and predecessor.
    zero = Parser(r'\f x.x').next()
    one = Parser(r'\f x.(f x)').next()
    two = Parser(r'\f x.(f (f x))').next()
    three = Parser(r'\f x.(f (f (f x)))').next()
    four = Parser(r'\f x.(f (f (f (f x))))').next()
    succ = Parser(r'\n f x.(f (n f x))').next()
    plus = Parser(r'\m n f x.(m f (n f x))').next()
    mult = Parser(r'\m n f.(m (n f))').next()
    pred = Parser(r'\n f x.(n \g h.(h (g f)) \u.x \u.u)').next()
    v1 = ApplicationExpression(succ, zero).simplify()
    assert v1 == one
    v2 = ApplicationExpression(succ, v1).simplify()
    assert v2 == two
    v3 = ApplicationExpression(ApplicationExpression(plus, v1), v2).simplify()
    assert v3 == three
    v4 = ApplicationExpression(ApplicationExpression(mult, v2), v2).simplify()
    assert v4 == four
    v5 = ApplicationExpression(pred, ApplicationExpression(pred, v4)).simplify()
    assert v5 == two

    # betaConversionTestSuite.pl from
    # _Representation and Inference for Natural Language_
    #
    # Each pair (x1, x2) must beta-reduce to equal normal forms.
    x1 = Parser(r'(\p.(p mia) \x.(walk x))').next().simplify()
    x2 = Parser(r'(walk mia)').next().simplify()
    assert x1 == x2

    x1 = Parser(r'some x.(and (man x) (\p.some x.(and (woman x) (p x)) \y.(love x y)))').next().simplify()
    x2 = Parser(r'some x.(and (man x) some y.(and (woman y) (love x y)))').next().simplify()
    assert x1 == x2

    x1 = Parser(r'(\a.(sleep a) mia)').next().simplify()
    x2 = Parser(r'(sleep mia)').next().simplify()
    assert x1 == x2

    x1 = Parser(r'(\a.\b.(like b a) mia)').next().simplify()
    x2 = Parser(r'\b.(like b mia)').next().simplify()
    assert x1 == x2

    x1 = Parser(r'\a.(\b.(like b a) vincent)').next().simplify()
    x2 = Parser(r'\a.(like vincent a)').next().simplify()
    assert x1 == x2

    x1 = Parser(r'\a.(and (\b.(like b a) vincent) (sleep a))').next().simplify()
    x2 = Parser(r'\a.(and (like vincent a) (sleep a))').next().simplify()
    assert x1 == x2

    x1 = Parser(r'(\a.\b.(like b a) mia vincent)').next().simplify()
    x2 = Parser(r'(like vincent mia)').next().simplify()
    assert x1 == x2

    x1 = Parser(r'(p (\a.(sleep a) vincent))').next().simplify()
    x2 = Parser(r'(p (sleep vincent))').next().simplify()
    assert x1 == x2

    x1 = Parser(r'\a.(a (\b.(sleep b) vincent))').next().simplify()
    x2 = Parser(r'\a.(a (sleep vincent))').next().simplify()
    assert x1 == x2

    x1 = Parser(r'\a.(a (sleep vincent))').next().simplify()
    x2 = Parser(r'\a.(a (sleep vincent))').next().simplify()
    assert x1 == x2

    x1 = Parser(r'(\a.(a vincent) \b.(sleep b))').next().simplify()
    x2 = Parser(r'(sleep vincent)').next().simplify()
    assert x1 == x2

    x1 = Parser(r'(\a.(believe mia (a vincent)) \b.(sleep b))').next().simplify()
    x2 = Parser(r'(believe mia (sleep vincent))').next().simplify()
    assert x1 == x2

    x1 = Parser(r'(\a.(and (a vincent) (a mia)) \b.(sleep b))').next().simplify()
    x2 = Parser(r'(and (sleep vincent) (sleep mia))').next().simplify()
    assert x1 == x2

    x1 = Parser(r'(\a.\b.(and (\c.(c (a vincent)) \d.(probably d)) (\c.(c (b mia)) \d.(improbably d))) \e.(walk e) \e.(talk e)))').next().simplify()
    x2 = Parser(r'(and (probably (walk vincent)) (improbably (talk mia)))').next().simplify()
    assert x1 == x2

    x1 = Parser(r'(\a.\b.(\c.(c a b) \d.\e.(love d e)) jules mia)').next().simplify()
    x2 = Parser(r'(love jules mia)').next().simplify()
    assert x1 == x2

    x1 = Parser(r'(\a.\b.some c.(and (a c) (b c)) \d.(boxer d) \d.(sleep d))').next().simplify()
    x2 = Parser(r'some c.(and (boxer c) (sleep c))').next().simplify()
    assert x1 == x2

    x1 = Parser(r'(\a.(z a) \c.\a.(like a c))').next().simplify()
    x2 = Parser(r'(z \c.\a.(like a c))').next().simplify()
    assert x1 == x2

    x1 = Parser(r'(\a.\b.(a b) \c.\b.(like b c))').next().simplify()
    x2 = Parser(r'\b.(\c.\b.(like b c) b)').next().simplify()
    assert x1 == x2

    x1 = Parser(r'(\a.\b.(\c.(c a b) \b.\a.(loves b a)) jules mia)').next().simplify()
    x2 = Parser(r'(loves jules mia)').next().simplify()
    assert x1 == x2

    x1 = Parser(r'(\a.\b.(and some b.(a b) (a b)) \c.(boxer c) vincent)').next().simplify()
    x2 = Parser(r'(and some b.(boxer b) (boxer vincent))').next().simplify()
    assert x1 == x2
if __name__ == '__main__':
    # Regression tests first (silent on success), then the noisy demo.
    runtests()
    main()
| Python |
# Natural Language Toolkit: Semantic Interpretation
#
# Author: Ewan Klein <ewan@inf.ed.ac.uk>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
This package contains classes for representing semantic structure in
formulas of first-order logic and for evaluating such formulas in
set-theoretic models.
"""
from evaluate import *
from logic import *
from utilities import *
| Python |
# Natural Language Toolkit: Probability and Statistics
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@csse.unimelb.edu.au> (additions)
# Trevor Cohn <tacohn@cs.mu.oz.au> (additions)
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
#
# $Id: probability.py 3498 2006-10-14 05:30:32Z stevenbird $
_NINF = float('-1e300')
"""
Classes for representing and processing probabilistic information.
The L{FreqDist} class is used to encode X{frequency distributions},
which count the number of times that each outcome of an experiment
occurs.
The L{ProbDistI} class defines a standard interface for X{probability
distributions}, which encode the probability of each outcome for an
experiment. There are two types of probability distribution:
- X{derived probability distributions} are created from frequency
distributions. They attempt to model the probability distribution
that generated the frequency distribution.
- X{analytic probability distributions} are created directly from
parameters (such as variance).
The L{ConditionalFreqDist} class and L{ConditionalProbDistI} interface
are used to encode conditional distributions. Conditional probability
distributions can be derived or analytic; but currently the only
implementation of the C{ConditionalProbDistI} interface is
L{ConditionalProbDist}, a derived distribution.
"""
import types, math
try: import numpy
except: pass
##//////////////////////////////////////////////////////
## Frequency Distributions
##//////////////////////////////////////////////////////
class FreqDist(object):
    """
    A frequency distribution for the outcomes of an experiment.  A
    frequency distribution records the number of times each outcome of
    an experiment has occured.  For example, a frequency distribution
    could be used to record the frequency of each word type in a
    document.  Formally, a frequency distribution can be defined as a
    function mapping from each sample to the number of times that
    sample occured as an outcome.
    Frequency distributions are generally constructed by running a
    number of experiments, and incrementing the count for a sample
    every time it is an outcome of an experiment.  For example, the
    following code will produce a frequency distribution that encodes
    how often each word occurs in a text:
        >>> fdist = FreqDist()
        >>> for word in tokenize.whitespace(sent):
        ...     fdist.inc(word)
    """
    def __init__(self):
        """
        Construct a new empty, C{FreqDist}.  In particular, the count
        for every sample is zero.
        """
        self._count = {}        # Maps each sample to its count.
        self._N = 0             # Total number of outcomes recorded.
        self._Nr_cache = None   # Cached Nr table; see _cache_Nr_values().
        self._max_cache = None  # Cached most-frequent sample.
    def inc(self, sample, count=1):
        """
        Increment this C{FreqDist}'s count for the given sample.
        @param sample: The sample whose count should be incremented.
        @type sample: any
        @param count: The amount to increment the sample's count by.
        @type count: C{int}
        @rtype: None
        """
        if count == 0: return
        self._N += count
        self._count[sample] = self._count.get(sample, 0) + count
        # The cached Nr table and max sample are now stale.
        self._Nr_cache = None
        self._max_cache = None
    def N(self):
        """
        @return: The total number of sample outcomes that have been
          recorded by this C{FreqDist}.  For the number of unique
          sample values (or bins) with counts greater than zero, use
          C{FreqDist.B()}.
        @rtype: C{int}
        """
        return self._N
    def B(self):
        """
        @return: The total number of sample values (or X{bins}) that
          have counts greater than zero.  For the total
          number of sample outcomes recorded, use C{FreqDist.N()}.
        @rtype: C{int}
        """
        return len(self._count)
    def samples(self):
        """
        @return: A list of all samples that have been recorded as
          outcomes by this frequency distribution.  Use C{count()}
          to determine the count for each sample.
        @rtype: C{list}
        """
        # Return a real list so callers may index or mutate it safely.
        return list(self._count.keys())
    def Nr(self, r, bins=None):
        """
        @return: The number of samples with count r.
        @rtype: C{int}
        @type r: C{int}
        @param r: A sample count.
        @type bins: C{int}
        @param bins: The number of possible sample outcomes.  C{bins}
          is used to calculate Nr(0).  In particular, Nr(0) is
          C{bins-self.B()}.  If C{bins} is not specified, it
          defaults to C{self.B()} (so Nr(0) will be 0).
        @raise IndexError: If C{r} is negative.
        """
        if r < 0:
            raise IndexError('FreqDist.Nr(): r must be non-negative')
        # Nr(0) is defined in terms of the number of unused bins.
        if r == 0:
            if bins is None: return 0
            else: return bins - self.B()
        # Finding Nr requires a scan of the entire distribution.  Since
        # that is expensive and likely to be repeated, cache the table.
        if self._Nr_cache is None:
            self._cache_Nr_values()
        if r >= len(self._Nr_cache): return 0
        return self._Nr_cache[r]
    def _cache_Nr_values(self):
        # Build a table where Nr[c] is the number of samples that occur
        # exactly c times, and cache it for subsequent Nr() calls.
        Nr = [0]
        for c in self._count.values():
            if c >= len(Nr):
                Nr.extend([0] * (c + 1 - len(Nr)))
            Nr[c] += 1
        self._Nr_cache = Nr
    def count(self, sample):
        """
        Return the count of a given sample.  The count of a sample is
        defined as the number of times that sample outcome was
        recorded by this C{FreqDist}.  Counts are non-negative
        integers.
        @return: The count of a given sample.
        @rtype: C{int}
        @param sample: the sample whose count should be returned.
        @type sample: any
        """
        return self._count.get(sample, 0)
    def freq(self, sample):
        """
        Return the frequency of a given sample.  The frequency of a
        sample is defined as the count of that sample divided by the
        total number of sample outcomes that have been recorded by
        this C{FreqDist}.  Frequencies are always real numbers in the
        range [0, 1].
        @return: The frequency of a given sample.
        @rtype: float
        @param sample: the sample whose frequency should be returned.
        @type sample: any
        """
        # Use equality (not identity) to guard the empty distribution.
        if self._N == 0: return 0
        return float(self._count.get(sample, 0)) / self._N
    def max(self):
        """
        Return the sample with the greatest number of outcomes in this
        frequency distribution.  If two or more samples have the same
        number of outcomes, return one of them; which sample is
        returned is undefined.  If no outcomes have occured in this
        frequency distribution, return C{None}.
        @return: The sample with the maximum number of outcomes in this
          frequency distribution.
        @rtype: any or C{None}
        """
        if self._max_cache is None and self._count:
            # Recompute lazily; inc() invalidates this cache.
            self._max_cache = max(self._count, key=self._count.get)
        return self._max_cache
    def sorted_samples(self):
        """
        Return the samples sorted in decreasing order of frequency.
        Instances with a count of zero will be omitted.  This method
        is C{O(N log N)}, where C{N} is the number of samples.
        @return: The samples in sorted order.
        @rtype: sequence of any
        """
        # Ties on count fall back to comparing the samples themselves.
        return sorted(self._count, key=lambda s: (-self._count[s], s))
    def __repr__(self):
        """
        @return: A string representation of this C{FreqDist}.
        @rtype: string
        """
        return '<FreqDist with %d samples>' % self.N()
    def __str__(self):
        """
        @return: A string representation of this C{FreqDist}.
        @rtype: string
        """
        items = ['%r: %r' % (s, self._count[s]) for s in self.sorted_samples()]
        return '<FreqDist: %s>' % ', '.join(items)
    def __contains__(self, sample):
        """
        @return: True if the given sample occurs one or more times in
          this frequency distribution.
        @rtype: C{boolean}
        @param sample: The sample to search for.
        @type sample: any
        """
        return sample in self._count
##//////////////////////////////////////////////////////
## Probability Distributions
##//////////////////////////////////////////////////////
class ProbDistI(object):
    """
    A probability distribution for the outcomes of an experiment.  A
    probability distribution specifies how likely it is that an
    experiment will have any given outcome.  For example, a
    probability distribution could be used to predict the probability
    that a token in a document will have a given type.  Formally, a
    probability distribution can be defined as a function mapping from
    samples to nonnegative real numbers, such that the sum of every
    number in the function's range is 1.0.  C{ProbDist}s are often
    used to model the probability distribution of the experiment used
    to generate a frequency distribution.
    """
    def __init__(self):
        # This class is an interface; only subclasses may be created.
        if self.__class__ == ProbDistI:
            raise AssertionError("Interfaces can't be instantiated")
    def prob(self, sample):
        """
        @return: the probability for a given sample.  Probabilities
            are always real numbers in the range [0, 1].
        @rtype: float
        @param sample: The sample whose probability
            should be returned.
        @type sample: any
        """
        raise AssertionError()
    def logprob(self, sample):
        """
        @return: the natural logarithm of the probability for a given
            sample.  Log probabilities range from negative infinity to
            zero.
        @rtype: float
        @param sample: The sample whose probability
            should be returned.
        @type sample: any
        """
        # Default definition, in terms of prob().
        p = self.prob(sample)
        if p == 0:
            # Use an approximation to negative infinity, since
            # math.log(0) would raise an error.
            return _NINF
        else:
            return math.log(p)
    def max(self):
        """
        @return: the sample with the greatest probability.  If two or
            more samples have the same probability, return one of them;
            which sample is returned is undefined.
        @rtype: any
        """
        raise AssertionError()
    def samples(self):
        """
        @return: A list of all samples that have nonzero
            probabilities.  Use C{prob} to find the probability of
            each sample.
        @rtype: C{list}
        """
        raise AssertionError()
class UniformProbDist(ProbDistI):
    """
    A probability distribution that divides probability mass evenly
    among a fixed set of samples, and assigns zero probability to
    every other sample.
    """
    def __init__(self, samples):
        """
        Construct a new uniform probability distribution that assigns
        equal probability to each sample in C{samples}.
        @param samples: The samples that should be given uniform
            probability.
        @type samples: C{list}
        @raise ValueError: If C{samples} is empty.
        """
        if not samples:
            raise ValueError('A Uniform probability distribution must '+
                             'have at least one sample.')
        # Duplicates collapse into the set, so each distinct sample
        # receives an equal share of the mass.
        self._sampleset = set(samples)
        self._prob = 1.0 / len(self._sampleset)
        self._samples = list(self._sampleset)
    def prob(self, sample):
        # Only members of the sample set carry any probability mass.
        if sample in self._sampleset:
            return self._prob
        return 0
    def max(self):
        # Every sample is equally probable; return the first stored one.
        return self._samples[0]
    def samples(self):
        return self._samples
    def __repr__(self):
        return '<UniformProbDist with %d samples>' % len(self._sampleset)
class DictionaryProbDist(ProbDistI):
    """
    A probability distribution whose probabilities are directly
    specified by a given dictionary.  The given dictionary maps
    samples to probabilities.
    """
    def __init__(self, prob_dict=None, log=False, normalize=False):
        """
        Construct a new probability distribution from the given
        dictionary, which maps values to probabilities (or to log
        probabilities, if C{log} is true).  If C{normalize} is
        true, then the probability values are scaled by a constant
        factor such that they sum to 1.
        @param prob_dict: A dictionary mapping samples to (log)
            probabilities; if C{None}, the distribution is empty.
        @type prob_dict: C{dict} or C{None}
        @param log: True if the dictionary values are log probabilities.
        @type log: C{bool}
        @param normalize: True if the values should be rescaled so they
            sum to one.
        @type normalize: C{bool}
        """
        # Guard the default: prob_dict.copy() would raise an
        # AttributeError when no dictionary is supplied.
        if prob_dict is None:
            self._prob_dict = {}
        else:
            self._prob_dict = prob_dict.copy()
        self._log = log
        # Normalize the distribution, if requested.
        # NOTE(review): normalizing an empty dictionary divides by
        # zero -- confirm callers never request that combination.
        if normalize:
            if log:
                value_sum = sum_logs(self._prob_dict.values())
                if value_sum <= _NINF:
                    # No finite mass anywhere: fall back to uniform.
                    logp = math.log(1.0 / len(self._prob_dict))
                    for x in self._prob_dict:
                        self._prob_dict[x] = logp
                else:
                    for x in self._prob_dict:
                        self._prob_dict[x] -= value_sum
            else:
                value_sum = sum(self._prob_dict.values())
                if value_sum == 0:
                    # No mass anywhere: fall back to uniform.
                    p = 1.0 / len(self._prob_dict)
                    for x in self._prob_dict:
                        self._prob_dict[x] = p
                else:
                    norm_factor = 1.0 / value_sum
                    for x in self._prob_dict:
                        self._prob_dict[x] *= norm_factor
    def prob(self, sample):
        if self._log:
            # Stored values are log probabilities.
            if sample not in self._prob_dict:
                return 0
            return math.exp(self._prob_dict[sample])
        else:
            return self._prob_dict.get(sample, 0)
    def logprob(self, sample):
        if self._log:
            return self._prob_dict.get(sample, _NINF)
        else:
            # Stored values are plain probabilities.
            if sample not in self._prob_dict:
                return _NINF
            return math.log(self._prob_dict[sample])
    def max(self):
        if not hasattr(self, '_max'):
            # Ties on probability are broken by the largest sample.
            self._max = max([(p, v) for (v, p) in self._prob_dict.items()])[1]
        return self._max
    def samples(self):
        return list(self._prob_dict.keys())
    def __repr__(self):
        return '<ProbDist with %d samples>' % len(self._prob_dict)
class MLEProbDist(ProbDistI):
    """
    The maximum likelihood estimate for the probability distribution
    of the experiment used to generate a frequency distribution.  The
    maximum likelihood estimate approximates the probability of each
    sample as that sample's relative frequency in the frequency
    distribution.
    """
    def __init__(self, freqdist):
        """
        Use the maximum likelihood estimate to create a probability
        distribution for the experiment used to generate C{freqdist}.
        @type freqdist: C{FreqDist}
        @param freqdist: The frequency distribution that the
            probability estimates should be based on.
        @raise ValueError: If C{freqdist} records no outcomes.
        """
        if not freqdist.N():
            raise ValueError('An MLE probability distribution must '+
                             'have at least one sample.')
        self._freqdist = freqdist
    def freqdist(self):
        """
        @return: The frequency distribution that this probability
            distribution is based on.
        @rtype: C{FreqDist}
        """
        return self._freqdist
    def prob(self, sample):
        # The MLE probability is simply the relative frequency.
        return self._freqdist.freq(sample)
    def max(self):
        # The most frequent sample is also the most probable one.
        return self._freqdist.max()
    def samples(self):
        return self._freqdist.samples()
    def __repr__(self):
        """
        @rtype: C{string}
        @return: A string representation of this C{ProbDist}.
        """
        return '<MLEProbDist based on %d samples>' % self._freqdist.N()
class LidstoneProbDist(ProbDistI):
    """
    The Lidstone estimate for the probability distribution of the
    experiment used to generate a frequency distribution.  The
    Lidstone estimate is parameterized by a real number M{gamma},
    typically in the range 0 to 1, and approximates the probability of
    a sample with count M{c} from an experiment with M{N} outcomes and
    M{B} bins as M{(c+gamma)/(N+B*gamma)}.  This is equivalent to
    adding M{gamma} to every bin's count and taking the maximum
    likelihood estimate of the resulting frequency distribution.
    """
    def __init__(self, freqdist, gamma, bins=None):
        """
        Use the Lidstone estimate to create a probability distribution
        for the experiment used to generate C{freqdist}.
        @type freqdist: C{FreqDist}
        @param freqdist: The frequency distribution that the
            probability estimates should be based on.
        @type gamma: C{float}
        @param gamma: The smoothing constant added to each bin's count.
        @type bins: C{int}
        @param bins: The number of sample values that can be generated
            by the experiment described by this distribution.  Must be
            correctly set for the probabilities to sum to one; defaults
            to C{freqdist.B()}.
        @raise ValueError: If there are no bins, or fewer bins than
            C{freqdist.B()}.
        """
        if (bins == 0) or (bins is None and freqdist.N() == 0):
            raise ValueError('A %s probability distribution ' % self._name() +
                             'must have at least one bin.')
        if (bins is not None) and (bins < freqdist.B()):
            raise ValueError('\nThe number of bins in a %s must be '
                             % self._name() +
                             'greater than or equal to\nthe number of '+
                             'bins in the FreqDist used to create it.')
        self._freqdist = freqdist
        self._gamma = float(gamma)
        self._N = freqdist.N()
        if bins is None:
            bins = freqdist.B()
        self._bins = bins
    def _name(self):
        # Class name with the trailing 'ProbDist' stripped, so error
        # messages report the subclass (Lidstone, Laplace, ELE, ...).
        return self.__class__.__name__[:-8]
    def freqdist(self):
        """
        @return: The frequency distribution that this probability
            distribution is based on.
        @rtype: C{FreqDist}
        """
        return self._freqdist
    def prob(self, sample):
        # Smoothed relative frequency: (c + gamma) / (N + B * gamma).
        numerator = self._freqdist.count(sample) + self._gamma
        denominator = self._N + self._bins * self._gamma
        return numerator / denominator
    def max(self):
        # Probability is monotonic in frequency for Lidstone
        # distributions, so the most frequent sample is most probable.
        return self._freqdist.max()
    def samples(self):
        return self._freqdist.samples()
    def __repr__(self):
        """
        @rtype: C{string}
        @return: A string representation of this C{ProbDist}.
        """
        return '<LidstoneProbDist based on %d samples>' % self._freqdist.N()
class LaplaceProbDist(LidstoneProbDist):
    """
    The Laplace estimate for the probability distribution of the
    experiment used to generate a frequency distribution.  It
    approximates the probability of a sample with count M{c} from an
    experiment with M{N} outcomes and M{B} bins as M{(c+1)/(N+B)} --
    i.e. add-one smoothing followed by a maximum likelihood estimate.
    """
    def __init__(self, freqdist, bins=None):
        """
        Use the Laplace estimate to create a probability distribution
        for the experiment used to generate C{freqdist}.
        @type freqdist: C{FreqDist}
        @param freqdist: The frequency distribution that the
            probability estimates should be based on.
        @type bins: C{int}
        @param bins: The number of sample values that can be generated
            by the experiment described by this distribution.  Must be
            correctly set for the probabilities to sum to one; defaults
            to C{freqdist.B()}.
        """
        # Laplace smoothing is Lidstone smoothing with gamma fixed at 1.
        LidstoneProbDist.__init__(self, freqdist, 1, bins)
    def __repr__(self):
        """
        @rtype: C{string}
        @return: A string representation of this C{ProbDist}.
        """
        return '<LaplaceProbDist based on %d samples>' % self._freqdist.N()
class ELEProbDist(LidstoneProbDist):
    """
    The expected likelihood estimate for the probability distribution
    of the experiment used to generate a frequency distribution.  It
    approximates the probability of a sample with count M{c} from an
    experiment with M{N} outcomes and M{B} bins as M{(c+0.5)/(N+B/2)}
    -- i.e. add-one-half smoothing followed by a maximum likelihood
    estimate.
    """
    def __init__(self, freqdist, bins=None):
        """
        Use the expected likelihood estimate to create a probability
        distribution for the experiment used to generate C{freqdist}.
        @type freqdist: C{FreqDist}
        @param freqdist: The frequency distribution that the
            probability estimates should be based on.
        @type bins: C{int}
        @param bins: The number of sample values that can be generated
            by the experiment described by this distribution.  Must be
            correctly set for the probabilities to sum to one; defaults
            to C{freqdist.B()}.
        """
        # ELE is Lidstone smoothing with gamma fixed at one half.
        LidstoneProbDist.__init__(self, freqdist, 0.5, bins)
    def __repr__(self):
        """
        @rtype: C{string}
        @return: A string representation of this C{ProbDist}.
        """
        return '<ELEProbDist based on %d samples>' % self._freqdist.N()
class HeldoutProbDist(ProbDistI):
    """
    The heldout estimate for the probability distribution of the
    experiment used to generate two frequency distributions, called
    the "base frequency distribution" and the "heldout frequency
    distribution."  The heldout estimate predicts the probability of a
    sample that occurs M{r} times in the base distribution as the
    average frequency, in the heldout distribution, of all samples
    that occur M{r} times in the base distribution.
    That average frequency is M{Tr[r]/(Nr[r]*N)}, where:
        - M{Tr[r]} is the total count in the heldout distribution for
          all samples that occur M{r} times in the base distribution.
        - M{Nr[r]} is the number of samples that occur M{r} times in
          the base distribution.
        - M{N} is the number of outcomes recorded by the heldout
          frequency distribution.
    To make C{prob} efficient, M{Tr[r]/(Nr[r]*N)} is precomputed for
    each value of M{r} when the C{HeldoutProbDist} is created.
    @type _estimate: C{list} of C{float}
    @ivar _estimate: Maps a base-distribution count M{r} to the
        probability estimate for samples with that count;
        C{_estimate[M{r}]} = M{Tr[r]/(Nr[r]*N)}.
    @type _max_r: C{int}
    @ivar _max_r: The largest count of any sample in the base
        distribution; determines how long C{_estimate} must be.
    """
    def __init__(self, base_fdist, heldout_fdist, bins=None):
        """
        Use the heldout estimate to create a probability distribution
        for the experiment used to generate C{base_fdist} and
        C{heldout_fdist}.
        @type base_fdist: C{FreqDist}
        @param base_fdist: The base frequency distribution.
        @type heldout_fdist: C{FreqDist}
        @param heldout_fdist: The heldout frequency distribution.
        @type bins: C{int}
        @param bins: The number of sample values that can be generated
            by the experiment described by this distribution.  Must be
            correctly set for the probabilities to sum to one.
        """
        self._base_fdist = base_fdist
        self._heldout_fdist = heldout_fdist
        # The largest count attained by any sample in the base
        # distribution.
        self._max_r = base_fdist.count(base_fdist.max())
        # Precompute the estimate table from Tr, Nr, and N.
        Tr = self._calculate_Tr()
        Nr = [base_fdist.Nr(r, bins) for r in range(self._max_r + 1)]
        self._estimate = self._calculate_estimate(Tr, Nr,
                                                  heldout_fdist.N())
    def _calculate_Tr(self):
        """
        @return: the list M{Tr}, where M{Tr[r]} is the total count in
            C{heldout_fdist} for all samples that occur M{r} times in
            C{base_fdist}.
        @rtype: C{list} of C{float}
        """
        totals = [0.0] * (self._max_r + 1)
        for sample in self._heldout_fdist.samples():
            base_count = self._base_fdist.count(sample)
            totals[base_count] += self._heldout_fdist.count(sample)
        return totals
    def _calculate_estimate(self, Tr, Nr, N):
        """
        @return: the list M{estimate}, where M{estimate[r]} is
            M{Tr[r]/(Nr[r]*N)}, the probability estimate for any
            sample occurring M{r} times in the base distribution.
            When M{Nr[r]=0} the estimate is never consulted, so
            C{None} is stored for those entries.
        @rtype: C{list} of C{float}
        @type Tr: C{list} of C{float}
        @param Tr: Total heldout counts, indexed by base count.
        @type Nr: C{list} of C{float}
        @param Nr: Number of samples for each base count.
        @type N: C{int}
        @param N: The total number of outcomes recorded by the heldout
            frequency distribution.
        """
        table = []
        for r in range(self._max_r + 1):
            if Nr[r] == 0:
                table.append(None)
            else:
                table.append(Tr[r] / (Nr[r] * N))
        return table
    def base_fdist(self):
        """
        @return: The base frequency distribution that this probability
            distribution is based on.
        @rtype: C{FreqDist}
        """
        return self._base_fdist
    def heldout_fdist(self):
        """
        @return: The heldout frequency distribution that this
            probability distribution is based on.
        @rtype: C{FreqDist}
        """
        return self._heldout_fdist
    def prob(self, sample):
        # Look up the precomputed estimate for this sample's base count.
        return self._estimate[self._base_fdist.count(sample)]
    def max(self):
        # NOTE: heldout estimation is *not* necessarily monotonic in
        # frequency, so this answer may occasionally be wrong; it is
        # right most of the time.
        return self._base_fdist.max()
    def __repr__(self):
        """
        @rtype: C{string}
        @return: A string representation of this C{ProbDist}.
        """
        s = '<HeldoutProbDist: %d base samples; %d heldout samples>'
        return s % (self._base_fdist.N(), self._heldout_fdist.N())
class CrossValidationProbDist(ProbDistI):
    """
    The cross-validation estimate for the probability distribution of
    the experiment used to generate a set of frequency distributions.
    The cross-validation estimate of a sample's probability is the
    average of the heldout estimates for that sample over every
    ordered pair of frequency distributions.
    """
    def __init__(self, freqdists, bins):
        """
        Use the cross-validation estimate to create a probability
        distribution for the experiment used to generate C{freqdists}.
        @type freqdists: C{list} of C{FreqDist}
        @param freqdists: A list of the frequency distributions
            generated by the experiment.
        @type bins: C{int}
        @param bins: The number of sample values that can be generated
            by the experiment described by this distribution.  Must be
            correctly set for the probabilities to sum to one.
        """
        self._freqdists = freqdists
        # One heldout distribution per ordered pair of distinct
        # frequency distributions.
        self._heldout_probdists = [HeldoutProbDist(fd1, fd2, bins)
                                   for fd1 in freqdists
                                   for fd2 in freqdists
                                   if fd1 is not fd2]
    def freqdists(self):
        """
        @rtype: C{list} of C{FreqDist}
        @return: The list of frequency distributions that this
            C{ProbDist} is based on.
        """
        return self._freqdists
    def prob(self, sample):
        # Average the heldout estimates across all pairs.
        estimates = [pd.prob(sample) for pd in self._heldout_probdists]
        return sum(estimates) / len(estimates)
    def __repr__(self):
        """
        @rtype: C{string}
        @return: A string representation of this C{ProbDist}.
        """
        return '<CrossValidationProbDist: %d-way>' % len(self._freqdists)
class WittenBellProbDist(ProbDistI):
    """
    The Witten-Bell estimate of a probability distribution.  This
    distribution allocates uniform probability mass to as yet unseen
    events by using the number of events that have only been seen
    once.  The probability mass reserved for unseen events is equal to:
        - M{T / (N + T)}
    where M{T} is the number of observed event types and M{N} is the
    total number of observed events.  This equates to the maximum
    likelihood estimate of a new type event occuring.  The remaining
    probability mass is discounted such that all probability estimates
    sum to one, yielding:
        - M{p = T / Z (N + T)}, if count = 0
        - M{p = c / (N + T)}, otherwise
    """
    def __init__(self, freqdist, bins=None):
        """
        Creates a distribution of Witten-Bell probability estimates.
        The parameters M{T} and M{N} are taken from C{freqdist} (its
        C{B()} and C{N()} values); the normalising factor M{Z} is
        calculated from those values and C{bins}.
        @param freqdist: The frequency counts upon which to base the
            estimation.
        @type freqdist: C{FreqDist}
        @param bins: The number of possible event types.  This must be
            at least as large as the number of bins in C{freqdist}; if
            C{None}, it is assumed to be equal to C{freqdist.B()}.
        @type bins: C{Int}
        """
        # Use an identity test against None, not equality.
        assert bins is None or bins >= freqdist.B(),\
            'Bins parameter must not be less than freqdist.B()'
        if bins is None:
            bins = freqdist.B()
        self._freqdist = freqdist
        self._T = self._freqdist.B()          # observed event types
        self._Z = bins - self._freqdist.B()   # unseen event types
        self._N = self._freqdist.N()          # total observed events
    def prob(self, sample):
        # inherit docs from ProbDistI
        c = self._freqdist.count(sample)
        if c == 0:
            # NOTE(review): when bins == freqdist.B(), Z is zero and
            # this divides by zero for unseen samples -- confirm
            # callers always pass bins > freqdist.B().
            return self._T / float(self._Z * (self._N + self._T))
        else:
            return c / float(self._N + self._T)
    def max(self):
        return self._freqdist.max()
    def samples(self):
        return self._freqdist.samples()
    def freqdist(self):
        return self._freqdist
    def __repr__(self):
        """
        @rtype: C{string}
        @return: A string representation of this C{ProbDist}.
        """
        return '<WittenBellProbDist based on %d samples>' % self._freqdist.N()
class GoodTuringProbDist(ProbDistI):
    """
    The Good-Turing estimate of a probability distribution.  This
    method calculates the probability mass to assign to events with
    zero or low counts based on the number of events with higher
    counts.  It does so by using the smoothed count M{c*}:
        - M{c* = (c + 1) N(c + 1) / N(c)}
    where M{c} is the original count and M{N(i)} is the number of
    event types observed with count M{i}.  These smoothed counts are
    then normalised to yield a probability distribution.
    """
    # TODO - add a cut-off parameter, above which the counts are unmodified
    # (see J&M p216)
    def __init__(self, freqdist, bins):
        """
        Creates a Good-Turing probability distribution estimate.  The
        C{bins} parameter allows C{N(0)} to be estimated.
        @param freqdist: The frequency counts upon which to base the
            estimation.
        @type freqdist: C{FreqDist}
        @param bins: The number of possible event types.  This must be
            at least as large as the number of bins in C{freqdist}; if
            C{None}, it is taken to be equal to C{freqdist.B()}.
        @type bins: C{Int}
        """
        # Use an identity test against None, not equality.
        assert bins is None or bins >= freqdist.B(),\
            'Bins parameter must not be less than freqdist.B()'
        if bins is None:
            bins = freqdist.B()
        self._freqdist = freqdist
        self._bins = bins
    def prob(self, sample):
        # inherit docs from ProbDistI
        c = self._freqdist.count(sample)
        nc = self._freqdist.Nr(c, self._bins)
        ncn = self._freqdist.Nr(c + 1, self._bins)
        # Avoid divide-by-zero errors for sparse datasets.
        if nc == 0 or self._freqdist.N() == 0:
            return 0.0
        # Smoothed count c* = (c+1) N(c+1) / N(c), normalised by N.
        return float(c + 1) * ncn / (nc * self._freqdist.N())
    def max(self):
        return self._freqdist.max()
    def samples(self):
        return self._freqdist.samples()
    def freqdist(self):
        return self._freqdist
    def __repr__(self):
        """
        @rtype: C{string}
        @return: A string representation of this C{ProbDist}.
        """
        return '<GoodTuringProbDist based on %d samples>' % self._freqdist.N()
class MutableProbDist(ProbDistI):
    """
    A mutable probdist where the probabilities may be easily modified.
    This simply copies an existing probdist, storing the probability
    values in a mutable array and providing an update method.
    """
    def __init__(self, prob_dist, samples, store_logs=True):
        """
        Creates the mutable probdist based on the given prob_dist and
        using the list of samples given.  These values are stored as
        log probabilities if the store_logs flag is set.
        @param prob_dist: the distribution from which to garner the
            probabilities
        @type prob_dist: ProbDist
        @param samples: the complete set of samples
        @type samples: sequence of any
        @param store_logs: whether to store the probabilities as
            logarithms
        @type store_logs: bool
        """
        self._samples = samples
        # Map each sample to its index in the storage array.
        self._sample_dict = dict((s, i) for (i, s) in enumerate(samples))
        try:
            # numpy's dtype is spelled float64 (Float64 was the old
            # Numeric/numarray name and is not a numpy attribute).
            self._data = numpy.zeros(len(samples), numpy.float64)
        except NameError:
            # numpy is unavailable; a plain list behaves the same for
            # our indexed reads and writes.
            self._data = [0.0] * len(samples)
        for i in range(len(samples)):
            if store_logs:
                self._data[i] = prob_dist.logprob(samples[i])
            else:
                self._data[i] = prob_dist.prob(samples[i])
        self._logs = store_logs
    def samples(self):
        # inherit documentation
        return self._samples
    def prob(self, sample):
        # inherit documentation
        i = self._sample_dict.get(sample)
        if i is None:
            return 0.0
        if self._logs:
            # Stored values are log probabilities; qualify exp with the
            # math module (a bare 'exp' is undefined here).
            return math.exp(self._data[i])
        return self._data[i]
    def logprob(self, sample):
        # inherit documentation
        i = self._sample_dict.get(sample)
        if i is None:
            return float('-inf')
        if self._logs:
            return self._data[i]
        return math.log(self._data[i])
    def update(self, sample, prob, log=True):
        """
        Update the probability for the given sample.  This may cause
        the object to stop being the valid probability distribution -
        the user must ensure that they update the sample probabilities
        such that all samples have probabilities between 0 and 1 and
        that all probabilities sum to one.
        @param sample: the sample for which to update the probability
        @type sample: C{any}
        @param prob: the new probability
        @type prob: C{float}
        @param log: is the probability already logged
        @type log: C{bool}
        """
        i = self._sample_dict.get(sample)
        assert i is not None
        # Qualify math.log/math.exp explicitly: the boolean parameter
        # 'log' shadows any module-level name of the same spelling.
        if self._logs:
            if log:
                self._data[i] = prob
            else:
                self._data[i] = math.log(prob)
        else:
            if log:
                self._data[i] = math.exp(prob)
            else:
                self._data[i] = prob
##//////////////////////////////////////////////////////
## Probability Distribution Operations
##//////////////////////////////////////////////////////
def log_likelihood(test_pdist, actual_pdist):
    # Expected log probability of the test distribution, weighted by
    # the actual distribution's probabilities.  (Is this right?)
    total = 0.0
    for sample in actual_pdist.samples():
        total += actual_pdist.prob(sample) * math.log(test_pdist.prob(sample))
    return total
##//////////////////////////////////////////////////////
## Conditional Distributions
##//////////////////////////////////////////////////////
class ConditionalFreqDist(object):
    """
    A collection of frequency distributions for a single experiment
    run under different conditions.  Conditional frequency
    distributions are used to record the number of times each sample
    occured, given the condition under which the experiment was run.
    For example, a conditional frequency distribution could be used to
    record the frequency of each word (type) in a document, given its
    length.  Formally, a conditional frequency distribution can be
    defined as a function that maps from each condition to the
    C{FreqDist} for the experiment under that condition.
    The frequency distribution for each condition is accessed using
    the indexing operator:
        >>> cfdist[3]
        <FreqDist with 73 outcomes>
        >>> cfdist[3].freq('the')
        0.4
        >>> cfdist[3].count('dog')
        2
    When the indexing operator is used to access the frequency
    distribution for a condition that has not been accessed before,
    C{ConditionalFreqDist} creates a new empty C{FreqDist} for that
    condition.
    Conditional frequency distributions are typically constructed by
    repeatedly running an experiment under a variety of conditions,
    and incrementing the sample outcome counts for the appropriate
    conditions.  For example, the following code will produce a
    conditional frequency distribution that encodes how often each
    word type occurs, given the length of that word type:
        >>> cfdist = ConditionalFreqDist()
        >>> for word in tokenize.whitespace(sent):
        ...     condition = len(word)
        ...     cfdist[condition].inc(word)
    """
    def __init__(self):
        """
        Construct a new empty conditional frequency distribution.  In
        particular, the count for every sample, under every condition,
        is zero.
        """
        self._fdists = {}   # Maps each condition to its FreqDist.
    def __getitem__(self, condition):
        """
        Return the frequency distribution that encodes the frequency
        of each sample outcome, given that the experiment was run
        under the given condition.  If the frequency distribution for
        the given condition has not been accessed before, then this
        will create a new empty C{FreqDist} for that condition.
        @return: The frequency distribution that encodes the frequency
            of each sample outcome, given that the experiment was run
            under the given condition.
        @rtype: C{FreqDist}
        @param condition: The condition under which the experiment was
            run.
        @type condition: any
        """
        # Lazily create the conditioned freq dist on first access.
        if condition not in self._fdists:
            self._fdists[condition] = FreqDist()
        return self._fdists[condition]
    def conditions(self):
        """
        @return: A list of the conditions that have been accessed for
            this C{ConditionalFreqDist}.  Use the indexing operator to
            access the frequency distribution for a given condition.
            Note that the frequency distributions for some conditions
            may contain zero sample outcomes.
        @rtype: C{list}
        """
        return list(self._fdists.keys())
    def __repr__(self):
        """
        @return: A string representation of this
            C{ConditionalFreqDist}.
        @rtype: C{string}
        """
        return '<ConditionalFreqDist with %d conditions>' % len(self._fdists)
class ConditionalProbDistI(object):
    """
    A collection of probability distributions for a single experiment
    run under different conditions. Conditional probability
    distributions are used to estimate the likelihood of each sample,
    given the condition under which the experiment was run. For
    example, a conditional probability distribution could be used to
    estimate the probability of each word type in a document, given
    the length of the word type. Formally, a conditional probability
    distribution can be defined as a function that maps from each
    condition to the C{ProbDist} for the experiment under that
    condition.
    """
    def __init__(self):
        # Parenthesized raise form is valid on Python 2 and 3 alike;
        # the statement form `raise X, 'msg'` is Python-2 only.
        raise AssertionError('ConditionalProbDistI is an interface')
    def __getitem__(self, condition):
        """
        @return: The probability distribution for the experiment run
            under the given condition.
        @rtype: C{ProbDistI}
        @param condition: The condition whose probability distribution
            should be returned.
        @type condition: any
        """
        raise AssertionError()
    def conditions(self):
        """
        @return: A list of the conditions that are represented by
            this C{ConditionalProbDist}. Use the indexing operator to
            access the probability distribution for a given condition.
        @rtype: C{list}
        """
        raise AssertionError()
# For now, this is the only implementation of ConditionalProbDistI;
# but we would want a different implementation if we wanted to build a
# conditional probability distribution analytically (e.g., a gaussian
# distribution), rather than basing it on an underlying frequency
# distribution.
class ConditionalProbDist(ConditionalProbDistI):
    """
    A conditional probability distribution modelling the experiments
    that were used to generate a conditional frequency distribution.
    A C{ConditionalProbDist} is constructed from a
    C{ConditionalFreqDist} and a X{C{ProbDist} factory}:
      - The B{C{ConditionalFreqDist}} specifies the frequency
        distribution for each condition.
      - The B{C{ProbDist} factory} is a function that takes a
        condition's frequency distribution, and returns its
        probability distribution. A C{ProbDist} class's name (such as
        C{MLEProbDist} or C{HeldoutProbDist}) can be used to specify
        that class's constructor.
    The first argument to the C{ProbDist} factory is the frequency
    distribution that it should model; and the remaining arguments are
    specified by the C{factory_args} parameter to the
    C{ConditionalProbDist} constructor. For example, the following
    code constructs a C{ConditionalProbDist}, where the probability
    distribution for each condition is an C{ELEProbDist} with 10 bins:
        >>> cpdist = ConditionalProbDist(cfdist, ELEProbDist, 10)
        >>> print cpdist['run'].max()
        'NN'
        >>> print cpdist['run'].prob('NN')
        0.0813
    """
    def __init__(self, cfdist, probdist_factory,
                 supply_condition=False, *factory_args):
        """
        Construct a new conditional probability distribution, based on
        the given conditional frequency distribution and C{ProbDist}
        factory.
        @type cfdist: L{ConditionalFreqDist}
        @param cfdist: The C{ConditionalFreqDist} specifying the
            frequency distribution for each condition.
        @type probdist_factory: C{class} or C{function}
        @param probdist_factory: The function or class that maps
            a condition's frequency distribution to its probability
            distribution. The function is called with the frequency
            distribution as its first argument, the condition as its
            second argument (only if C{supply_condition=True}), and
            C{factory_args} as its remaining arguments.
        @type supply_condition: C{bool}
        @param supply_condition: If true, then pass the condition as
            the second argument to C{probdist_factory}.
        @type factory_args: (any)
        @param factory_args: Extra arguments for C{probdist_factory}.
            These arguments are usually used to specify extra
            properties for the probability distributions of individual
            conditions, such as the number of bins they contain.
        """
        self._probdist_factory = probdist_factory
        self._cfdist = cfdist
        self._supply_condition = supply_condition
        self._factory_args = factory_args
        self._pdists = {}
        for c in cfdist.conditions():
            self._pdists[c] = self._make_probdist(cfdist[c], c)
    def _make_probdist(self, fdist, condition):
        # Invoke the factory, passing the condition only when requested.
        if self._supply_condition:
            return self._probdist_factory(fdist, condition,
                                          *self._factory_args)
        return self._probdist_factory(fdist, *self._factory_args)
    def __getitem__(self, condition):
        if condition not in self._pdists:
            # If it's a condition we haven't seen, create a new prob
            # dist from the empty freq dist. Typically, this will
            # give a uniform prob dist. Bug fix: the condition was
            # previously never passed to the factory here, breaking
            # factories constructed with supply_condition=True.
            self._pdists[condition] = self._make_probdist(FreqDist(),
                                                          condition)
        return self._pdists[condition]
    def conditions(self):
        # list() pins the return type on both Py2 and Py3.
        return list(self._pdists.keys())
    def __repr__(self):
        """
        @return: A string representation of this
            C{ConditionalProbDist}.
        @rtype: C{string}
        """
        n = len(self._pdists)
        return '<ConditionalProbDist with %d conditions>' % n
class DictionaryConditionalProbDist(ConditionalProbDistI):
    """
    A C{ConditionalProbDist} variant that simply wraps an existing
    mapping from conditions to C{ProbDist}s, instead of deriving the
    distributions from C{FreqDist}s.
    """
    def __init__(self, probdist_dict):
        """
        @param probdist_dict: a dictionary containing the probdists indexed
            by the conditions
        @type probdist_dict: dict any -> probdist
        """
        self._dict = probdist_dict
    def __getitem__(self, condition):
        # Documentation inherited from ConditionalProbDistI. Looking up
        # an unseen condition raises KeyError.
        return self._dict[condition]
    def conditions(self):
        # Documentation inherited from ConditionalProbDistI.
        return self._dict.keys()
##//////////////////////////////////////////////////////
## Adding in log-space.
##//////////////////////////////////////////////////////
# If the difference is bigger than this, then just take the bigger one:
# exp(log(1e-30)) is far below double precision, so the smaller term
# cannot affect the sum.
_ADD_LOGS_MAX_DIFF = math.log(1e-30)
def add_logs(logx, logy):
    """
    Given two numbers C{logx}=M{log(x)} and C{logy}=M{log(y)}, return
    M{log(x+y)}. Conceptually, this is the same as returning
    M{log(exp(C{logx})+exp(C{logy}))}, but the actual implementation
    avoids overflow errors that could result from direct computation.
    """
    # If one operand is vastly smaller, it cannot affect the sum.
    if logx < logy + _ADD_LOGS_MAX_DIFF:
        return logy
    if logy < logx + _ADD_LOGS_MAX_DIFF:
        return logx
    # Factor out the smaller term so both exponentials stay bounded.
    anchor = min(logx, logy)
    shifted = math.exp(logx - anchor) + math.exp(logy - anchor)
    return anchor + math.log(shifted)
def sum_logs(logs):
    """
    Return M{log(sum_i exp(logs[i]))}, i.e. the log of the sum of the
    (linear-domain) values whose logs are given, computed stably with
    L{add_logs}. An empty sequence yields C{_NINF} (the log of zero).
    """
    if len(logs) == 0:
        # Use some approximation to infinity. What this does
        # depends on your system's float implementation.
        return _NINF
    # Fold with add_logs explicitly rather than via the reduce()
    # builtin: identical semantics on Python 2, and also works on
    # Python 3 where reduce moved into functools.
    total = logs[0]
    for lp in logs[1:]:
        total = add_logs(total, lp)
    return total
##//////////////////////////////////////////////////////
## Probabilistic Mix-in
##//////////////////////////////////////////////////////
class ProbabilisticMixIn(object):
    """
    A mix-in class to associate probabilities with other classes
    (trees, rules, etc.). To use the C{ProbabilisticMixIn} class,
    define a new class that derives from an existing class and from
    ProbabilisticMixIn. You will need to define a new constructor for
    the new class, which explicitly calls the constructors of both its
    parent classes. For example:
        >>> class A:
        ...     def __init__(self, x, y): self.data = (x,y)
        ...
        >>> class ProbabilisticA(A, ProbabilisticMixIn):
        ...     def __init__(self, x, y, **prob_kwarg):
        ...         A.__init__(self, x, y)
        ...         ProbabilisticMixIn.__init__(self, **prob_kwarg)
    See the documentation for the ProbabilisticMixIn
    L{constructor<__init__>} for information about the arguments it
    expects.
    You should generally also redefine the string representation
    methods, the comparison methods, and the hashing method.
    """
    def __init__(self, **kwargs):
        """
        Initialize this object's probability. This initializer should
        be called by subclass constructors. C{prob} should generally be
        the first argument for those constructors.
        @kwparam prob: The probability associated with the object.
        @type prob: C{float}
        @kwparam logprob: The log of the probability associated with
            the object.
        @type logprob: C{float}
        """
        if 'prob' in kwargs:
            if 'logprob' in kwargs:
                raise TypeError('Must specify either prob or logprob '
                                '(not both)')
            else:
                ProbabilisticMixIn.set_prob(self, kwargs['prob'])
        elif 'logprob' in kwargs:
            ProbabilisticMixIn.set_logprob(self, kwargs['logprob'])
        else:
            # Neither was supplied: the probability is simply unknown.
            self.__prob = self.__logprob = None
    def set_prob(self, prob):
        """
        Set the probability associated with this object to C{prob}.
        @param prob: The new probability
        @type prob: C{float}
        """
        self.__prob = prob
        # Invalidate the cached log probability; recomputed lazily.
        self.__logprob = None
    def set_logprob(self, logprob):
        """
        Set the log probability associated with this object to
        C{logprob}. I.e., set the probability associated with this
        object to C{exp(logprob)}.
        @param logprob: The new log probability
        @type logprob: C{float}
        """
        # Bug fix: store the `logprob` parameter; the previous code
        # assigned the undefined name `prob`, raising NameError on
        # every call.
        self.__logprob = logprob
        self.__prob = None
    def prob(self):
        """
        @return: The probability associated with this object, or
            C{None} if it is unknown.
        @rtype: C{float}
        """
        if self.__prob is None:
            if self.__logprob is None: return None
            # Derive lazily from the stored log probability.
            self.__prob = math.exp(self.__logprob)
        return self.__prob
    def logprob(self):
        """
        @return: C{log(p)}, where C{p} is the probability associated
            with this object, or C{None} if it is unknown.
        @rtype: C{float}
        """
        if self.__logprob is None:
            if self.__prob is None: return None
            self.__logprob = math.log(self.__prob)
        return self.__logprob
class ImmutableProbabilisticMixIn(ProbabilisticMixIn):
    """A C{ProbabilisticMixIn} whose probability may not be changed
    after construction. (The base-class constructor can still set the
    initial value, since it calls C{ProbabilisticMixIn.set_prob}
    explicitly rather than through C{self}.)"""
    def set_prob(self, prob):
        # Parenthesized raise form is valid on Python 2 and 3 alike;
        # `raise X, 'msg'` is Python-2 only.
        raise ValueError('%s is immutable' % self.__class__.__name__)
    def set_logprob(self, prob):
        raise ValueError('%s is immutable' % self.__class__.__name__)
##//////////////////////////////////////////////////////
## Demonstration
##//////////////////////////////////////////////////////
def _create_rand_fdist(numsamples, numoutcomes):
    """
    Create a new frequency distribution, with random samples. The
    samples are numbers from 1 to C{numsamples}, and are generated by
    summing two numbers, each of which has a uniform distribution.
    """
    import random
    fdist = FreqDist()
    for _ in range(numoutcomes):
        # Floor division (//) keeps the randint bounds integral on
        # Python 3 as well; Python 2 int/int already truncated. The
        # unused `from math import sqrt` has been removed.
        y = (random.randint(1, (1 + numsamples) // 2) +
             random.randint(0, numsamples // 2))
        fdist.inc(y)
    return fdist
def _create_sum_pdist(numsamples):
    """
    Return the true probability distribution for the experiment
    C{_create_rand_fdist(numsamples, x)}.
    """
    fdist = FreqDist()
    # Enumerate every possible pair of addends; // keeps the range
    # bounds integral on Python 3 (Python 2 int division truncated).
    for x in range(1, (1 + numsamples) // 2 + 1):
        for y in range(0, numsamples // 2 + 1):
            fdist.inc(x + y)
    return MLEProbDist(fdist)
def demo(numsamples=6, numoutcomes=500):
    """
    A demonstration of frequency distributions and probability
    distributions. This demonstration creates three frequency
    distributions, and uses them to sample a random process with
    C{numsamples} samples. Each frequency distribution is sampled
    C{numoutcomes} times. These three frequency distributions are
    then used to build six probability distributions. Finally, the
    probability estimates of these distributions are compared to the
    actual probability of each sample.
    @type numsamples: C{int}
    @param numsamples: The number of samples to use in each demo
        frequency distributions.
    @type numoutcomes: C{int}
    @param numoutcomes: The total number of outcomes for each
        demo frequency distribution. These outcomes are divided into
        C{numsamples} bins.
    @rtype: C{None}
    """
    # Randomly sample a stochastic process three times.
    fdist1 = _create_rand_fdist(numsamples, numoutcomes)
    fdist2 = _create_rand_fdist(numsamples, numoutcomes)
    fdist3 = _create_rand_fdist(numsamples, numoutcomes)
    # Use our samples to create probability distributions; the last
    # entry is the true (analytic) distribution for comparison.
    pdists = [
        MLEProbDist(fdist1),
        LidstoneProbDist(fdist1, 0.5, numsamples),
        HeldoutProbDist(fdist1, fdist2, numsamples),
        HeldoutProbDist(fdist2, fdist1, numsamples),
        CrossValidationProbDist([fdist1, fdist2, fdist3], numsamples),
        _create_sum_pdist(numsamples),
    ]
    # Find the probability of each sample, under every distribution.
    vals = []
    for n in range(1,numsamples+1):
        vals.append(tuple([n, fdist1.freq(n)] +
                          [pdist.prob(n) for pdist in pdists]))
    # Print the results in a formatted table.
    print ('%d samples (1-%d); %d outcomes were sampled for each FreqDist' %
           (numsamples, numsamples, numoutcomes))
    print '='*9*(len(pdists)+2)
    FORMATSTR = ' FreqDist '+ '%8s '*(len(pdists)-1) + '| Actual'
    print FORMATSTR % tuple([`pdist`[1:9] for pdist in pdists[:-1]])
    print '-'*9*(len(pdists)+2)
    FORMATSTR = '%3d %8.6f ' + '%8.6f '*(len(pdists)-1) + '| %8.6f'
    for val in vals:
        print FORMATSTR % val
    # Print the totals for each column (should all be 1.0)
    zvals = zip(*vals)
    # NOTE(review): this local `sum` shadows the builtin of the same name
    # for the remainder of this function.
    def sum(lst): return reduce(lambda x,y:x+y, lst, 0)
    sums = [sum(val) for val in zvals[1:]]
    print '-'*9*(len(pdists)+2)
    FORMATSTR = 'Total ' + '%8.6f '*(len(pdists)) + '| %8.6f'
    print FORMATSTR % tuple(sums)
    print '='*9*(len(pdists)+2)
    # Display the distributions themselves, if they're short enough.
    if len(`str(fdist1)`) < 70:
        print ' fdist1:', str(fdist1)
        print ' fdist2:', str(fdist2)
        print ' fdist3:', str(fdist3)
    print
# Run a small and then a larger demonstration when executed as a script.
if __name__ == '__main__':
    demo(6, 10)
    demo(5, 5000)
| Python |
# Natural Language Toolkit: N-Gram Taggers
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@csse.unimelb.edu.au> (minor additions)
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Classes and interfaces for tagging each token of a document with
supplementary information, such as its part of speech or its WordNet
synset tag. This task, which is known as X{tagging}, is defined by
the L{TagI} interface.
"""
import types, re
from collections import deque
from en.parser.nltk_lite.probability import FreqDist, ConditionalFreqDist
from en.parser.nltk_lite.tag import *
##############################################################
# N-GRAM TAGGERS: these make use of history
##############################################################
class Window(deque):
    """
    A fixed-width window over the most recently appended items,
    implemented as a deque that always holds exactly C{width} entries.
    Unfilled slots hold C{None}; the most recent item is at the right
    end (index -1).
    """
    def __init__(self, width):
        deque.__init__(self, (None,) * width)
        self._width = width
    def clear(self):
        # Reset to all-None while keeping the fixed width invariant.
        deque.clear(self)
        self.extend((None,) * self._width)
    def append(self, item):
        # Shift everything one slot left and put the new item on the
        # right, discarding the oldest entry.
        self.rotate(-1)
        self[-1] = item
    def set(self, items):
        """Replace the contents with the last C{width} of C{items}."""
        deque.clear(self)
        if len(items) >= self._width:
            # restrict to required width, keeping the newest items
            self.extend(items[-self._width:])
        else:
            # Pad to the required width. Bug fixes: (1) pad with None
            # on the LEFT, matching append() which keeps the newest
            # item rightmost; (2) pad by width - len(items), not
            # width - 1 - len(items), which left the window one short.
            self.extend((None,) * (self._width - len(items)) + tuple(items))
class Ngram(SequentialBackoff):
"""
An I{n}-gram stochastic tagger. Before an C{tagger.Ngram}
can be used, it should be trained on a tagged corpus. Using this
training data, it will construct a frequency distribution
describing the frequencies with each word is tagged in different
contexts. The context considered consists of the word to be
tagged and the I{n-1} previous words' tags. Once the tagger has been
trained, it uses this frequency distribution to tag words by
assigning each word the tag with the maximum frequency given its
context. If the C{tagger.Ngram} encounters a word in a context
for which it has no data, it will assign it the tag C{None}.
"""
def __init__(self, n, cutoff=1, backoff=None):
"""
Construct an I{n}-gram stochastic tagger. The tagger must be trained
using the L{train()} method before being used to tag data.
@param n: The order of the new C{tagger.Ngram}.
@type n: int
@type cutoff: C{int}
@param cutoff: A count-cutoff for the tagger's frequency
distribution. If the tagger saw fewer than
C{cutoff} examples of a given context in training,
then it will return a tag of C{None} for that context.
"""
if n < 2: raise ValueError('n must be greater than 1')
self._model = {}
self._n = n
self._cutoff = cutoff
self._history = Window(n-1)
self._backoff = backoff
def train(self, tagged_corpus, verbose=False):
"""
Train this C{tagger.Ngram} using the given training data.
@param tagged_corpus: A tagged corpus. Each item should be
a C{list} of tagged tokens, where each consists of
C{text} and a C{tag}.
@type tagged_corpus: C{list} or C{iter(list)}
"""
if self.size() != 0:
raise ValueError, 'Tagger is already trained'
token_count = hit_count = 0
fd = ConditionalFreqDist()
for sentence in tagged_corpus:
self._history.clear()
for (token, tag) in sentence:
token_count += 1
history = tuple(self._history)
fd[(history, token)].inc(tag)
self._history.append(tag)
for context in fd.conditions():
best_tag = fd[context].max()
history = tuple(self._history)
backoff_tag = self._backoff_tag_one(token, history)
hits = fd[context].count(best_tag)
# is the tag we would assign different from the backoff tagger
# and do we have sufficient evidence?
if best_tag != backoff_tag and hits > self._cutoff:
self._model[context] = best_tag
hit_count += hits
# generate stats
if verbose:
size = len(self._model)
backoff = 100 - (hit_count * 100.0)/ token_count
pruning = 100 - (size * 100.0) / len(fd.conditions())
print "[Trained %d-gram tagger:" % self._n,
print "size=%d, backoff=%.2f%%, pruning=%.2f%%]" % (
size, backoff, pruning)
def tag_one(self, token, history=None):
if self.size() == 0:
raise ValueError, 'Tagger is not trained'
if history:
self._history.set(history) # NB this may truncate history
history = tuple(self._history)
context = (history, token)
if self._model.has_key(context):
return self._model[context]
if self._backoff:
return self._backoff.tag_one(token, history)
return None
def size(self):
return len(self._model)
def __repr__(self):
return '<%d-gram Tagger: size=%d, cutoff=%d>' % (
self._n, self.size(), self._cutoff)
class Bigram(Ngram):
    """A 2-gram tagger: conditions each tag on the current word and
    the single previous tag (see L{Ngram})."""
    def __init__(self, cutoff=1, backoff=None):
        Ngram.__init__(self, 2, cutoff, backoff)
class Trigram(Ngram):
    """A 3-gram tagger: conditions each tag on the current word and
    the two previous tags (see L{Ngram})."""
    def __init__(self, cutoff=1, backoff=None):
        Ngram.__init__(self, 3, cutoff, backoff)
###
#
# def print_usage_stats(self):
# total = self._total_count
# print ' %20s | %s' % ('Tagger', 'Words Tagged')
# print ' '+'-'*21+'|'+'-'*17
# for tagger in self._taggers:
# count = self._tagger_count[tagger]
# print ' %20s | %4.1f%%' % (tagger, 100.0*count/total)
#
# def __repr__(self):
# return '<BackoffTagger: %s>' % self._taggers
###
##//////////////////////////////////////////////////////
## Demonstration
##//////////////////////////////////////////////////////
def _demo_tagger(tagger, gold):
    # Score `tagger` against the gold-standard tagged data and print
    # its accuracy as a percentage.
    from en.parser.nltk_lite.tag import accuracy
    acc = accuracy(tagger, gold)
    print 'Accuracy = %4.1f%%' % (100.0 * acc)
def demo():
    """
    A simple demonstration function for the C{Tagger} classes. It
    constructs a backoff tagger using a trigram tagger, bigram tagger
    unigram tagger and a default tagger. It trains and tests the
    tagger using the Brown corpus.
    """
    from en.parser.nltk_lite.corpora import brown
    import sys
    print 'Training taggers.'
    # Create a default tagger
    t0 = Default('nn')
    # t1a = Affix(length=-3, minlength=5, backoff=t0)
    # t1b = Unigram(cutoff=2, backoff=t1a)
    # Chain of backoff taggers: trigram -> bigram -> unigram -> default.
    t1 = Unigram(cutoff=1, backoff=t0)
    t2 = Bigram(cutoff=1, backoff=t1)
    t3 = Trigram(backoff=t2)
    t1.train(brown.tagged('a'), verbose=True)
    t2.train(brown.tagged('a'), verbose=True)
    t3.train(brown.tagged('a'), verbose=True)
    # Tokenize the testing files
    # NOTE(review): test_tokens and num_words are initialised but never
    # used below.
    test_tokens = []
    num_words = 0
    # Run the taggers. For t0, t1, and t2, back-off to the default tagger.
    # This is especially important for t1 and t2, which count on
    # having known tags as contexts; if they get a context containing
    # None, then they will generate an output of None, and so all
    # words will get tagged a None.
    print '='*75
    print 'Running the taggers on test data...'
    print ' Default (nn) tagger: ',
    sys.stdout.flush()
    _demo_tagger(t0, brown.tagged('b'))
    print ' Unigram tagger: ',
    sys.stdout.flush()
    _demo_tagger(t1, list(brown.tagged('b'))[:1000])
    print ' Bigram tagger: ',
    sys.stdout.flush()
    _demo_tagger(t2, list(brown.tagged('b'))[:1000])
    print ' Trigram tagger: ',
    sys.stdout.flush()
    _demo_tagger(t3, list(brown.tagged('b'))[:1000])
    # print '\nUsage statistics for the trigram tagger:\n'
    # trigram.print_usage_stats()
    # print '='*75
# Train and evaluate the demo taggers when executed as a script.
if __name__ == '__main__':
    demo()
| Python |
# Natural Language Toolkit: Hidden Markov Model
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Trevor Cohn <tacohn@csse.unimelb.edu.au>
# Philip Blunsom <pcbl@csse.unimelb.edu.au>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
#
# $Id: hmm.py 3672 2006-11-07 16:27:02Z stevenbird $
"""
Hidden Markov Models (HMMs) largely used to assign the correct label sequence
to sequential data or assess the probability of a given label and data
sequence. These models are finite state machines characterised by a number of
states, transitions between these states, and output symbols emitted while in
each state. The HMM is an extension to the Markov chain, where each state
corresponds deterministically to a given event. In the HMM the observation is
a probabilistic function of the state. HMMs share the Markov chain's
assumption, being that the probability of transition from one state to another
only depends on the current state - i.e. the series of states that led to the
current state are not used. They are also time invariant.
The HMM is a directed graph, with probability weighted edges (representing the
probability of a transition between the source and sink states) where each
vertex emits an output symbol when entered. The symbol (or observation) is
non-deterministically generated. For this reason, knowing that a sequence of
output observations was generated by a given HMM does not mean that the
corresponding sequence of states (and what the current state is) is known.
This is the 'hidden' in the hidden markov model.
Formally, a HMM can be characterised by:
- the output observation alphabet. This is the set of symbols which may be
observed as output of the system.
- the set of states.
- the transition probabilities M{a_{ij} = P(s_t = j | s_{t-1} = i)}. These
represent the probability of transition to each state from a given
state.
- the output probability matrix M{b_i(k) = P(X_t = o_k | s_t = i)}. These
represent the probability of observing each symbol in a given state.
- the initial state distribution. This gives the probability of starting
in each state.
To ground this discussion, take a common NLP application, part-of-speech (POS)
tagging. An HMM is desirable for this task as the highest probability tag
sequence can be calculated for a given sequence of word forms. This differs
from other tagging techniques which often tag each word individually, seeking
to optimise each individual tagging greedily without regard to the optimal
combination of tags for a larger unit, such as a sentence. The HMM does this
with the Viterbi algorithm, which efficiently computes the optimal path
through the graph given the sequence of words forms.
In POS tagging the states usually have a 1:1 correspondence with the tag
alphabet - i.e. each state represents a single tag. The output observation
alphabet is the set of word forms (the lexicon), and the remaining three
parameters are derived by a training regime. With this information the
probability of a given sentence can be easily derived, by simply summing the
probability of each distinct path through the model. Similarly, the highest
probability tagging sequence can be derived with the Viterbi algorithm,
yielding a state sequence which can be mapped into a tag sequence.
This discussion assumes that the HMM has been trained. This is probably the
most difficult task with the model, and requires either MLE estimates of the
parameters or unsupervised learning using the Baum-Welch algorithm, a variant
of EM.
"""
from en.parser.nltk_lite.probability import *
from numpy import *
import re
# _NINF = float('-inf') # won't work on Windows
# Large negative stand-in for log(0), used where a true -inf literal is
# unavailable (see the commented-out float('-inf') line above).
_NINF = float('-1e300')
_TEXT = 0 # index of text in a tuple
_TAG = 1 # index of tag in a tuple
class HiddenMarkovModel(object):
"""
Hidden Markov model class, a generative model for labelling sequence data.
These models define the joint probability of a sequence of symbols and
their labels (state transitions) as the product of the starting state
probability, the probability of each state transition, and the probability
of each observation being generated from each state. This is described in
more detail in the module documentation.
This implementation is based on the HMM description in Chapter 8, Huang,
Acero and Hon, Spoken Language Processing.
"""
def __init__(self, symbols, states, transitions, outputs, priors):
"""
Creates a hidden markov model parametised by the the states,
transition probabilities, output probabilities and priors.
@param symbols: the set of output symbols (alphabet)
@type symbols: (seq) of any
@param states: a set of states representing state space
@type states: seq of any
@param transitions: transition probabilities; Pr(s_i | s_j)
is the probability of transition from state i
given the model is in state_j
@type transitions: C{ConditionalProbDistI}
@param outputs: output probabilities; Pr(o_k | s_i) is the
probability of emitting symbol k when entering
state i
@type outputs: C{ConditionalProbDistI}
@param priors: initial state distribution; Pr(s_i) is the
probability of starting in state i
@type priors: C{ProbDistI}
"""
self._states = states
self._transitions = transitions
self._symbols = symbols
self._outputs = outputs
self._priors = priors
def probability(self, sequence):
"""
Returns the probability of the given symbol sequence. If the sequence
is labelled, then returns the joint probability of the symbol, state
sequence. Otherwise, uses the forward algorithm to find the
probability over all label sequences.
@return: the probability of the sequence
@rtype: float
@param sequence: the sequence of symbols which must contain the TEXT
property, and optionally the TAG property
@type sequence: Token
"""
return exp(self.log_probability(sequence))
def log_probability(self, sequence):
"""
Returns the log-probability of the given symbol sequence. If the
sequence is labelled, then returns the joint log-probability of the
symbol, state sequence. Otherwise, uses the forward algorithm to find
the log-probability over all label sequences.
@return: the log-probability of the sequence
@rtype: float
@param sequence: the sequence of symbols which must contain the TEXT
property, and optionally the TAG property
@type sequence: Token
"""
T = len(sequence)
N = len(self._states)
if T > 0 and sequence[0][_TAG]:
last_state = sequence[0][_TAG]
p = self._priors.logprob(last_state) + \
self._outputs[last_state].logprob(sequence[0][_TEXT])
for t in range(1, T):
state = sequence[t][_TAG]
p += self._transitions[last_state].logprob(state) + \
self._outputs[state].logprob(sequence[t][_TEXT])
return p
else:
alpha = self._forward_probability(sequence)
p = _log_add(*alpha[T-1, :])
return p
def tag(self, unlabelled_sequence):
"""
Tags the sequence with the highest probability state sequence. This
uses the best_path method to find the Viterbi path.
@return: a labelled sequence of symbols
@rtype: list
@param unlabelled_sequence: the sequence of unlabelled symbols
@type unlabelled_sequence: list
"""
path = self.best_path(unlabelled_sequence)
for i in range(len(path)):
unlabelled_sequence[i] = (unlabelled_sequence[i][_TEXT], path[i])
return unlabelled_sequence
def _output_logprob(self, state, symbol):
"""
@return: the log probability of the symbol being observed in the given
state
@rtype: float
"""
return self._outputs[state].logprob(symbol)
    def best_path(self, unlabelled_sequence):
        """
        Returns the state sequence of the optimal (most probable) path through
        the HMM. Uses the Viterbi algorithm to calculate this part by dynamic
        programming.
        @return: the state sequence
        @rtype: sequence of any
        @param unlabelled_sequence: the sequence of unlabelled symbols
        @type unlabelled_sequence: list
        """
        T = len(unlabelled_sequence)
        N = len(self._states)
        # V[t, i] is the best log-probability of any path ending in
        # state i at time t; B maps (t, state) to the predecessor state
        # on that best path.
        V = zeros((T, N), float64)
        B = {}
        # find the starting log probabilities for each state
        symbol = unlabelled_sequence[0][_TEXT]
        for i, state in enumerate(self._states):
            V[0, i] = self._priors.logprob(state) + \
                self._output_logprob(state, symbol)
            B[0, state] = None
        # find the maximum log probabilities for reaching each state at time t
        for t in range(1, T):
            symbol = unlabelled_sequence[t][_TEXT]
            for j in range(N):
                sj = self._states[j]
                best = None
                for i in range(N):
                    si = self._states[i]
                    va = V[t-1, i] + self._transitions[si].logprob(sj)
                    if not best or va > best[0]:
                        best = (va, si)
                V[t, j] = best[0] + self._output_logprob(sj, symbol)
                B[t, sj] = best[1]
        # find the highest probability final state
        best = None
        for i in range(N):
            val = V[T-1, i]
            if not best or val > best[0]:
                best = (val, self._states[i])
        # traverse the back-pointers B to find the state sequence
        current = best[1]
        sequence = [current]
        for t in range(T-1, 0, -1):
            last = B[t, current]
            sequence.append(last)
            current = last
        # Back-pointers were collected newest-first; restore time order.
        sequence.reverse()
        return sequence
    def random_sample(self, rng, length):
        """
        Randomly sample the HMM to generate a sentence of a given length. This
        samples the prior distribution then the observation distribution and
        transition distribution for each subsequent observation and state.
        This will mostly generate unintelligible garbage, but can provide some
        amusement.
        @return: the randomly created state/observation sequence,
            generated according to the HMM's probability
            distributions. The SUBTOKENS have TEXT and TAG
            properties containing the observation and state
            respectively.
        @rtype: list
        @param rng: random number generator
        @type rng: Random (or any object with a random() method)
        @param length: desired output length
        @type length: int
        """
        # sample the starting state and symbol prob dists
        tokens = []
        state = self._sample_probdist(self._priors, rng.random(), self._states)
        symbol = self._sample_probdist(self._outputs[state],
                                  rng.random(), self._symbols)
        tokens.append((symbol, state))
        for i in range(1, length):
            # sample the state transition and symbol prob dists
            state = self._sample_probdist(self._transitions[state],
                                     rng.random(), self._states)
            symbol = self._sample_probdist(self._outputs[state],
                                      rng.random(), self._symbols)
            # Each token pairs the emitted symbol with its hidden state.
            tokens.append((symbol, state))
        return tokens
def _sample_probdist(self, probdist, p, samples):
cum_p = 0
for sample in samples:
add_p = probdist.prob(sample)
if cum_p <= p <= cum_p + add_p:
return sample
cum_p += add_p
raise Exception('Invalid probability distribution - does not sum to one')
    def entropy(self, unlabelled_sequence):
        """
        Returns the entropy over labellings of the given sequence. This is
        given by:
        H(O) = - sum_S Pr(S | O) log Pr(S | O)
        where the summation ranges over all state sequences, S. Let M{Z =
        Pr(O) = sum_S Pr(S, O)} where the summation ranges over all state
        sequences and O is the observation sequence. As such the entropy can
        be re-expressed as:
        H = - sum_S Pr(S | O) log [ Pr(S, O) / Z ]
        = log Z - sum_S Pr(S | O) log Pr(S, 0)
        = log Z - sum_S Pr(S | O) [ log Pr(S_0) + sum_t Pr(S_t | S_{t-1})
        + sum_t Pr(O_t | S_t) ]
        The order of summation for the log terms can be flipped, allowing
        dynamic programming to be used to calculate the entropy. Specifically,
        we use the forward and backward probabilities (alpha, beta) giving:
        H = log Z - sum_s0 alpha_0(s0) beta_0(s0) / Z * log Pr(s0)
        + sum_t,si,sj alpha_t(si) Pr(sj | si) Pr(O_t+1 | sj) beta_t(sj)
        / Z * log Pr(sj | si)
        + sum_t,st alpha_t(st) beta_t(st) / Z * log Pr(O_t | st)
        This simply uses alpha and beta to find the probabilities of partial
        sequences, constrained to include the given state(s) at some point in
        time.
        """
        T = len(unlabelled_sequence)
        N = len(self._states)
        alpha = self._forward_probability(unlabelled_sequence)
        beta = self._backward_probability(unlabelled_sequence)
        # log Z: log-sum of the final forward probabilities.
        normalisation = _log_add(*alpha[T-1, :])
        entropy = normalisation
        # starting state, t = 0
        for i, state in enumerate(self._states):
            # Posterior probability of starting in `state`, given O.
            p = exp(alpha[0, i] + beta[0, i] - normalisation)
            entropy -= p * self._priors.logprob(state)
            #print 'p(s_0 = %s) =' % state, p
        # state transitions
        for t0 in range(T - 1):
            t1 = t0 + 1
            for i0, s0 in enumerate(self._states):
                for i1, s1 in enumerate(self._states):
                    # Posterior probability of the transition s0 -> s1
                    # between times t0 and t1, given the observations.
                    p = exp(alpha[t0, i0] + self._transitions[s0].logprob(s1) +
                            self._outputs[s1].logprob(unlabelled_sequence[t1][_TEXT]) +
                            beta[t1, i1] - normalisation)
                    entropy -= p * self._transitions[s0].logprob(s1)
                    #print 'p(s_%d = %s, s_%d = %s) =' % (t0, s0, t1, s1), p
        # symbol emissions
        for t in range(T):
            for i, state in enumerate(self._states):
                # Posterior probability of being in `state` at time t.
                p = exp(alpha[t, i] + beta[t, i] - normalisation)
                entropy -= p * self._outputs[state].logprob(unlabelled_sequence[t][_TEXT])
                #print 'p(s_%d = %s) =' % (t, state), p
        return entropy
def point_entropy(self, unlabelled_sequence):
    """
    Return the pointwise entropy over the possible states at each
    position in the chain, given the observation sequence.
    """
    num_steps = len(unlabelled_sequence)
    num_states = len(self._states)

    alpha = self._forward_probability(unlabelled_sequence)
    beta = self._backward_probability(unlabelled_sequence)
    # total log-probability of the observations, used to normalise
    log_z = _log_add(*alpha[num_steps - 1, :])

    entropies = zeros(num_steps, float64)
    for t in range(num_steps):
        # log posterior of each state at time t; accumulate -p log p
        for s in range(num_states):
            log_p = alpha[t, s] + beta[t, s] - log_z
            entropies[t] -= exp(log_p) * log_p
    return entropies
def _exhaustive_entropy(self, unlabelled_sequence):
    """
    Brute-force entropy over labellings: enumerate every possible state
    sequence, score each with log_probability, then compute -sum p log p
    directly. Exponential in the sequence length; intended as a check
    against entropy().
    """
    T = len(unlabelled_sequence)

    # enumerate all state sequences of length T (N ** T of them)
    labellings = [[state] for state in self._states]
    for _ in range(T - 1):
        labellings = [prefix + [state]
                      for prefix in labellings
                      for state in self._states]

    # joint log-probability of the observations under each labelling
    log_probs = []
    for labelling in labellings:
        labelled_sequence = unlabelled_sequence[:]
        for t, label in enumerate(labelling):
            labelled_sequence[t] = (labelled_sequence[t][_TEXT], label)
        log_probs.append(self.log_probability(labelled_sequence))
    normalisation = _log_add(*log_probs)

    entropy = 0
    for lp in log_probs:
        lp -= normalisation
        entropy -= exp(lp) * lp

    return entropy
def _exhaustive_point_entropy(self, unlabelled_sequence):
    # Brute-force counterpart of point_entropy(): enumerates every state
    # sequence (N ** T of them), so it is exponential in the sequence
    # length and intended only as a correctness check.
    T = len(unlabelled_sequence)
    N = len(self._states)

    # build every possible labelling of the sequence
    labellings = [[state] for state in self._states]
    for t in range(T - 1):
        current = labellings
        labellings = []
        for labelling in current:
            for state in self._states:
                labellings.append(labelling + [state])

    # score each labelling with the joint log-probability
    log_probs = []
    for labelling in labellings:
        labelled_sequence = unlabelled_sequence[:]
        for t, label in enumerate(labelling):
            labelled_sequence[t] = (labelled_sequence[t][_TEXT], label)
        lp = self.log_probability(labelled_sequence)
        log_probs.append(lp)
    normalisation = _log_add(*log_probs)

    # accumulate, per (position, state), the log posterior probability
    probabilities = zeros((T, N), float64)
    probabilities[:] = _NINF
    for labelling, lp in zip(labellings, log_probs):
        lp -= normalisation
        for t, label in enumerate(labelling):
            index = self._states.index(label)
            probabilities[t, index] = _log_add(probabilities[t, index], lp)

    # pointwise entropy at each position: H_t = - sum_s p log p
    entropies = zeros(T, float64)
    for t in range(T):
        for s in range(N):
            entropies[t] -= exp(probabilities[t, s]) * probabilities[t, s]

    return entropies
def _forward_probability(self, unlabelled_sequence):
    """
    Return the forward probability matrix, a T by N array of
    log-probabilities, where T is the length of the sequence and N is the
    number of states. Each entry (t, s) gives the probability of being in
    state s at time t after observing the partial symbol sequence up to
    and including t.

    @return: the forward log probability matrix
    @rtype: array
    @param unlabelled_sequence: the sequence of unlabelled symbols
    @type unlabelled_sequence: list
    """
    T = len(unlabelled_sequence)
    N = len(self._states)
    alpha = zeros((T, N), float64)

    # initialisation: alpha_0(i) = log Pr(state i) + log Pr(symbol | i)
    symbol = unlabelled_sequence[0][_TEXT]
    for i, state in enumerate(self._states):
        alpha[0, i] = self._priors.logprob(state) + \
                      self._outputs[state].logprob(symbol)
    # induction: alpha_t(i) = [log-sum over j of alpha_{t-1}(j) +
    # log Pr(i | j)] + emission log-probability of the observed symbol
    for t in range(1, T):
        symbol = unlabelled_sequence[t][_TEXT]
        for i, si in enumerate(self._states):
            alpha[t, i] = _NINF   # log 0, identity for _log_add
            for j, sj in enumerate(self._states):
                alpha[t, i] = _log_add(alpha[t, i], alpha[t-1, j] +
                                       self._transitions[sj].logprob(si))
            alpha[t, i] += self._outputs[si].logprob(symbol)

    return alpha
def _backward_probability(self, unlabelled_sequence):
    """
    Return the backward probability matrix, a T by N array of
    log-probabilities, where T is the length of the sequence and N is the
    number of states. Each entry (t, s) gives the probability of being in
    state s at time t after observing the partial symbol sequence from t
    .. T.

    @return: the backward log probability matrix
    @rtype: array
    @param unlabelled_sequence: the sequence of unlabelled symbols
    @type unlabelled_sequence: list
    """
    T = len(unlabelled_sequence)
    N = len(self._states)
    beta = zeros((T, N), float64)

    # initialise the backward values: beta_{T-1}(i) = log 1 = 0
    beta[T-1, :] = log(1)

    # inductively calculate remaining backward values, working from the
    # end of the sequence towards the start
    for t in range(T-2, -1, -1):
        symbol = unlabelled_sequence[t+1][_TEXT]
        for i, si in enumerate(self._states):
            beta[t, i] = _NINF   # log 0, identity for _log_add
            for j, sj in enumerate(self._states):
                # log-sum over successors: transition + emission + beta
                beta[t, i] = _log_add(beta[t, i],
                                      self._transitions[si].logprob(sj) +
                                      self._outputs[sj].logprob(symbol) +
                                      beta[t + 1, j])

    return beta
def __repr__(self):
    """Concise summary showing the model's state and symbol counts."""
    return ('<HiddenMarkovModel %d states and %d output symbols>'
            % (len(self._states), len(self._symbols)))
class HiddenMarkovModelTrainer(object):
"""
Algorithms for learning HMM parameters from training data. These include
both supervised learning (MLE) and unsupervised learning (Baum-Welch).
"""
def __init__(self, states=None, symbols=None):
"""
Creates an HMM trainer to induce an HMM with the given states and
output symbol alphabet. A supervised and unsupervised training
method may be used. If either of the states or symbols are not given,
these may be derived from supervised training.
@param states: the set of state labels
@type states: sequence of any
@param symbols: the set of observation symbols
@type symbols: sequence of any
"""
if states:
self._states = states
else:
self._states = []
if symbols:
self._symbols = symbols
else:
self._symbols = []
def train(self, labelled_sequences=None, unlabelled_sequences=None,
**kwargs):
"""
Trains the HMM using both (or either of) supervised and unsupervised
techniques.
@return: the trained model
@rtype: HiddenMarkovModel
@param labelled_sequences: the supervised training data, a set of
labelled sequences of observations
@type labelled_sequences: list
@param unlabelled_sequences: the unsupervised training data, a set of
sequences of observations
@type unlabelled_sequences: list
@param kwargs: additional arguments to pass to the training methods
"""
assert labelled_sequences or unlabelled_sequences
model = None
if labelled_sequences:
model = self.train_supervised(labelled_sequences, **kwargs)
if unlabelled_sequences:
if model: kwargs['model'] = model
model = self.train_unsupervised(unlabelled_sequences, **kwargs)
return model
def train_unsupervised(self, unlabelled_sequences, **kwargs):
"""
Trains the HMM using the Baum-Welch algorithm to maximise the
probability of the data sequence. This is a variant of the EM
algorithm, and is unsupervised in that it doesn't need the state
sequences for the symbols. The code is based on 'A Tutorial on Hidden
Markov Models and Selected Applications in Speech Recognition',
Lawrence Rabiner, IEEE, 1989.
@return: the trained model
@rtype: HiddenMarkovModel
@param unlabelled_sequences: the training data, a set of
sequences of observations
@type unlabelled_sequences: list
@param kwargs: may include the following parameters::
model - a HiddenMarkovModel instance used to begin the Baum-Welch
algorithm
max_iterations - the maximum number of EM iterations
convergence_logprob - the maximum change in log probability to
allow convergence
"""
N = len(self._states)
M = len(self._symbols)
symbol_dict = dict([(self._symbols[i], i) for i in range(M)])
# create a uniform HMM, which will be iteratively refined, unless
# given an existing model
model = kwargs.get('model')
if not model:
priors = UniformProbDist(self._states)
transitions = DictionaryConditionalProbDist(
dict([(state, UniformProbDist(self._states))
for state in self._states]))
output = DictionaryConditionalProbDist(
dict([(state, UniformProbDist(self._symbols))
for state in self._states]))
model = HiddenMarkovModel(self._symbols, self._states,
transitions, output, priors)
# update model prob dists so that they can be modified
model._priors = MutableProbDist(model._priors, self._states)
model._transitions = DictionaryConditionalProbDist(
dict([(s, MutableProbDist(model._transitions[s], self._states))
for s in self._states]))
model._outputs = DictionaryConditionalProbDist(
dict([(s, MutableProbDist(model._outputs[s], self._symbols))
for s in self._states]))
# iterate until convergence
converged = False
last_logprob = None
iteration = 0
max_iterations = kwargs.get('max_iterations', 1000)
epsilon = kwargs.get('convergence_logprob', 1e-6)
while not converged and iteration < max_iterations:
A_numer = ones((N, N), float64) * _NINF
B_numer = ones((N, M), float64) * _NINF
A_denom = ones(N, float64) * _NINF
B_denom = ones(N, float64) * _NINF
logprob = 0
for sequence in unlabelled_sequences:
# compute forward and backward probabilities
alpha = model._forward_probability(sequence)
beta = model._backward_probability(sequence)
# find the log probability of the sequence
T = len(sequence)
lpk = _log_add(*alpha[T-1, :])
logprob += lpk
# now update A and B (transition and output probabilities)
# using the alpha and beta values. Please refer to Rabiner's
# paper for details, it's too hard to explain in comments
local_A_numer = ones((N, N), float64) * _NINF
local_B_numer = ones((N, M), float64) * _NINF
local_A_denom = ones(N, float64) * _NINF
local_B_denom = ones(N, float64) * _NINF
# for each position, accumulate sums for A and B
for t in range(T):
x = sequence[t][_TEXT] #not found? FIXME
if t < T - 1:
xnext = sequence[t+1][_TEXT] #not found? FIXME
xi = symbol_dict[x]
for i in range(N):
si = self._states[i]
if t < T - 1:
for j in range(N):
sj = self._states[j]
local_A_numer[i, j] = \
_log_add(local_A_numer[i, j],
alpha[t, i] +
model._transitions[si].logprob(sj) +
model._outputs[sj].logprob(xnext) +
beta[t+1, j])
local_A_denom[i] = _log_add(local_A_denom[i],
alpha[t, i] + beta[t, i])
else:
local_B_denom[i] = _log_add(local_A_denom[i],
alpha[t, i] + beta[t, i])
local_B_numer[i, xi] = _log_add(local_B_numer[i, xi],
alpha[t, i] + beta[t, i])
# add these sums to the global A and B values
for i in range(N):
for j in range(N):
A_numer[i, j] = _log_add(A_numer[i, j],
local_A_numer[i, j] - lpk)
for k in range(M):
B_numer[i, k] = _log_add(B_numer[i, k],
local_B_numer[i, k] - lpk)
A_denom[i] = _log_add(A_denom[i], local_A_denom[i] - lpk)
B_denom[i] = _log_add(B_denom[i], local_B_denom[i] - lpk)
# use the calculated values to update the transition and output
# probability values
for i in range(N):
si = self._states[i]
for j in range(N):
sj = self._states[j]
model._transitions[si].update(sj, A_numer[i,j] - A_denom[i])
for k in range(M):
ok = self._symbols[k]
model._outputs[si].update(ok, B_numer[i,k] - B_denom[i])
# Rabiner says the priors don't need to be updated. I don't
# believe him. FIXME
# test for convergence
if iteration > 0 and abs(logprob - last_logprob) < epsilon:
converged = True
print 'iteration', iteration, 'logprob', logprob
iteration += 1
last_logprob = logprob
return model
def train_supervised(self, labelled_sequences, **kwargs):
"""
Supervised training maximising the joint probability of the symbol and
state sequences. This is done via collecting frequencies of
transitions between states, symbol observations while within each
state and which states start a sentence. These frequency distributions
are then normalised into probability estimates, which can be
smoothed if desired.
@return: the trained model
@rtype: HiddenMarkovModel
@param labelled_sequences: the training data, a set of
labelled sequences of observations
@type labelled_sequences: list
@param kwargs: may include an 'estimator' parameter, a function taking
a C{FreqDist} and a number of bins and returning a C{ProbDistI};
otherwise a MLE estimate is used
"""
# default to the MLE estimate
estimator = kwargs.get('estimator')
if estimator == None:
estimator = lambda fdist, bins: MLEProbDist(fdist)
# count occurences of starting states, transitions out of each state
# and output symbols observed in each state
starting = FreqDist()
transitions = ConditionalFreqDist()
outputs = ConditionalFreqDist()
for sequence in labelled_sequences:
lasts = None
for token in sequence:
state = token[_TAG]
symbol = token[_TEXT]
if lasts == None:
starting.inc(state)
else:
transitions[lasts].inc(state)
outputs[state].inc(symbol)
lasts = state
# update the state and symbol lists
if state not in self._states:
self._states.append(state)
if symbol not in self._symbols:
self._symbols.append(symbol)
# create probability distributions (with smoothing)
N = len(self._states)
pi = estimator(starting, N)
A = ConditionalProbDist(transitions, estimator, False, N)
B = ConditionalProbDist(outputs, estimator, False, len(self._symbols))
return HiddenMarkovModel(self._symbols, self._states, A, B, pi)
def _log_add(*values):
    """
    Adds the logged values, returning the logarithm of the addition.
    """
    # factor out the maximum for numerical stability: log sum exp(v)
    # = m + log sum exp(v - m)
    maximum = max(values)
    if not maximum > _NINF:
        # all inputs are log 0; the sum is log 0 as well
        return maximum
    total = 0
    for value in values:
        total += exp(value - maximum)
    return maximum + log(total)
def demo():
# demonstrates HMM probability calculation
# example taken from page 381, Huang et al
symbols = ['up', 'down', 'unchanged']
states = ['bull', 'bear', 'static']
def pd(values, samples):
d = {}
for value, item in zip(values, samples):
d[item] = value
return DictionaryProbDist(d)
def cpd(array, conditions, samples):
d = {}
for values, condition in zip(array, conditions):
d[condition] = pd(values, samples)
return DictionaryConditionalProbDist(d)
A = array([[0.6, 0.2, 0.2], [0.5, 0.3, 0.2], [0.4, 0.1, 0.5]], float64)
A = cpd(A, states, states)
B = array([[0.7, 0.1, 0.2], [0.1, 0.6, 0.3], [0.3, 0.3, 0.4]], float64)
B = cpd(B, states, symbols)
pi = array([0.5, 0.2, 0.3], float64)
pi = pd(pi, states)
model = HiddenMarkovModel(symbols=symbols, states=states,
transitions=A, outputs=B, priors=pi)
print 'Testing', model
for test in [['up', 'up'], ['up', 'down', 'up'],
['down'] * 5, ['unchanged'] * 5 + ['up']]:
sequence = [(t, None) for t in test]
print 'Testing with state sequence', test
print 'probability =', model.probability(sequence)
print 'tagging = ', model.tag(sequence)
print 'p(tagged) = ', model.probability(sequence)
print 'H = ', model.entropy(sequence)
print 'H_exh = ', model._exhaustive_entropy(sequence)
print 'H(point) = ', model.point_entropy(sequence)
print 'H_exh(point)=', model._exhaustive_point_entropy(sequence)
print
def load_pos():
    """
    Load the first 100 tagged sentences of the Brown corpus, normalising
    words to lower case and collapsing tags outside the known tag set
    to '*'.

    @return: a (sentences, tag_set, symbols) triple: the cleaned tagged
        sentences, the list of valid tags, and the list of observed words
    """
    from en.parser.nltk_lite.corpora import brown
    from itertools import islice

    sentences = list(islice(brown.tagged(), 100))

    tag_set = ["'", "''", '(', ')', '*', ',', '.', ':', '--', '``', 'abl',
        'abn', 'abx', 'ap', 'ap$', 'at', 'be', 'bed', 'bedz', 'beg', 'bem',
        'ben', 'ber', 'bez', 'cc', 'cd', 'cd$', 'cs', 'do', 'dod', 'doz',
        'dt', 'dt$', 'dti', 'dts', 'dtx', 'ex', 'fw', 'hv', 'hvd', 'hvg',
        'hvn', 'hvz', 'in', 'jj', 'jjr', 'jjs', 'jjt', 'md', 'nn', 'nn$',
        'nns', 'nns$', 'np', 'np$', 'nps', 'nps$', 'nr', 'nr$', 'od', 'pn',
        'pn$', 'pp$', 'ppl', 'ppls', 'ppo', 'pps', 'ppss', 'ql', 'qlp', 'rb',
        'rb$', 'rbr', 'rbt', 'rp', 'to', 'uh', 'vb', 'vbd', 'vbg', 'vbn',
        'vbz', 'wdt', 'wp$', 'wpo', 'wps', 'wql', 'wrb']

    symbols = set()
    # a tag is truncated at its first -, * or + modifier character
    start_re = re.compile(r'[^-*+]*')
    for sentence in sentences:
        # NOTE: removed the unused `sequences`/`sequence` locals the
        # original accumulated but never read
        for i, (word, tag) in enumerate(sentence):
            word = word.lower()   # normalize case
            symbols.add(word)     # log this word
            # cleanup the tag: strip modifiers, map unknown tags to '*'
            tag = start_re.match(tag).group(0)
            if tag not in tag_set:
                tag = '*'
            sentence[i] = (word, tag)  # store cleaned-up tagged token

    return sentences, tag_set, list(symbols)
def test_pos(model, sentences, display=False):
from sys import stdout
count = correct = 0
for sentence in sentences:
sentence = [(token[0], None) for token in sentence]
pts = model.best_path(sentence)
if display:
print sentence
print 'HMM >>>'
print pts
print model.entropy(sentences)
print '-' * 60
else:
print '\b.',
stdout.flush()
for token, tag in zip(sentence, pts):
count += 1
if tag == token[TAG]:
correct += 1
print 'accuracy over', count, 'tokens %.1f' % (100.0 * correct / count)
def demo_pos():
# demonstrates POS tagging using supervised training
print 'Training HMM...'
labelled_sequences, tag_set, symbols = load_pos()
trainer = HiddenMarkovModelTrainer(tag_set, symbols)
hmm = trainer.train_supervised(labelled_sequences[100:],
estimator=lambda fd, bins: LidstoneProbDist(fd, 0.1, bins))
print 'Testing...'
test_pos(hmm, labelled_sequences[:100], True)
def _untag(sentences):
    """Strip the tags from tagged sentences, leaving (word, None) pairs."""
    return [[(token[0], None) for token in sentence]
            for sentence in sentences]
def demo_pos_bw():
# demonstrates the Baum-Welch algorithm in POS tagging
print 'Training HMM (supervised)...'
sentences, tag_set, symbols = load_pos()
symbols = set()
for sentence in sentences:
for token in sentence:
symbols.add(token[_TEXT])
trainer = HiddenMarkovModelTrainer(tag_set, list(symbols))
hmm = trainer.train_supervised(sentences[100:300],
estimator=lambda fd, bins: LidstoneProbDist(fd, 0.1, bins))
print 'Training (unsupervised)...'
# it's rather slow - so only use 10 samples
unlabelled = _untag(sentences[301:311])
hmm = trainer.train_unsupervised(unlabelled, model=hmm, max_iterations=5)
test_pos(hmm, sentences[:100], True)
def demo_bw():
    # demo Baum Welch by generating some sequences and then performing
    # unsupervised training on them
    # example taken from page 381, Huang et al
    symbols = ['up', 'down', 'unchanged']
    states = ['bull', 'bear', 'static']

    def pd(values, samples):
        # wrap one row of probabilities as a DictionaryProbDist
        return DictionaryProbDist(dict(zip(samples, values)))

    def cpd(array, conditions, samples):
        # one distribution per condition, from the matching matrix row
        mapping = {}
        for values, condition in zip(array, conditions):
            mapping[condition] = pd(values, samples)
        return DictionaryConditionalProbDist(mapping)

    A = array([[0.6, 0.2, 0.2], [0.5, 0.3, 0.2], [0.4, 0.1, 0.5]], float64)
    A = cpd(A, states, states)
    B = array([[0.7, 0.1, 0.2], [0.1, 0.6, 0.3], [0.3, 0.3, 0.4]], float64)
    B = cpd(B, states, symbols)
    pi = pd(array([0.5, 0.2, 0.3], float64), states)

    model = HiddenMarkovModel(symbols=symbols, states=states,
                              transitions=A, outputs=B, priors=pi)

    # generate some random sequences
    import random
    rng = random.Random()
    training = []
    for i in range(10):
        item = model.random_sample(rng, 5)
        training.append([(tok[0], None) for tok in item])

    # train on those examples, starting with the model that generated them
    trainer = HiddenMarkovModelTrainer(states, symbols)
    hmm = trainer.train_unsupervised(training, model=model,
                                     max_iterations=1000)
# Script entry point: run the self-contained probability demo. The
# corpus-based and Baum-Welch demos are slower and left disabled.
if __name__ == '__main__':
    demo()
    #demo_pos()
    #demo_pos_bw()
    #demo_bw()
| Python |
# Natural Language Toolkit: Unigram Taggers
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@csse.unimelb.edu.au> (minor additions)
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Classes and interfaces for tagging each token of a document with
supplementary information, such as its part of speech or its WordNet
synset tag. This task, which is known as X{tagging}, is defined by
the L{TagI} interface.
"""
import re
from en.parser.nltk_lite.probability import FreqDist, ConditionalFreqDist
##############################################################
# UNIGRAM TAGGERS: only use information about the current word
##############################################################
from en.parser.nltk_lite.tag import *
class Unigram(SequentialBackoff):
"""
A unigram stochastic tagger. Before C{tag.Unigram} can be
used, it should be trained on a tagged corpus. Using this
training data, it will find the most likely tag for each word
type. It will then use this information to assign the most
frequent tag to each word. If C{tag.Unigram} encounters a
word which it has no data, it will assign it the
tag C{None}.
"""
def __init__(self, cutoff=1, backoff=None):
"""
Construct a new unigram stochastic tagger. The new tagger
should be trained, using the L{train()} method, before it is
used to tag data.
"""
self._model = {}
self._cutoff = cutoff
self._backoff = backoff
self._history = None
def train(self, tagged_corpus, verbose=False):
"""
Train C{tag.Unigram} using the given training data.
@param tagged_corpus: A tagged corpus. Each item should be
a C{list} of tagged tokens, where each consists of
C{text} and a C{tag}.
@type tagged_corpus: C{list} or C{iter(list)}
"""
if self.size() != 0:
raise ValueError, 'Tagger is already trained'
token_count = hit_count = 0
fd = ConditionalFreqDist()
if isinstance(tagged_corpus, list) and isinstance(tagged_corpus[0], tuple):
tagged_corpus = [tagged_corpus]
for sentence in tagged_corpus:
for (token, tag) in sentence:
token_count += 1
fd[token].inc(tag)
for token in fd.conditions():
best_tag = fd[token].max()
backoff_tag = self._backoff_tag_one(token)
hits = fd[token].count(best_tag)
# is the tag we would assign different from the backoff tagger
# and do we have sufficient evidence?
if best_tag != backoff_tag and hits > self._cutoff:
self._model[token] = best_tag
hit_count += hits
# generate stats
if verbose:
size = len(self._model)
backoff = 100 - (hit_count * 100.0)/ token_count
pruning = 100 - (size * 100.0) / len(fd.conditions())
print "[Trained Unigram tagger:",
print "size=%d, backoff=%.2f%%, pruning=%.2f%%]" % (
size, backoff, pruning)
def tag_one(self, token, history=None):
if self.size() == 0:
raise ValueError, 'Tagger is not trained'
if self._model.has_key(token):
return self._model[token]
if self._backoff:
return self._backoff.tag_one(token, history)
return None
def size(self):
return len(self._model)
def __repr__(self):
return '<Unigram Tagger: size=%d, cutoff=%d>' % (
self.size(), self._cutoff)
# Affix tagger, based on code by Tiago Tresoldi <tresoldi@users.sf.net>
class Affix(SequentialBackoff):
"""
A unigram tagger that assign tags to tokens based on leading or
trailing substrings (it is important to note that the substrings
are not necessarily "true" morphological affixes). Before
C{tag.Affix} can be used, it should be trained on a tagged
corpus. Using this training data, it will find the most likely tag
for each word type. It will then use this information to assign
the most frequent tag to each word. If the C{tag.Affix}
encounters a prefix or suffix in a word for which it has no data,
it will assign the tag C{None}.
"""
def __init__ (self, length, minlength, cutoff=1, backoff=None):
"""
Construct a new affix stochastic tagger. The new tagger should be
trained, using the L{train()} method, before it is used to tag
data.
@type length: C{number}
@param length: The length of the affix to be considered during
training and tagging (negative for suffixes)
@type minlength: C{number}
@param minlength: The minimum length for a word to be considered
during training and tagging. It must be longer that C{length}.
"""
# SequentialBackoff.__init__(self)
self._model = {}
assert minlength > 0
self._length = length
self._minlength = minlength
self._cutoff = cutoff
self._backoff = backoff
self._history = None
def _get_affix(self, token):
if self._length > 0:
return token[:self._length]
else:
return token[self._length:]
def train(self, tagged_corpus, verbose=False):
"""
Train C{tag.Affix} using the given training data. If this
method is called multiple times, then the training data will be
combined.
@param tagged_corpus: A tagged corpus. Each item should be
a C{list} of tagged tokens, where each consists of
C{text} and a C{tag}.
@type tagged_corpus: C{list} or C{iter(list)}
"""
if self.size() != 0:
raise ValueError, 'Tagger is already trained'
token_count = hit_count = 0
fd = ConditionalFreqDist()
for sentence in tagged_corpus:
for (token, tag) in sentence:
token_count += 1
# If token is long enough
if len(token) >= self._minlength:
backoff_tag = self._backoff_tag_one(token)
if tag != backoff_tag:
# get the affix and record it
affix = self._get_affix(token)
hit_count += 1
fd[affix].inc(tag)
for affix in fd.conditions():
best_tag = fd[affix].max()
if fd[affix].count(best_tag) > self._cutoff:
self._model[affix] = best_tag
# generate stats
if verbose:
size = len(self._model)
backoff = 100 - (hit_count * 100.0)/ token_count
pruning = 100 - (size * 100.0) / len(fd.conditions())
print "[Trained Affix tagger:",
print "size=%d, backoff=%.2f%%, pruning=%.2f%%]" % (
size, backoff, pruning)
def tag_one(self, token, history=None):
if self.size() == 0:
raise ValueError, 'Tagger is not trained'
affix = self._get_affix(token)
if len(token) >= self._minlength and self._model.has_key(affix):
return self._model[affix]
if self._backoff:
return self._backoff.tag_one(token, history)
return None
def size(self):
return len(self._model)
def __repr__(self):
return '<Affix Tagger: size=%d, cutoff=%d>' % (
self.size(), self._cutoff)
class Regexp(SequentialBackoff):
    """
    A tagger that assigns each word the tag of the first regular
    expression that matches it.
    """
    def __init__(self, regexps, backoff=None):
        """
        Construct a new regexp tagger.

        @type regexps: C{list} of C{(string,string)}
        @param regexps: An ordered list of C{(regexp, tag)} pairs; a word
            matching C{regexp} is tagged with C{tag}. When no pattern
            matches, the optional backoff tagger is consulted, and
            failing that the word receives the tag C{None}.
        """
        self._regexps = regexps
        self._backoff = backoff
        self._history = None

    def tag_one(self, token, history=None):
        # first matching pattern wins; `history` is ignored
        for pattern, tag in self._regexps:
            if re.match(pattern, token):
                return tag
        if self._backoff:
            return self._backoff.tag_one(token, history)
        return None

    def __repr__(self):
        return '<Regexp Tagger: size=%d>' % len(self._regexps)
class Lookup(SequentialBackoff):
    """
    A tagger that assigns tags to words from a fixed lookup table.
    """
    def __init__(self, table, backoff=None):
        """
        Construct a new lookup tagger.

        @type table: C{dict} from C{string} to C{string}
        @param table: A dictionary mapping each word to its tag. Words
            absent from the table are passed to the optional backoff
            tagger, and failing that receive the tag C{None}.
        """
        self._table = table
        self._backoff = backoff
        self._history = None

    def tag_one(self, token, history=None):
        # direct table lookup; `history` is ignored
        if token in self._table:
            return self._table[token]
        if self._backoff:
            return self._backoff.tag_one(token, history)
        return None

    def __repr__(self):
        return '<Lookup Tagger: size=%d>' % len(self._table)
##//////////////////////////////////////////////////////
## Demonstration
##//////////////////////////////////////////////////////
def _demo_tagger(tagger, gold):
from en.parser.nltk_lite.tag import accuracy
acc = accuracy(tagger, gold)
print 'Accuracy = %4.1f%%' % (100.0 * acc)
def demo():
"""
A simple demonstration function for the C{Tagger} classes. It
constructs a backoff tagger using a trigram tagger, bigram tagger
unigram tagger and a default tagger. It trains and tests the
tagger using the Brown corpus.
"""
from en.parser.nltk_lite.corpora import brown
from en.parser.nltk_lite import tag
import sys
print 'Training taggers.'
# Create a default tagger
t0 = tag.Default('nn')
t1 = tag.Unigram(cutoff=1, backoff=t0)
t1.train(brown.tagged('a'), verbose=True)
t2 = tag.Affix(-3, 5, cutoff=2, backoff=t0)
t2.train(brown.tagged('a'), verbose=True)
t3 = tag.Regexp([(r'.*ed', 'vbd')], backoff=t0) # no training
t4 = tag.Lookup({'the': 'dt'}, backoff=t0)
test_tokens = []
num_words = 0
print '='*75
print 'Running the taggers on test data...'
print ' Default (nn) tagger: ',
sys.stdout.flush()
_demo_tagger(t0, brown.tagged('b'))
print ' Unigram tagger: ',
sys.stdout.flush()
_demo_tagger(t1, list(brown.tagged('b'))[:1000])
print ' Affix tagger: ',
sys.stdout.flush()
_demo_tagger(t2, list(brown.tagged('b'))[:1000])
print ' Regexp tagger: ',
sys.stdout.flush()
_demo_tagger(t3, list(brown.tagged('b'))[:1000])
print ' Lookup tagger: ',
sys.stdout.flush()
_demo_tagger(t4, list(brown.tagged('b'))[:1000])
# Script entry point: run the tagger demonstration.
if __name__ == '__main__':
    demo()
| Python |
# Natural Language Toolkit: Brill Tagger
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Authors: Christopher Maloof <cjmaloof@gradient.cis.upenn.edu>
# Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@csse.unimelb.edu.au>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Brill's transformational rule-based tagger.
"""
from en.parser.nltk_lite.tag import TagI
import bisect # for binary search through a subset of indices
import os # for finding WSJ files
import random # for shuffling WSJ files
import sys # for getting command-line arguments
######################################################################
## The Brill Tagger
######################################################################
class Brill(TagI):
    """
    Brill's transformational rule-based tagger. Brill taggers use an
    X{initial tagger} (such as L{tag.Default}) to assign an intial
    tag sequence to a text; and then apply an ordered list of
    transformational rules to correct the tags of individual tokens.
    These transformation rules are specified by the L{BrillRuleI}
    interface.

    Brill taggers can be created directly, from an initial tagger and
    a list of transformational rules; but more often, Brill taggers
    are created by learning rules from a training corpus, using either
    L{BrillTrainer} or L{FastBrillTrainer}.
    """
    def __init__(self, initial_tagger, rules):
        """
        @param initial_tagger: The initial tagger
        @type initial_tagger: L{TagI}
        @param rules: An ordered list of transformation rules that
            should be used to correct the initial tagging.
        @type rules: C{list} of L{BrillRuleI}
        """
        self._initial_tagger = initial_tagger
        self._rules = rules

    def rules(self):
        # Return a copy so callers cannot mutate the tagger's rule list.
        return self._rules[:]

    def tag (self, tokens):
        # Inherit documentation from TagI
        # NOTE: this is a generator; tagged tokens are yielded at the end,
        # after all rules have been applied in place.

        # Run the initial tagger.
        tagged_tokens = list(self._initial_tagger.tag(tokens))

        # Create a dictionary that maps each tag to a list of the
        # indices of tokens that have that tag.
        tag_to_positions = {}
        for i, (token, tag) in enumerate(tagged_tokens):
            if tag not in tag_to_positions:
                tag_to_positions[tag] = set([i])
            else:
                tag_to_positions[tag].add(i)

        # Apply each rule, in order. Only try to apply rules at
        # positions that have the desired original tag.
        for rule in self._rules:
            # Find the positions where it might apply
            positions = tag_to_positions.get(rule.original_tag(), [])
            # Apply the rule at those positions.
            changed = rule.apply_at(tagged_tokens, positions)
            # Update tag_to_positions with the positions of tags that
            # were modified.
            for i in changed:
                tag_to_positions[rule.original_tag()].remove(i)
                if rule.replacement_tag() not in tag_to_positions:
                    tag_to_positions[rule.replacement_tag()] = set([i])
                else:
                    tag_to_positions[rule.replacement_tag()].add(i)

        for t in tagged_tokens:
            yield t
######################################################################
## Brill Rules
######################################################################
class BrillRuleI(object):
    """
    An interface for tag transformations on a tagged corpus, as
    performed by brill taggers. Each transformation finds all tokens
    in the corpus that are tagged with a specific X{original tag} and
    satisfy a specific X{condition}, and replaces their tags with a
    X{replacement tag}. For any given transformation, the original
    tag, replacement tag, and condition are fixed. Conditions may
    depend on the token under consideration, as well as any other
    tokens in the corpus.

    Brill rules must be comparable and hashable.
    """
    def apply_to(self, tokens):
        """
        Apply this rule everywhere it applies in the corpus. I.e.,
        for each token in the corpus that is tagged with this rule's
        original tag, and that satisfies this rule's condition, set
        its tag to be this rule's replacement tag.

        @param tokens: The tagged corpus
        @type tokens: C{list} of C{tuple}
        @return: The indices of tokens whose tags were changed by this
            rule.
        @rtype: C{list} of C{int}
        """
        return self.apply_at(tokens, range(len(tokens)))

    def apply_at(self, tokens, positions):
        """
        Apply this rule at every position in C{positions} where it
        applies to the corpus. I.e., for each position M{p} in
        C{positions}, if C{tokens[M{p}]} is tagged with this rule's
        original tag, and satisfies this rule's condition, then set
        its tag to be this rule's replacement tag.

        @param tokens: The tagged corpus
        @type tokens: list of Token
        @type positions: C{list} of C{int}
        @param positions: The positions where the transformation is to
            be tried.
        @return: The indices of tokens whose tags were changed by this
            rule.
        @rtype: C{int}
        """
        assert False, "BrillRuleI is an abstract interface"

    def applies(self, tokens, index):
        """
        @return: True if the rule would change the tag of
            C{tokens[index]}, False otherwise
        @rtype: Boolean

        @param tokens: A tagged corpus
        @type tokens: list of Token
        @param index: The index to check
        @type index: int
        """
        assert False, "BrillRuleI is an abstract interface"

    def original_tag(self):
        """
        @return: The tag which this C{BrillRuleI} may cause to be
            replaced.
        @rtype: any
        """
        assert False, "BrillRuleI is an abstract interface"

    def replacement_tag(self):
        """
        @return: the tag with which this C{BrillRuleI} may replace
            another tag.
        @rtype: any
        """
        assert False, "BrillRuleI is an abstract interface"

    # Rules must be comparable and hashable for the algorithm to work
    def __eq__(self, other):
        # BUG FIX: the original signature was __eq__(self) with no
        # `other` parameter, so any equality comparison raised TypeError
        # instead of reaching this deliberate assertion.
        assert False, "Brill rules must be comparable"

    def __hash__(self):
        assert False, "Brill rules must be hashable"
class ProximateTokensRule(BrillRuleI):
    """
    An abstract base class for brill rules whose condition checks for
    the presence of tokens with given properties at given ranges of
    positions, relative to the token.

    Each subclass of proximate tokens brill rule defines a method
    M{extract_property}, which extracts a specific property from the
    token, such as its text or tag.  Each instance is
    parameterized by a set of tuples, specifying ranges of positions
    and property values to check for in those ranges:

      - (M{start}, M{end}, M{value})

    The brill rule is then applicable to the M{n}th token iff:

      - The M{n}th token is tagged with the rule's original tag; and
      - For each (M{start}, M{end}, M{value}) triple:
        - The property value of at least one token between
          M{n+start} and M{n+end} (inclusive) is M{value}.

    For example, a proximate token brill template with M{start=end=-1}
    generates rules that check just the property of the preceding
    token.  Note that multiple properties may be included in a single
    rule; the rule applies if they all hold.
    """
    def __init__(self, original_tag, replacement_tag, *conditions):
        """
        Construct a new brill rule that changes a token's tag from
        C{original_tag} to C{replacement_tag} if all of the properties
        specified in C{conditions} hold.

        @type conditions: C{tuple} of C{(int, int, *)}
        @param conditions: A list of 3-tuples C{(start, end, value)},
            each of which specifies that the property of at least one
            token between M{n}+C{start} and M{n}+C{end} (inclusive) is
            C{value}.
        @raise ValueError: If C{start}>C{end} for any condition.
        """
        assert self.__class__ != ProximateTokensRule, \
               "ProximateTokensRule is an abstract base class"
        self._original = original_tag
        self._replacement = replacement_tag
        self._conditions = conditions
        # Validate each condition's range up front.
        for (s,e,v) in conditions:
            if s>e:
                raise ValueError('Condition %s has an invalid range' %
                                 ((s,e,v),))

    def extract_property(token): # [staticmethod]
        """
        Returns some property characterizing this token, such as its
        base lexical item or its tag.

        Each implementation of this method should correspond to an
        implementation of the method with the same name in a subclass
        of L{ProximateTokensTemplate}.

        @param token: The token
        @type token: Token
        @return: The property
        @rtype: any
        """
        assert False, "ProximateTokensRule is an abstract interface"
    # Old-style staticmethod wrapping (pre-decorator idiom used in this file).
    extract_property = staticmethod(extract_property)

    def apply_at(self, tokens, positions):
        # Inherit docs from BrillRuleI

        # Find all locations where the rule is applicable
        change = []
        for i in positions:
            if self.applies(tokens, i):
                change.append(i)

        # Make the changes.  Note: this must be done in a separate
        # step from finding applicable locations, since we don't want
        # the rule to interact with itself.
        for i in change:
            (token, tag) = tokens[i]
            tokens[i] = (token, self._replacement)

        return change

    def applies(self, tokens, index):
        # Inherit docs from BrillRuleI

        # Does the given token have this rule's "original tag"?
        if tokens[index][1] != self._original:
            return False

        # Check to make sure that every condition holds.
        for (start, end, val) in self._conditions:
            # Find the (absolute) start and end indices, clamped to
            # the corpus boundaries.
            s = max(0, index+start)
            e = min(index+end+1, len(tokens))

            # Look for *any* token that satisfies the condition.
            for i in range(s, e):
                if self.extract_property(tokens[i]) == val:
                    break
            else:
                # No token satisfied the condition; return false.
                return False

        # Every condition checked out, so the rule is applicable.
        return True

    def original_tag(self):
        # Inherit docs from BrillRuleI
        return self._original

    def replacement_tag(self):
        # Inherit docs from BrillRuleI
        return self._replacement

    def __eq__(self, other):
        # Rules are equal iff they are the same concrete class with
        # identical original tag, replacement tag, and conditions.
        return (other != None and
                other.__class__ == self.__class__ and
                self._original == other._original and
                self._replacement == other._replacement and
                self._conditions == other._conditions)

    def __hash__(self):
        # Needs to include extract_property in order to distinguish subclasses
        # A nicer way would be welcome.
        # NOTE(review): func_code is the Python 2 spelling of the code
        # object attribute (__code__ in Python 3).
        return hash( (self._original, self._replacement, self._conditions,
                      self.extract_property.func_code) )

    def __repr__(self):
        # Compact developer-oriented form, e.g. <ProximateTagsRule: A->B if ...>
        conditions = ' and '.join(['%s in %d...%d' % (v,s,e)
                                   for (s,e,v) in self._conditions])
        return '<%s: %s->%s if %s>' % (self.__class__.__name__,
                                       self._original, self._replacement,
                                       conditions)

    def __str__(self):
        # Human-readable form, e.g. "A -> B if the tag of this word is 'X'".
        replacement = '%s -> %s' % (self._original,
                                    self._replacement)
        if len(self._conditions) == 0:
            conditions = ''
        else:
            conditions = ' if '+ ', and '.join([self._condition_to_str(c)
                                                for c in self._conditions])
        return replacement+conditions

    def _condition_to_str(self, condition):
        """
        Return a string representation of the given condition.
        This helper method is used by L{__str__}.
        """
        (start, end, value) = condition
        return ('the %s of %s is %r' %
                (self.PROPERTY_NAME, self._range_to_str(start, end), value))

    def _range_to_str(self, start, end):
        """
        Return a string representation for the given range.  This
        helper method is used by L{__str__}.
        """
        if start == end == 0:
            return 'this word'
        if start == end == -1:
            return 'the preceding word'
        elif start == end == 1:
            return 'the following word'
        elif start == end and start < 0:
            return 'word i-%d' % -start
        elif start == end and start > 0:
            return 'word i+%d' % start
        else:
            # General case: format the endpoints with explicit signs.
            if start >= 0: start = '+%d' % start
            if end >= 0: end = '+%d' % end
            return 'words i%s...i%s' % (start, end)
class ProximateTagsRule(ProximateTokensRule):
    """
    A rule which examines the tags of nearby tokens.
    @see: superclass L{ProximateTokensRule} for details.
    @see: L{ProximateTagsTemplate}, which generates these rules.
    """
    PROPERTY_NAME = 'tag' # for printing.

    @staticmethod
    def extract_property(token):
        """@return: The given token's tag."""
        return token[1]
class ProximateWordsRule(ProximateTokensRule):
    """
    A rule which examines the base types of nearby tokens.
    @see: L{ProximateTokensRule} for details.
    @see: L{ProximateWordsTemplate}, which generates these rules.
    """
    PROPERTY_NAME = 'text' # for printing.

    @staticmethod
    def extract_property(token):
        """@return: The given token's text."""
        return token[0]
######################################################################
## Brill Templates
######################################################################
class BrillTemplateI(object):
    """
    An interface for generating lists of transformational rules that
    apply at given corpus positions.  C{BrillTemplateI} is used by
    C{Brill} training algorithms to generate candidate rules.
    """
    def __init__(self):
        # Abstract interface: cannot be instantiated directly.
        raise AssertionError, "BrillTemplateI is an abstract interface"

    def applicable_rules(self, tokens, i, correctTag):
        """
        Return a list of the transformational rules that would correct
        the C{i}th subtoken's tag in the given token.  In particular,
        return a list of zero or more rules that would change
        C{tagged_tokens[i][1]} to C{correctTag}, if applied
        to C{token}.

        If the C{i}th subtoken already has the correct tag (i.e., if
        C{tagged_tokens[i][1]} == C{correctTag}), then
        C{applicable_rules} should return the empty list.

        @param token: The tagged tokens being tagged.
        @type token: C{list} of C{tuple}
        @param i: The index of the token whose tag should be corrected.
        @type i: C{int}
        @param correctTag: The correct tag for the C{i}th token.
        @type correctTag: (any)
        @rtype: C{list} of L{BrillRuleI}
        """
        raise AssertionError, "BrillTemplateI is an abstract interface"

    def get_neighborhood(self, token, index):
        """
        Returns the set of indices C{i} such that
        C{applicable_rules(token, index, ...)} depends on the value of
        the C{i}th subtoken of C{token}.

        This method is used by the \"fast\" Brill tagger trainer.

        @param token: The tokens being tagged.
        @type token: C{list} of C{tuple}
        @param index: The index whose neighborhood should be returned.
        @type index: C{int}
        @rtype: C{Set}
        """
        raise AssertionError, "BrillTemplateI is an abstract interface"
class ProximateTokensTemplate(BrillTemplateI):
    """
    A brill template that generates a list of
    L{ProximateTokensRule}s that apply at a given corpus
    position.  In particular, each C{ProximateTokensTemplate} is
    parameterized by a proximate token brill rule class and a list of
    boundaries, and generates all rules that:

      - use the given brill rule class
      - use the given list of boundaries as the C{start} and C{end}
        points for their conditions
      - are applicable to the given token.
    """
    def __init__(self, rule_class, *boundaries):
        """
        Construct a template for generating proximate token brill
        rules.

        @type rule_class: C{class}
        @param rule_class: The proximate token brill rule class that
            should be used to generate new rules.  This class must be a
            subclass of L{ProximateTokensRule}.
        @type boundaries: C{tuple} of C{(int, int)}
        @param boundaries: A list of tuples C{(start, end)}, each of
            which specifies a range for which a condition should be
            created by each rule.
        @raise ValueError: If C{start}>C{end} for any boundary.
        """
        self._rule_class = rule_class
        self._boundaries = boundaries
        # Reject any boundary whose start lies after its end.
        for (lo, hi) in boundaries:
            if lo > hi:
                raise ValueError('Boundary %s has an invalid range' %
                                 ((lo, hi),))

    def applicable_rules(self, tokens, index, correct_tag):
        # Nothing to fix if the tag is already correct.
        if tokens[index][1] == correct_tag:
            return []

        # For each boundary of this template, gather the conditions
        # that actually hold around the given position.
        per_boundary = [self._applicable_conditions(tokens, index, lo, hi)
                        for (lo, hi) in self._boundaries]

        # Form the cross product of the per-boundary condition sets.
        # E.g. [[A,B],[C,D]] expands to [[A,C],[A,D],[B,C],[B,D]].
        combos = [[]]
        for options in per_boundary:
            combos = [prefix + [cond]
                      for prefix in combos
                      for cond in options]

        # Instantiate one rule per condition combination.
        return [self._rule_class(tokens[index][1], correct_tag, *combo)
                for combo in combos]

    def _applicable_conditions(self, tokens, index, start, end):
        """
        @return: A set of all conditions for proximate token rules
        that are applicable to C{tokens[index]}, given boundaries of
        C{(start, end)}.  I.e., return a list of all tuples C{(start,
        end, M{value})}, such that the property value of at least one
        token between M{index+start} and M{index+end} (inclusive) is
        M{value}.
        """
        lo = max(0, index + start)
        hi = min(index + end + 1, len(tokens))
        found = set()
        for pos in range(lo, hi):
            found.add((start, end,
                       self._rule_class.extract_property(tokens[pos])))
        return found

    def get_neighborhood(self, tokens, index):
        # inherit docs from BrillTemplateI
        hood = set([index])
        for (start, end) in self._boundaries:
            lo = max(0, index + start)
            hi = min(index + end + 1, len(tokens))
            hood.update(range(lo, hi))
        return hood
class SymmetricProximateTokensTemplate(BrillTemplateI):
    """
    Simulates two L{ProximateTokensTemplate}s which are symmetric
    across the location of the token.  For rules of the form \"If the
    M{n}th token is tagged C{A}, and any tag preceding B{or} following
    the M{n}th token by a distance between M{x} and M{y} is C{B}, and
    ... , then change the tag of the nth token from C{A} to C{C}.\"

    One C{ProximateTokensTemplate} is formed by passing in the
    same arguments given to this class's constructor: tuples
    representing intervals in which a tag may be found.  The other
    C{ProximateTokensTemplate} is constructed with the negative
    of all the arguments in reversed order.  For example, a
    C{SymmetricProximateTokensTemplate} using the pair (-2,-1) and the
    constructor C{ProximateTagsTemplate} generates the same rules as a
    C{ProximateTagsTemplate} using (-2,-1) plus a second
    C{ProximateTagsTemplate} using (1,2).

    This is useful because we typically don't want templates to
    specify only \"following\" or only \"preceding\"; we'd like our
    rules to be able to look in either direction.
    """
    def __init__(self, rule_class, *boundaries):
        """
        Construct a template for generating proximate token brill
        rules.

        @type rule_class: C{class}
        @param rule_class: The proximate token brill rule class that
            should be used to generate new rules.  This class must be a
            subclass of L{ProximateTokensRule}.
        @type boundaries: C{tuple} of C{(int, int)}
        @param boundaries: A list of tuples C{(start, end)}, each of
            which specifies a range for which a condition should be
            created by each rule.
        @raise ValueError: If C{start}>C{end} for any boundary.
        """
        self._ptt1 = ProximateTokensTemplate(rule_class, *boundaries)
        # FIX: renamed local from `reversed`, which shadowed the
        # builtin of the same name.  Each (s, e) interval is mirrored
        # to (-e, -s), i.e. reflected across the token's position.
        mirrored = [(-e, -s) for (s, e) in boundaries]
        self._ptt2 = ProximateTokensTemplate(rule_class, *mirrored)

    # Generates lists of a subtype of ProximateTokensRule.
    def applicable_rules(self, tokens, index, correctTag):
        """
        See L{BrillTemplateI} for full specifications.

        @rtype: list of ProximateTokensRule
        """
        # Combine the rules generated by both mirrored templates.
        return (self._ptt1.applicable_rules(tokens, index, correctTag) +
                self._ptt2.applicable_rules(tokens, index, correctTag))

    def get_neighborhood(self, tokens, index):
        # inherit docs from BrillTemplateI
        n1 = self._ptt1.get_neighborhood(tokens, index)
        n2 = self._ptt2.get_neighborhood(tokens, index)
        return n1.union(n2)
######################################################################
## Brill Tagger Trainer
######################################################################
class BrillTrainer(object):
    """
    A trainer for brill taggers.
    """
    def __init__(self, initial_tagger, templates, trace=0):
        # initial_tagger: the tagger whose output the learned rules correct.
        # templates: BrillTemplateI instances used to propose candidate rules.
        # trace: verbosity level (0 = silent; higher values print progress).
        self._initial_tagger = initial_tagger
        self._templates = templates
        self._trace = trace

    #////////////////////////////////////////////////////////////
    # Training
    #////////////////////////////////////////////////////////////

    def train(self, train_tokens, max_rules=200, min_score=2):
        """
        Trains the Brill tagger on the corpus C{train_token},
        producing at most C{max_rules} transformations, each of which
        reduces the net number of errors in the corpus by at least
        C{min_score}.

        @type train_tokens: C{list} of L{tuple}
        @param train_tokens: The corpus of tagged tokens
        @type max_rules: C{int}
        @param max_rules: The maximum number of transformations to be created
        @type min_score: C{int}
        @param min_score: The minimum acceptable net error reduction
            that each transformation must produce in the corpus.
        """
        if self._trace > 0: print ("Training Brill tagger on %d tokens..." %
                                   len(train_tokens))

        # Create a new copy of the training token, and run the initial
        # tagger on this.  We will progressively update this test
        # token to look more like the training token.
        test_tokens = list(self._initial_tagger.tag(t[0] for t in train_tokens))

        if self._trace > 2: self._trace_header()

        # Look for useful rules.
        rules = []
        try:
            while len(rules) < max_rules:
                # NOTE(review): old_tags is computed but never used below.
                old_tags = [t[1] for t in test_tokens]
                (rule, score, fixscore) = self._best_rule(test_tokens,
                                                          train_tokens)
                if rule is None or score < min_score:
                    if self._trace > 1:
                        print 'Insufficient improvement; stopping'
                    break
                else:
                    # Add the rule to our list of rules.
                    rules.append(rule)
                    # Use the rules to update the test token.
                    k = rule.apply_to(test_tokens)
                    # Display trace output.
                    if self._trace > 1:
                        self._trace_rule(rule, score, fixscore, len(k))
        # The user can also cancel training manually:
        except KeyboardInterrupt: pass

        # Create and return a tagger from the rules we found.
        return Brill(self._initial_tagger, rules)

    #////////////////////////////////////////////////////////////
    # Finding the best rule
    #////////////////////////////////////////////////////////////

    # Finds the rule that makes the biggest net improvement in the corpus.
    # Returns a (rule, score) pair.
    def _best_rule(self, test_tokens, train_tokens):

        # Create a dictionary mapping from each tag to a list of the
        # indices that have that tag in both test_tokens and
        # train_tokens (i.e., where it is correctly tagged).
        correct_indices = {}
        for i in range(len(test_tokens)):
            if test_tokens[i][1] == train_tokens[i][1]:
                tag = test_tokens[i][1]
                correct_indices.setdefault(tag, []).append(i)

        # Find all the rules that correct at least one token's tag,
        # and the number of tags that each rule corrects (in
        # descending order of number of tags corrected).
        rules = self._find_rules(test_tokens, train_tokens)

        # Keep track of the current best rule, and its score.
        best_rule, best_score, best_fixscore = None, 0, 0

        # Consider each rule, in descending order of fixscore (the
        # number of tags that the rule corrects, not including the
        # number that it breaks).
        for (rule, fixscore) in rules:
            # The actual score must be <= fixscore; so if best_score
            # is bigger than fixscore, then we already have the best
            # rule.
            if best_score >= fixscore:
                return best_rule, best_score, best_fixscore

            # Calculate the actual score, by decrementing fixscore
            # once for each tag that the rule changes to an incorrect
            # value.
            score = fixscore
            if correct_indices.has_key(rule.original_tag()):
                for i in correct_indices[rule.original_tag()]:
                    if rule.applies(test_tokens, i):
                        score -= 1
                        # If the score goes below best_score, then we know
                        # that this isn't the best rule; so move on:
                        if score <= best_score: break

            #print '%5d %5d %s' % (fixscore, score, rule)

            # If the actual score is better than the best score, then
            # update best_score and best_rule.
            if score > best_score:
                best_rule, best_score, best_fixscore = rule, score, fixscore

        # Return the best rule, and its score.
        return best_rule, best_score, best_fixscore

    def _find_rules(self, test_tokens, train_tokens):
        """
        Find all rules that correct at least one token's tag in
        C{test_tokens}.

        @return: A list of tuples C{(rule, fixscore)}, where C{rule}
            is a brill rule and C{fixscore} is the number of tokens
            whose tag the rule corrects.  Note that C{fixscore} does
            I{not} include the number of tokens whose tags are changed
            to incorrect values.
        """
        # Create a list of all indices that are incorrectly tagged.
        # NOTE(review): error_indices is built but the loop below scans
        # every index, not just the erroneous ones.
        error_indices = [i for i in range(len(test_tokens))
                         if (test_tokens[i][1] !=
                             train_tokens[i][1])]

        # Create a dictionary mapping from rules to their positive-only
        # scores.
        rule_score_dict = {}
        for i in range(len(test_tokens)):
            rules = self._find_rules_at(test_tokens, train_tokens, i)
            for rule in rules:
                rule_score_dict[rule] = rule_score_dict.get(rule,0) + 1

        # Convert the dictionary into a list of (rule, score) tuples,
        # sorted in descending order of score.
        rule_score_items = rule_score_dict.items()
        temp = [(-score, rule) for (rule, score) in rule_score_items]
        temp.sort()
        return [(rule, -negscore) for (negscore, rule) in temp]

    def _find_rules_at(self, test_tokens, train_tokens, i):
        """
        @rtype: C{Set}
        @return: the set of all rules (based on the templates) that
            correct token C{i}'s tag in C{test_tokens}.
        """
        applicable_rules = set()
        # Only mistagged positions can yield corrective rules.
        if test_tokens[i][1] != train_tokens[i][1]:
            correct_tag = train_tokens[i][1]
            for template in self._templates:
                new_rules = template.applicable_rules(test_tokens, i,
                                                      correct_tag)
                applicable_rules.update(new_rules)

        return applicable_rules

    #////////////////////////////////////////////////////////////
    # Tracing
    #////////////////////////////////////////////////////////////

    def _trace_header(self):
        # Print the column legend for per-rule trace lines.
        print """
           B      |
   S   F   r   O  |        Score = Fixed - Broken
   c   i   o   t  |  R     Fixed = num tags changed incorrect -> correct
   o   x   k   h  |  u     Broken = num tags changed correct -> incorrect
   r   e   e   e  |  l     Other = num tags changed incorrect -> incorrect
   e   d   n   r  |  e
------------------+-------------------------------------------------------
        """.rstrip()

    def _trace_rule(self, rule, score, fixscore, numchanges):
        # Broken = fixscore-score; Other = numchanges-fixscore*2+score.
        if self._trace > 2:
            print ('%4d%4d%4d%4d ' % (score, fixscore, fixscore-score,
                                      numchanges-fixscore*2+score)), '|',
        print rule
######################################################################
## Fast Brill Tagger Trainer
######################################################################
class FastBrillTrainer(object):
    """
    A faster trainer for brill taggers.
    """
    def __init__(self, initial_tagger, templates, trace=0):
        # initial_tagger: the tagger whose output the learned rules correct.
        # templates: BrillTemplateI instances used to propose candidate rules.
        # trace: verbosity level for progress output.
        self._initial_tagger = initial_tagger
        self._templates = templates
        self._trace = trace

    #////////////////////////////////////////////////////////////
    # Training
    #////////////////////////////////////////////////////////////

    def train(self, train_tokens, max_rules=200, min_score=2):
        """
        Train on C{train_tokens}, returning a C{Brill} tagger with at
        most C{max_rules} rules, each scoring at least C{min_score}.
        Functionally equivalent to L{BrillTrainer.train}, but uses
        incremental bookkeeping to avoid rescanning the whole corpus
        for every candidate rule.

        NOTE(review): relies on the C{bisect} module being imported at
        file top, and on module-level C{_errorPositions} and C{Brill}.
        """
        # If TESTING is true, extra computation is done to determine whether
        # each "best" rule actually reduces net error by the score it received.
        TESTING = False

        # Basic idea: Keep track of the rules that apply at each position.
        # And keep track of the positions to which each rule applies.

        # The set of somewhere-useful rules that apply at each position
        rulesByPosition = []
        for i in range(len(train_tokens)):
            rulesByPosition.append(set())

        # Mapping somewhere-useful rules to the positions where they apply.
        # Then maps each position to the score change the rule generates there.
        # (always -1, 0, or 1)
        positionsByRule = {}

        # Map scores to sets of rules known to achieve *at most* that score.
        rulesByScore = {0:{}}
        # Conversely, map somewhere-useful rules to their minimal scores.
        ruleScores = {}

        tagIndices = {}  # Lists of indices, mapped to by their tags

        # Maps rules to the first index in the corpus where it may not be known
        # whether the rule applies.  (Rules can't be chosen for inclusion
        # unless this value = len(corpus).  But most rules are bad, and
        # we won't need to check the whole corpus to know that.)
        # Some indices past this may actually have been checked; it just isn't
        # guaranteed.
        firstUnknownIndex = {}

        # Make entries in the rule-mapping dictionaries.
        # Should be called before _updateRuleApplies.
        def _initRule (rule):
            positionsByRule[rule] = {}
            rulesByScore[0][rule] = None
            ruleScores[rule] = 0
            firstUnknownIndex[rule] = 0

        # Takes a somewhere-useful rule which applies at index i;
        # Updates all rule data to reflect that the rule so applies.
        def _updateRuleApplies (rule, i):

            # If the rule is already known to apply here, ignore.
            # (This only happens if the position's tag hasn't changed.)
            if positionsByRule[rule].has_key(i):
                return

            # Score contribution at i: +1 fixes the tag, -1 breaks a
            # correct tag, 0 changes one wrong tag to another.
            if rule.replacement_tag() == train_tokens[i][1]:
                positionsByRule[rule][i] = 1
            elif rule.original_tag() == train_tokens[i][1]:
                positionsByRule[rule][i] = -1
            else: # was wrong, remains wrong
                positionsByRule[rule][i] = 0

            # Update rules in the other dictionaries
            del rulesByScore[ruleScores[rule]][rule]
            ruleScores[rule] += positionsByRule[rule][i]
            if not rulesByScore.has_key(ruleScores[rule]):
                rulesByScore[ruleScores[rule]] = {}
            rulesByScore[ruleScores[rule]][rule] = None
            rulesByPosition[i].add(rule)

        # Takes a rule which no longer applies at index i;
        # Updates all rule data to reflect that the rule doesn't apply.
        def _updateRuleNotApplies (rule, i):
            del rulesByScore[ruleScores[rule]][rule]
            ruleScores[rule] -= positionsByRule[rule][i]
            if not rulesByScore.has_key(ruleScores[rule]):
                rulesByScore[ruleScores[rule]] = {}
            rulesByScore[ruleScores[rule]][rule] = None

            del positionsByRule[rule][i]
            rulesByPosition[i].remove(rule)
            # Optional addition: if the rule now applies nowhere, delete
            # all its dictionary entries.

        # Run the initial tagger; this working copy is progressively
        # corrected as rules are adopted.
        tagged_tokens = list(self._initial_tagger.tag(t[0] for t in train_tokens))

        # First sort the corpus by tag, and also note where the errors are.
        errorIndices = []  # only used in initialization
        for i in range(len(tagged_tokens)):
            tag = tagged_tokens[i][1]
            if tag != train_tokens[i][1]:
                errorIndices.append(i)
            if not tagIndices.has_key(tag):
                tagIndices[tag] = []
            tagIndices[tag].append(i)

        print "Finding useful rules..."
        # Collect all rules that fix any errors, with their positive scores.
        for i in errorIndices:
            for template in self._templates:
                # Find the templated rules that could fix the error.
                for rule in template.applicable_rules(tagged_tokens, i,
                                                      train_tokens[i][1]):
                    if not positionsByRule.has_key(rule):
                        _initRule(rule)
                    _updateRuleApplies(rule, i)

        print "Done initializing %i useful rules." %len(positionsByRule)

        if TESTING:
            after = -1 # bug-check only

        # Each iteration through the loop tries a new maxScore.
        maxScore = max(rulesByScore.keys())
        rules = []
        while len(rules) < max_rules and maxScore >= min_score:

            # Find the next best rule.  This is done by repeatedly taking a rule with
            # the highest score and stepping through the corpus to see where it
            # applies.  When it makes an error (decreasing its score) it's bumped
            # down, and we try a new rule with the highest score.
            # When we find a rule which has the highest score AND which has been
            # tested against the entire corpus, we can conclude that it's the next
            # best rule.

            bestRule = None
            bestRules = rulesByScore[maxScore].keys()

            for rule in bestRules:
                # Find the first relevant index at or following the first
                # unknown index.  (Only check indices with the right tag.)
                ti = bisect.bisect_left(tagIndices[rule.original_tag()],
                                        firstUnknownIndex[rule])
                for nextIndex in tagIndices[rule.original_tag()][ti:]:
                    if rule.applies(tagged_tokens, nextIndex):
                        _updateRuleApplies(rule, nextIndex)
                        if ruleScores[rule] < maxScore:
                            firstUnknownIndex[rule] = nextIndex+1
                            break # the _update demoted the rule

                # If we checked all remaining indices and found no more errors:
                if ruleScores[rule] == maxScore:
                    firstUnknownIndex[rule] = len(tagged_tokens) # i.e., we checked them all
                    print "%i) %s (score: %i)" %(len(rules)+1, rule, maxScore)
                    bestRule = rule
                    break

            if bestRule == None: # all rules dropped below maxScore
                del rulesByScore[maxScore]
                maxScore = max(rulesByScore.keys())
                continue # with next-best rules

            # bug-check only
            if TESTING:
                before = len(_errorPositions(tagged_tokens, train_tokens))
                print "There are %i errors before applying this rule." %before
                assert after == -1 or before == after, \
                        "after=%i but before=%i" %(after,before)

            print "Applying best rule at %i locations..." \
                                %len(positionsByRule[bestRule].keys())

            # If we reach this point, we've found a new best rule.
            # Apply the rule at the relevant sites.
            # (apply_at is a little inefficient here, since we know the rule applies
            # and don't actually need to test it again.)
            rules.append(bestRule)
            bestRule.apply_at(tagged_tokens, positionsByRule[bestRule].keys())

            # Update the tag index accordingly.
            for i in positionsByRule[bestRule].keys(): # where it applied
                # Update positions of tags
                # First, find and delete the index for i from the old tag.
                oldIndex = bisect.bisect_left(tagIndices[bestRule.original_tag()], i)
                del tagIndices[bestRule.original_tag()][oldIndex]

                # Then, insert i into the index list of the new tag.
                if not tagIndices.has_key(bestRule.replacement_tag()):
                    tagIndices[bestRule.replacement_tag()] = []
                newIndex = bisect.bisect_left(tagIndices[bestRule.replacement_tag()], i)
                tagIndices[bestRule.replacement_tag()].insert(newIndex, i)

            # This part is tricky.
            # We need to know which sites might now require new rules -- that
            # is, which sites are close enough to the changed site so that
            # a template might now generate different rules for it.
            # Only the templates can know this.
            #
            # If a template now generates a different set of rules, we have
            # to update our indices to reflect that.
            print "Updating neighborhoods of changed sites.\n"

            # First, collect all the indices that might get new rules.
            neighbors = set()
            for i in positionsByRule[bestRule].keys(): # sites changed
                for template in self._templates:
                    neighbors.update(template.get_neighborhood(tagged_tokens, i))

            # Then collect the new set of rules for each such index.
            c = d = e = 0
            for i in neighbors:
                siteRules = set()
                for template in self._templates:
                    # Get a set of the rules that the template now generates
                    siteRules.update(set(template.applicable_rules(
                                            tagged_tokens, i, train_tokens[i][1])))

                # Update rules no longer generated here by any template
                for obsolete in rulesByPosition[i] - siteRules:
                    c += 1
                    _updateRuleNotApplies(obsolete, i)

                # Update rules only now generated by this template
                for newRule in siteRules - rulesByPosition[i]:
                    d += 1
                    if not positionsByRule.has_key(newRule):
                        e += 1
                        _initRule(newRule) # make a new rule w/score=0
                    _updateRuleApplies(newRule, i) # increment score, etc.

            if TESTING:
                after = before - maxScore
            print "%i obsolete rule applications, %i new ones, " %(c,d)+ \
                        "using %i previously-unseen rules." %e

            maxScore = max(rulesByScore.keys()) # may have gone up

        if self._trace > 0: print ("Training Brill tagger on %d tokens..." %
                                   len(train_tokens))

        # Maintain a list of the rules that apply at each position.
        # NOTE(review): rules_by_position is computed but never used below.
        rules_by_position = [{} for tok in train_tokens]

        # Create and return a tagger from the rules we found.
        return Brill(self._initial_tagger, rules)
######################################################################
## Testing
######################################################################
def _errorPositions (train_tokens, tokens):
return [i for i in range(len(tokens))
if tokens[i][1] !=
train_tokens[i][1] ]
# returns a list of errors in string format
def errorList(train_tokens, tokens, radius=2):
    """
    Returns a list of human-readable strings indicating the errors in the
    given tagging of the corpus.

    @param train_tokens: The correct tagging of the corpus
    @type train_tokens: C{list} of C{tuple}
    @param tokens: The tagged corpus
    @type tokens: C{list} of C{tuple}
    @param radius: How many tokens on either side of a wrongly-tagged token
        to include in the error string.  For example, if C{radius}=2, each
        error string will show the incorrect token plus two tokens on either
        side.
    @type radius: int
    """
    descriptions = []
    corpus_len = len(tokens)

    for err_idx in _errorPositions(train_tokens, tokens):
        # Header: "<wrong tag> -> <correct tag>: "
        line = (tokens[err_idx][1].rjust(3) + " -> "
                + train_tokens[err_idx][1].rjust(3) + ": ")

        # Show a window of context around the error, highlighting it.
        window_start = max(err_idx - radius, 0)
        window_stop = min(err_idx + radius + 1, corpus_len)
        for ctx in range(window_start, window_stop):
            word, word_tag = tokens[ctx]
            if word == word_tag:
                shown = word  # don't print punctuation tags
            else:
                shown = word + "/" + word_tag
            if ctx == err_idx:
                line += "**" + shown + "** "
            else:
                line += shown + " "
        descriptions.append(line)

    return descriptions
#####################################################################################
# Demonstration
#####################################################################################
def demo(num_sents=100, max_rules=200, min_score=2, error_output = "errors.out",
rule_output="rules.out", randomize=False, train=.8, trace=3):
"""
Brill Tagger Demonstration
@param num_sents: how many sentences of training and testing data to use
@type num_sents: L{int}
@param max_rules: maximum number of rule instances to create
@type max_rules: L{int}
@param min_score: the minimum score for a rule in order for it to be considered
@type min_score: L{int}
@param error_output: the file where errors will be saved
@type error_output: L{string}
@param rule_output: the file where rules will be saved
@type rule_output: L{string}
@param randomize: whether the training data should be a random subset of the corpus
@type randomize: L{boolean}
@param train: the fraction of the the corpus to be used for training (1=all)
@type train: L{float}
@param trace: the level of diagnostic tracing output to produce (0-3)
@type train: L{int}
"""
from en.parser.nltk_lite.corpora import treebank
from en.parser.nltk_lite import tag
from en.parser.nltk_lite.tag import brill
NN_CD_tagger = tag.Regexp([(r'^-?[0-9]+(.[0-9]+)?$', 'CD'), (r'.*', 'NN')])
# train is the proportion of data used in training; the rest is reserved
# for testing.
print "Loading tagged data..."
sents = list(treebank.tagged())
if randomize:
random.seed(len(sents))
random.shuffle(sents)
tagged_data = [t for s in sents[:num_sents] for t in s]
cutoff = int(len(tagged_data)*train)
training_data = tagged_data[:cutoff]
gold_data = tagged_data[cutoff:]
testing_data = [t[0] for t in gold_data]
# Unigram tagger
print "Training unigram tagger:",
u = tag.Unigram(backoff=NN_CD_tagger)
# NB training and testing are required to use a list-of-lists structure,
# so we wrap the flattened corpus data with the extra list structure.
u.train([training_data])
print("[accuracy: %f]" % tag.accuracy(u, [gold_data]))
# Brill tagger
templates = [
brill.SymmetricProximateTokensTemplate(brill.ProximateTagsRule, (1,1)),
brill.SymmetricProximateTokensTemplate(brill.ProximateTagsRule, (2,2)),
brill.SymmetricProximateTokensTemplate(brill.ProximateTagsRule, (1,2)),
brill.SymmetricProximateTokensTemplate(brill.ProximateTagsRule, (1,3)),
brill.SymmetricProximateTokensTemplate(brill.ProximateWordsRule, (1,1)),
brill.SymmetricProximateTokensTemplate(brill.ProximateWordsRule, (2,2)),
brill.SymmetricProximateTokensTemplate(brill.ProximateWordsRule, (1,2)),
brill.SymmetricProximateTokensTemplate(brill.ProximateWordsRule, (1,3)),
brill.ProximateTokensTemplate(brill.ProximateTagsRule, (-1, -1), (1,1)),
brill.ProximateTokensTemplate(brill.ProximateWordsRule, (-1, -1), (1,1)),
]
#trainer = brill.FastBrillTrainer(u, templates, trace)
trainer = brill.BrillTrainer(u, templates, trace)
b = trainer.train(training_data, max_rules, min_score)
print
print("Brill accuracy: %f" % tag.accuracy(b, [gold_data]))
print("\nRules: ")
printRules = file(rule_output, 'w')
for rule in b.rules():
print(str(rule))
printRules.write(str(rule)+"\n\n")
testing_data = list(b.tag(testing_data))
el = errorList(gold_data, testing_data)
errorFile = file(error_output, 'w')
for e in el:
errorFile.write(e+"\n\n")
errorFile.close()
print "Done; rules and errors saved to %s and %s." % (rule_output, error_output)
if __name__ == '__main__':
demo()
| Python |
# Natural Language Toolkit: Taggers
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@csse.unimelb.edu.au> (minor additions)
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Classes and interfaces for tagging each token of a document with
supplementary information, such as its part of speech or its WordNet
synset tag. This task, which is known as X{tagging}, is defined by
the L{TagI} interface.
"""
class TagI(object):
    """
    A processing interface for assigning a tag to each token in a list.
    Tags are case sensitive strings that identify some property of each
    token, such as its part of speech or its sense.
    """
    def tag(self, tokens):
        """
        Assign a tag to each token in C{tokens}, and yield a tagged token
        of the form (token, tag)

        @param tokens: the sequence of tokens to tag
        @raise NotImplementedError: always; concrete taggers must override.
        """
        raise NotImplementedError()
class SequentialBackoff(TagI):
    """
    A tagger that tags words sequentially, left to right.

    Subclasses provide C{tag_one}; when it produces no tag, the optional
    backoff tagger (C{self._backoff}) is consulted instead.
    """
    def tag(self, tokens, verbose=False):
        for item in tokens:
            # A nested list is treated as a sentence and tagged recursively.
            if isinstance(item, list):
                yield list(self.tag(item, verbose))
                continue
            chosen = self.tag_one(item)
            if chosen is None and self._backoff:
                chosen = self._backoff.tag_one(item)
            if self._history:
                self._history.enqueue(chosen)
            yield (item, chosen)

    def tag_sents(self, sents, verbose=False):
        # Tag each sentence independently, materialising each generator.
        for sentence in sents:
            yield list(self.tag(sentence, verbose))

    def _backoff_tag_one(self, token, history=None):
        # Delegate to the backoff tagger when one is configured.
        if not self._backoff:
            return None
        return self._backoff.tag_one(token, history)
class Default(SequentialBackoff):
    """
    A tagger that assigns the same tag to every token.
    """
    def __init__(self, tag):
        """
        Construct a new default tagger.

        @type tag: C{string}
        @param tag: The tag that should be assigned to every token.
        """
        self._tag = tag
        # A default tagger always succeeds, so a backoff tagger would
        # never be consulted.
        self._backoff = None
        self._history = None

    def tag_one(self, token, history=None):
        # The token and history are irrelevant: the answer is constant.
        return self._tag

    def __repr__(self):
        return '<DefaultTagger: tag=%s>' % self._tag
##################################################################
# UTILITY FUNCTIONS
##################################################################
from en.parser.nltk_lite import tokenize
def tag2tuple(s, sep='/'):
    """
    Split a string of the form "token<sep>tag" into a (token, tag) tuple,
    splitting at the last occurrence of the separator.  If the separator
    does not occur, the tag is None.

    @param s: the combined token/tag string
    @param sep: the separator between token and tag (default '/')
    @return: a (token, tag) tuple; tag is None when no separator is found
    """
    loc = s.rfind(sep)
    if loc >= 0:
        # BUG FIX: skip the full separator, not just one character, so that
        # multi-character separators (e.g. '::') work correctly.
        return (s[:loc], s[loc+len(sep):])
    else:
        return (s, None)
def untag(tagged_sentence):
    """Strip the tags from a tagged sentence, yielding the bare words."""
    return (pair[0] for pair in tagged_sentence)
def string2tags(s, sep='/'):
    # Split on whitespace, then split each "token/TAG" item into a tuple.
    return [tag2tuple(t, sep) for t in tokenize.whitespace(s)]
def tags2string(t, sep='/'):
    """Join (token, tag) tuples back into a single space-separated string."""
    parts = []
    for token, tag in t:
        # str() so that a None tag renders as 'None' rather than failing.
        parts.append(token + sep + str(tag))
    return " ".join(parts)
def string2words(s, sep='/'):
    # Like string2tags, but keep only the token part of each item.
    return [tag2tuple(t, sep)[0] for t in tokenize.whitespace(s)]
##################################################################
# EVALUATION
##################################################################
from en.parser.nltk_lite import evaluate
def accuracy(tagger, gold):
    """
    Score the accuracy of the tagger against the gold standard.
    Strip the tags from the gold standard text, retag it using
    the tagger, then compute the accuracy score.

    @type tagger: C{TagI}
    @param tagger: The tagger being evaluated.
    @type gold: C{list} of C{Token}
    @param gold: The list of tagged tokens to score the tagger on.
    @rtype: C{float}
    """
    gold_tokens = []
    test_tokens = []
    for sentence in gold:
        sentence = list(sentence)
        gold_tokens.extend(sentence)
        # Retag the untagged words and collect the tagger's output.
        test_tokens.extend(tagger.tag(untag(sentence)))
    return evaluate.accuracy(gold_tokens, test_tokens)
#############################################################
from unigram import *
from ngram import *
from brill import *
| Python |
# Natural Language Toolkit: K-Means Clusterer
#
# Copyright (C) 2004-2006 University of Melbourne
# Author: Trevor Cohn <tacohn@cs.mu.oz.au>
# Porting: Steven Bird <sb@csse.unimelb.edu.au>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
from en.parser.nltk_lite.cluster import *
class KMeans(VectorSpace):
    """
    The K-means clusterer starts with k arbitrary chosen means then allocates
    each vector to the cluster with the closest mean. It then recalculates the
    means of each cluster as the centroid of the vectors in the cluster. This
    process repeats until the cluster memberships stabilise. This is a
    hill-climbing algorithm which may converge to a local maximum. Hence the
    clustering is often repeated with random initial means and the most
    commonly occuring output means are chosen.
    """

    def __init__(self, num_means, distance, repeats=1,
                 conv_test=1e-6, initial_means=None,
                 normalise=False, svd_dimensions=None,
                 rng=None):
        """
        @param num_means: the number of means to use (may use fewer)
        @type num_means: int
        @param distance: measure of distance between two vectors
        @type distance: function taking two vectors and returing a float
        @param repeats: number of randomised clustering trials to use
        @type repeats: int
        @param conv_test: maximum variation in mean differences before
            deemed convergent
        @type conv_test: number
        @param initial_means: set of k initial means
        @type initial_means: sequence of vectors
        @param normalise: should vectors be normalised to length 1
        @type normalise: boolean
        @param svd_dimensions: number of dimensions to use in reducing vector
            dimensionsionality with SVD
        @type svd_dimensions: int
        @param rng: random number generator (or None)
        @type rng: Random
        """
        VectorSpace.__init__(self, normalise, svd_dimensions)
        self._num_means = num_means
        self._distance = distance
        self._max_difference = conv_test
        # Either no initial means were supplied, or exactly one per cluster.
        assert not initial_means or len(initial_means) == num_means
        self._means = initial_means
        assert repeats >= 1
        # Fixed initial means make repeated random trials pointless.
        assert not (initial_means and repeats > 1)
        self._repeats = repeats
        if rng: self._rng = rng
        else: self._rng = random.Random()

    def cluster_vectorspace(self, vectors, trace=False):
        if self._means and self._repeats > 1:
            print 'Warning: means will be discarded for subsequent trials'
        # Collect the converged means from each trial.
        meanss = []
        for trial in range(self._repeats):
            if trace: print 'k-means trial', trial
            # NOTE(review): `trial > 1` looks like it should be `trial > 0`;
            # as written, trial 1 re-uses the converged means left over from
            # trial 0 instead of sampling fresh ones -- confirm before fixing.
            if not self._means or trial > 1:
                self._means = self._rng.sample(vectors, self._num_means)
            self._cluster_vectorspace(vectors, trace)
            meanss.append(self._means)
        if len(meanss) > 1:
            # sort the means first (so that different cluster numbering won't
            # effect the distance comparison)
            for means in meanss:
                means.sort(cmp = _vector_compare)
            # find the set of means that's minimally different from the others
            min_difference = min_means = None
            for i in range(len(meanss)):
                d = 0
                for j in range(len(meanss)):
                    if i != j:
                        d += self._sum_distances(meanss[i], meanss[j])
                if min_difference == None or d < min_difference:
                    min_difference, min_means = d, meanss[i]
            # use the best means
            self._means = min_means

    def _cluster_vectorspace(self, vectors, trace=False):
        # No point iterating when there are at least as many means as vectors.
        if self._num_means < len(vectors):
            # perform k-means clustering
            converged = False
            while not converged:
                # assign the tokens to clusters based on minimum distance to
                # the cluster means
                clusters = [[] for m in range(self._num_means)]
                for vector in vectors:
                    index = self.classify_vectorspace(vector)
                    clusters[index].append(vector)
                if trace: print 'iteration'
                #for i in range(self._num_means):
                    #print '  mean', i, 'allocated', len(clusters[i]), 'vectors'
                # recalculate cluster means by computing the centroid of each cluster
                new_means = map(self._centroid, clusters)
                # measure the degree of change from the previous step for convergence
                difference = self._sum_distances(self._means, new_means)
                if difference < self._max_difference:
                    converged = True
                # remember the new means
                self._means = new_means

    def classify_vectorspace(self, vector):
        # finds the closest cluster centroid
        # returns that cluster's index
        best_distance = best_index = None
        for index in range(len(self._means)):
            mean = self._means[index]
            dist = self._distance(vector, mean)
            if best_distance == None or dist < best_distance:
                best_index, best_distance = index, dist
        return best_index

    def num_clusters(self):
        # Before clustering, fall back on the requested number of means.
        if self._means:
            return len(self._means)
        else:
            return self._num_means

    def means(self):
        """
        The means used for clustering.
        """
        return self._means

    def _sum_distances(self, vectors1, vectors2):
        # Total distance between corresponding vectors of the two sequences.
        difference = 0.0
        for u, v in zip(vectors1, vectors2):
            difference += self._distance(u, v)
        return difference

    def _centroid(self, cluster):
        assert len(cluster) > 0
        # Arithmetic mean of the cluster's vectors.
        centroid = copy.copy(cluster[0])
        for vector in cluster[1:]:
            centroid += vector
        return centroid / float(len(cluster))

    def __repr__(self):
        return '<KMeans Clusterer means=%s repeats=%d>' % \
               (self._means, self._repeats)
def _vector_compare(x, y):
xs, ys = sum(x), sum(y)
if xs < ys: return -1
elif xs > ys: return 1
else: return 0
#################################################################################
def demo():
    """
    Non-interactive demonstration of k-means clustering on small 2-D data.
    """
    # example from figure 14.9, page 517, Manning and Schutze
    from en.parser.nltk_lite import cluster

    vectors = [array(f) for f in [[2, 1], [1, 3], [4, 7], [6, 7]]]
    means = [[4, 3], [5, 5]]

    # Single trial with fixed starting means.
    clusterer = cluster.KMeans(2, euclidean_distance, initial_means=means)
    clusters = clusterer.cluster(vectors, True, trace=True)

    print 'Clustered:', vectors
    print 'As:', clusters
    print 'Means:', clusterer.means()
    print

    vectors = [array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]]

    # test k-means using the euclidean distance metric, 2 means and repeat
    # clustering 10 times with random seeds
    clusterer = cluster.KMeans(2, euclidean_distance, repeats=10)
    clusters = clusterer.cluster(vectors, True)
    print 'Clustered:', vectors
    print 'As:', clusters
    print 'Means:', clusterer.means()
    print

    # classify a new vector
    vector = array([3, 3])
    print 'classify(%s):' % vector,
    print clusterer.classify(vector)
    print

if __name__ == '__main__':
    demo()
| Python |
# Natural Language Toolkit: Group Average Agglomerative Clusterer
#
# Copyright (C) 2004-2006 University of Melbourne
# Author: Trevor Cohn <tacohn@cs.mu.oz.au>
# Porting: Steven Bird <sb@csse.unimelb.edu.au>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
from en.parser.nltk_lite.cluster import *
class GroupAverageAgglomerative(VectorSpace):
    """
    The GAAC clusterer starts with each of the N vectors as singleton
    clusters. It then iteratively merges pairs of clusters which have the
    closest centroids.  This continues until there is only one cluster. The
    order of merges gives rise to a dendogram: a tree with the earlier merges
    lower than later merges. The membership of a given number of clusters c, 1
    <= c <= N, can be found by cutting the dendogram at depth c.

    This clusterer uses the cosine similarity metric only, which allows for
    efficient speed-up in the clustering process.
    """

    def __init__(self, num_clusters=1, normalise=True, svd_dimensions=None):
        """
        @param num_clusters: the number of clusters to form
        @type num_clusters: int
        @param normalise: should vectors be normalised to length 1
        @type normalise: boolean
        @param svd_dimensions: number of dimensions to use in reducing vector
            dimensionsionality with SVD
        @type svd_dimensions: int
        """
        VectorSpace.__init__(self, normalise, svd_dimensions)
        self._num_clusters = num_clusters
        self._dendogram = None
        self._groups_values = None

    def cluster(self, vectors, assign_clusters=False, trace=False):
        # stores the merge order
        self._dendogram = Dendogram(
            [array(vector, numpy.float64) for vector in vectors])
        return VectorSpace.cluster(self, vectors, assign_clusters, trace)

    def cluster_vectorspace(self, vectors, trace=False):
        # create a cluster for each vector
        clusters = [[vector] for vector in vectors]
        # the sum vectors
        vector_sum = copy.copy(vectors)
        while len(clusters) > max(self._num_clusters, 1):
            # find the two best candidate clusters to merge, based on their
            # S(union c_i, c_j)
            best = None
            for i in range(len(clusters)):
                for j in range(i + 1, len(clusters)):
                    sim = self._average_similarity(
                        vector_sum[i], len(clusters[i]),
                        vector_sum[j], len(clusters[j]))
                    if not best or sim > best[0]:
                        best = (sim, i, j)
            # merge them and replace in cluster list
            # NOTE(review): `sum` shadows the builtin within this scope.
            i, j = best[1:]
            sum = clusters[i] + clusters[j]
            if trace: print 'merging %d and %d' % (i, j)
            clusters[i] = sum
            del clusters[j]
            vector_sum[i] = vector_sum[i] + vector_sum[j]
            del vector_sum[j]
            self._dendogram.merge(i, j)
        self.update_clusters(self._num_clusters)

    def update_clusters(self, num_clusters):
        # Recompute the centroids for a cut of the dendogram into
        # num_clusters groups.
        clusters = self._dendogram.groups(num_clusters)
        self._centroids = []
        for cluster in clusters:
            assert len(cluster) > 0
            if self._should_normalise:
                centroid = self._normalise(cluster[0])
            else:
                centroid = array(cluster[0])
            for vector in cluster[1:]:
                if self._should_normalise:
                    centroid += self._normalise(vector)
                else:
                    centroid += vector
            centroid /= float(len(cluster))
            self._centroids.append(centroid)
        self._num_clusters = len(self._centroids)

    def classify_vectorspace(self, vector):
        # Assign the vector to the centroid with the highest similarity.
        best = None
        for i in range(self._num_clusters):
            centroid = self._centroids[i]
            sim = self._average_similarity(vector, 1, centroid, 1)
            if not best or sim > best[0]:
                best = (sim, i)
        return best[1]

    def dendogram(self):
        """
        @return: The dendogram representing the current clustering
        @rtype:  Dendogram
        """
        return self._dendogram

    def num_clusters(self):
        return self._num_clusters

    def _average_similarity(self, v1, l1, v2, l2):
        # Average pairwise similarity within the merged cluster, derived from
        # the cluster sum vectors.  Presumably assumes unit-length vectors
        # (normalise=True is the default) -- TODO confirm.
        sum = v1 + v2
        length = l1 + l2
        return (numpy.dot(sum, sum) - length) / (length * (length - 1))

    def __repr__(self):
        return '<GroupAverageAgglomerative Clusterer n=%d>' % self._num_clusters
def demo():
    """
    Non-interactive demonstration of the clusterers with simple 2-D data.
    """
    from en.parser.nltk_lite import cluster

    # use a set of tokens with 2D indices
    vectors = [array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]]

    # test the GAAC clusterer with 4 clusters
    clusterer = cluster.GroupAverageAgglomerative(4)
    clusters = clusterer.cluster(vectors, True)

    print 'Clusterer:', clusterer
    print 'Clustered:', vectors
    print 'As:', clusters
    print

    # show the dendogram
    clusterer.dendogram().show()

    # classify a new vector
    vector = array([3, 3])
    print 'classify(%s):' % vector,
    print clusterer.classify(vector)
    print

if __name__ == '__main__':
    demo()
| Python |
# Natural Language Toolkit: Expectation Maximization Clusterer
#
# Copyright (C) 2004-2006 University of Melbourne
# Author: Trevor Cohn <tacohn@cs.mu.oz.au>
# Porting: Steven Bird <sb@csse.unimelb.edu.au>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
from en.parser.nltk_lite.cluster import *
class EM(VectorSpace):
    """
    The Gaussian EM clusterer models the vectors as being produced by
    a mixture of k Gaussian sources. The parameters of these sources
    (prior probability, mean and covariance matrix) are then found to
    maximise the likelihood of the given data. This is done with the
    expectation maximisation algorithm. It starts with k arbitrarily
    chosen means, priors and covariance matrices. It then calculates
    the membership probabilities for each vector in each of the
    clusters; this is the 'E' step. The cluster parameters are then
    updated in the 'M' step using the maximum likelihood estimate from
    the cluster membership probabilities. This process continues until
    the likelihood of the data does not significantly increase.
    """

    def __init__(self, initial_means, priors=None, covariance_matrices=None,
                 conv_threshold=1e-6, bias=0.1, normalise=False,
                 svd_dimensions=None):
        """
        Creates an EM clusterer with the given starting parameters,
        convergence threshold and vector mangling parameters.

        @param initial_means: the means of the gaussian cluster centers
        @type initial_means: [seq of] numpy array or seq of SparseArray
        @param priors: the prior probability for each cluster
        @type priors: numpy array or seq of float
        @param covariance_matrices: the covariance matrix for each cluster
        @type covariance_matrices: [seq of] numpy array
        @param conv_threshold: maximum change in likelihood before deemed
            convergent
        @type conv_threshold: int or float
        @param bias: variance bias used to ensure non-singular covariance
            matrices
        @type bias: float
        @param normalise: should vectors be normalised to length 1
        @type normalise: boolean
        @param svd_dimensions: number of dimensions to use in reducing vector
            dimensionsionality with SVD
        @type svd_dimensions: int
        """
        VectorSpace.__init__(self, normalise, svd_dimensions)
        self._means = array(initial_means, numpy.float64)
        # The number of initial means fixes the number of clusters.
        self._num_clusters = len(initial_means)
        self._conv_threshold = conv_threshold
        self._covariance_matrices = covariance_matrices
        self._priors = priors
        self._bias = bias

    def num_clusters(self):
        return self._num_clusters

    def cluster_vectorspace(self, vectors, trace=False):
        assert len(vectors) > 0
        # set the parameters to initial values
        dimensions = len(vectors[0])
        means = self._means
        priors = self._priors
        if not priors:
            # default to a uniform prior over clusters
            priors = self._priors = numpy.ones(self._num_clusters,
                numpy.float64) / self._num_clusters
        covariances = self._covariance_matrices
        if not covariances:
            # default to identity covariance for each cluster
            covariances = self._covariance_matrices = \
                [ numpy.identity(dimensions, numpy.float64)
                  for i in range(self._num_clusters) ]
        # do the E and M steps until the likelihood plateaus
        lastl = self._loglikelihood(vectors, priors, means, covariances)
        converged = False
        while not converged:
            if trace: print 'iteration; loglikelihood', lastl
            # E-step, calculate hidden variables, h[i,j]
            h = numpy.zeros((len(vectors), self._num_clusters),
                numpy.float64)
            for i in range(len(vectors)):
                for j in range(self._num_clusters):
                    h[i,j] = priors[j] * self._gaussian(means[j],
                        covariances[j], vectors[i])
                # normalise so memberships for vector i sum to one
                h[i,:] /= sum(h[i,:])
            # M-step, update parameters - cvm, p, mean
            for j in range(self._num_clusters):
                covariance_before = covariances[j]
                new_covariance = numpy.zeros((dimensions, dimensions),
                    numpy.float64)
                new_mean = numpy.zeros(dimensions, numpy.float64)
                sum_hj = 0.0
                for i in range(len(vectors)):
                    delta = vectors[i] - means[j]
                    new_covariance += h[i,j] * \
                        numpy.multiply.outer(delta, delta)
                    sum_hj += h[i,j]
                    new_mean += h[i,j] * vectors[i]
                covariances[j] = new_covariance / sum_hj
                means[j] = new_mean / sum_hj
                priors[j] = sum_hj / len(vectors)
                # bias term to stop covariance matrix being singular
                covariances[j] += self._bias * \
                    numpy.identity(dimensions, numpy.float64)
            # calculate likelihood - FIXME: may be broken
            l = self._loglikelihood(vectors, priors, means, covariances)
            # check for convergence
            if abs(lastl - l) < self._conv_threshold:
                converged = True
            lastl = l

    def classify_vectorspace(self, vector):
        # Pick the cluster with the highest weighted density for the vector.
        best = None
        for j in range(self._num_clusters):
            p = self._priors[j] * self._gaussian(self._means[j],
                self._covariance_matrices[j], vector)
            if not best or p > best[0]:
                best = (p, j)
        return best[1]

    def likelihood_vectorspace(self, vector, cluster):
        # NOTE(review): `cid` is computed but never used; it equals `cluster`
        # because cluster_names() is range(num_clusters), so behaviour is
        # unchanged -- but the intent was presumably to index with cid.
        cid = self.cluster_names().index(cluster)
        return self._priors[cluster] * self._gaussian(self._means[cluster],
               self._covariance_matrices[cluster], vector)

    def _gaussian(self, mean, cvm, x):
        # Multivariate gaussian density of x under the given mean/covariance.
        m = len(mean)
        assert cvm.shape == (m, m), \
            'bad sized covariance matrix, %s' % str(cvm.shape)
        try:
            det = linalg.det(cvm)
            inv = linalg.inv(cvm)
            a = det ** -0.5 * (2 * numpy.pi) ** (-m / 2.0)
            dx = x - mean
            # NOTE(review): numpy.matrixmultiply is the old Numeric API name
            # and does not exist in modern numpy (use numpy.dot) -- confirm
            # which numpy this project targets before running.
            b = -0.5 * numpy.matrixmultiply( \
                    numpy.matrixmultiply(dx, inv), dx)
            return a * numpy.exp(b)
        except OverflowError:
            # happens when the exponent is negative infinity - i.e. b = 0
            # i.e. the inverse of cvm is huge (cvm is almost zero)
            return 0

    def _loglikelihood(self, vectors, priors, means, covariances):
        # Sum of log P(vector) over the data, P being the mixture density.
        llh = 0.0
        for vector in vectors:
            p = 0
            for j in range(len(priors)):
                p += priors[j] * \
                     self._gaussian(means[j], covariances[j], vector)
            llh += numpy.log(p)
        return llh

    def __repr__(self):
        return '<EM Clusterer means=%s>' % list(self._means)
def euclidean_distance(u, v):
    """
    Returns the euclidean distance between vectors u and v. This is equivalent
    to the length of the vector (u - v).
    """
    delta = u - v
    return math.sqrt(numpy.dot(delta, delta))
def cosine_distance(u, v):
    """
    Returns the cosine of the angle between vectors v and u. This is equal to
    u.v / |u||v|.

    (Despite the name, this is a similarity: 1.0 for parallel vectors.)
    """
    norm_u = math.sqrt(numpy.dot(u, u))
    norm_v = math.sqrt(numpy.dot(v, v))
    return numpy.dot(u, v) / (norm_u * norm_v)
def demo():
    """
    Non-interactive demonstration of the clusterers with simple 2-D data.
    """
    from en.parser.nltk_lite import cluster

    # example from figure 14.10, page 519, Manning and Schutze
    vectors = [array(f) for f in [[0.5, 0.5], [1.5, 0.5], [1, 3]]]
    means = [[4, 2], [4, 2.01]]

    clusterer = cluster.EM(means, bias=0.1)
    clusters = clusterer.cluster(vectors, True, trace=True)

    print 'Clustered:', vectors
    print 'As: ', clusters
    print

    # show the learned parameters of each gaussian source
    for c in range(2):
        print 'Cluster:', c
        print 'Prior: ', clusterer._priors[c]
        print 'Mean: ', clusterer._means[c]
        print 'Covar: ', clusterer._covariance_matrices[c]
        print

    # classify a new vector
    vector = array([2, 2])
    print 'classify(%s):' % vector,
    print clusterer.classify(vector)

    # show the classification probabilities
    vector = array([2, 2])
    print 'classification_probdist(%s):' % vector
    pdist = clusterer.classification_probdist(vector)
    for sample in pdist.samples():
        print '%s => %.0f%%' % (sample,
            pdist.prob(sample) *100)

    #
    # The following demo code is broken.
    #
    # # use a set of tokens with 2D indices
    # vectors = [array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]]

    # # test the EM clusterer with means given by k-means (2) and
    # # dimensionality reduction
    # clusterer = cluster.KMeans(2, euclidean_distance, svd_dimensions=1)
    # print 'Clusterer:', clusterer
    # clusters = clusterer.cluster(vectors)
    # means = clusterer.means()
    # print 'Means:', clusterer.means()
    # print

    # clusterer = cluster.EM(means, svd_dimensions=1)
    # clusters = clusterer.cluster(vectors, True)
    # print 'Clusterer:', clusterer
    # print 'Clustered:', str(vectors)[:60], '...'
    # print 'As:', str(clusters)[:60], '...'
    # print

    # # classify a new vector
    # vector = array([3, 3])
    # print 'classify(%s):' % vector,
    # print clusterer.classify(vector)
    # print

    # # show the classification probabilities
    # vector = array([2.2, 2])
    # print 'classification_probdist(%s)' % vector
    # pdist = clusterer.classification_probdist(vector)
    # for sample in pdist.samples():
    #     print '%s => %.0f%%' % (sample, pdist.prob(sample) *100)

if __name__ == '__main__':
    demo()
| Python |
# Natural Language Toolkit: Clusterers
#
# Copyright (C) 2004-2006 University of Melbourne
# Author: Trevor Cohn <tacohn@cs.mu.oz.au>
# Porting: Steven Bird <sb@csse.unimelb.edu.au>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
This module contains a number of basic clustering algorithms. Clustering
describes the task of discovering groups of similar items within a large
collection. It is also described as unsupervised machine learning, as the data
from which it learns is unannotated with class information, unlike the case
for supervised learning.
the quantities required for the majority of supervised learning algorithms.
This problem, the knowledge acquisition bottleneck, is common to most natural
language processing tasks, thus fueling the need for quality unsupervised
approaches.
This module contains a k-means clusterer, E-M clusterer and a group average
agglomerative clusterer (GAAC). All these clusterers involve finding good
cluster groupings for a set of vectors in multi-dimensional space.
The K-means clusterer starts with k arbitrary chosen means then allocates each
vector to the cluster with the closest mean. It then recalculates the means of
each cluster as the centroid of the vectors in the cluster. This process
repeats until the cluster memberships stabilise. This is a hill-climbing
algorithm which may converge to a local maximum. Hence the clustering is
often repeated with random initial means and the most commonly occurring
output means are chosen.
The GAAC clusterer starts with each of the M{N} vectors as singleton clusters.
It then iteratively merges pairs of clusters which have the closest centroids.
This continues until there is only one cluster. The order of merges gives rise
to a dendogram - a tree with the earlier merges lower than later merges. The
membership of a given number of clusters M{c}, M{1 <= c <= N}, can be found by
cutting the dendogram at depth M{c}.
The Gaussian EM clusterer models the vectors as being produced by a mixture
of k Gaussian sources. The parameters of these sources (prior probability,
mean and covariance matrix) are then found to maximise the likelihood of the
given data. This is done with the expectation maximisation algorithm. It
starts with k arbitrarily chosen means, priors and covariance matrices. It
then calculates the membership probabilities for each vector in each of the
clusters - this is the 'E' step. The cluster parameters are then updated in
the 'M' step using the maximum likelihood estimate from the cluster membership
probabilities. This process continues until the likelihood of the data does
not significantly increase.
They all extend the ClusterI interface which defines common operations
available with each clusterer. These operations include:
- cluster: clusters a sequence of vectors
- classify: assign a vector to a cluster
- classification_probdist: give the probability distribution over cluster memberships
The current existing classifiers also extend cluster.VectorSpace, an
abstract class which allows for singular value decomposition (SVD) and vector
normalisation. SVD is used to reduce the dimensionality of the vector space in
such a manner as to preserve as much of the variation as possible, by
reparameterising the axes in order of variability and discarding all bar the
first d dimensions. Normalisation ensures that vectors fall in the unit
hypersphere.
Usage example (see also demo())::
vectors = [array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0]]]
# initialise the clusterer (will also assign the vectors to clusters)
clusterer = cluster.KMeans(2, euclidean_distance)
clusterer.cluster(vectors, True)
# classify a new vector
print clusterer.classify(array([3, 3]))
Note that the vectors must use numpy array-like
objects. nltk_contrib.unimelb.tacohn.SparseArrays may be used for
efficiency when required.
"""
from en.parser.nltk_lite.probability import DictionaryProbDist
import copy, numpy, math, random, sys, types
from numpy import array, linalg
#======================================================================
# Generic interfaces
#======================================================================
class ClusterI:
    """
    Interface covering basic clustering functionality.
    """
    def cluster(self, vectors, assign_clusters=False):
        """
        Assigns the vectors to clusters, learning the clustering parameters
        from the data. Returns a cluster identifier for each vector.
        """
        raise AssertionError()

    def classify(self, token):
        """
        Classifies the token into a cluster, setting the token's CLUSTER
        parameter to that cluster identifier.
        """
        raise AssertionError()

    def likelihood(self, vector, label):
        """
        Returns the likelihood (a float) of the token having the
        corresponding cluster.
        """
        # Default: all-or-nothing likelihood based on classify().
        if self.classify(vector) == label:
            return 1.0
        else:
            return 0.0

    def classification_probdist(self, vector):
        """
        Classifies the token into a cluster, returning
        a probability distribution over the cluster identifiers.
        """
        likelihoods = {}
        # Renamed from `sum` to avoid shadowing the builtin.
        total = 0.0
        for cluster in self.cluster_names():
            likelihoods[cluster] = self.likelihood(vector, cluster)
            total += likelihoods[cluster]
        # Normalise so the likelihoods form a probability distribution.
        for cluster in self.cluster_names():
            likelihoods[cluster] /= total
        return DictionaryProbDist(likelihoods)

    def num_clusters(self):
        """
        Returns the number of clusters.
        """
        # BUG FIX: the original raised AssertError, which is a NameError
        # typo; AssertionError matches the other abstract methods here.
        raise AssertionError()

    def cluster_names(self):
        """
        Returns the names of the clusters.
        """
        return range(self.num_clusters())

    def cluster_name(self, index):
        """
        Returns the names of the cluster at index.
        """
        return index
class VectorSpace(ClusterI):
    """
    Abstract clusterer which takes tokens and maps them into a vector space.
    Optionally performs singular value decomposition to reduce the
    dimensionality.
    """
    def __init__(self, normalise=False, svd_dimensions=None):
        """
        @param normalise: should vectors be normalised to length 1
        @type normalise: boolean
        @param svd_dimensions: number of dimensions to use in reducing vector
            dimensionsionality with SVD
        @type svd_dimensions: int
        """
        # Transposed projection matrix; set only when SVD is performed.
        self._Tt = None
        self._should_normalise = normalise
        self._svd_dimensions = svd_dimensions

    def cluster(self, vectors, assign_clusters=False, trace=False):
        assert len(vectors) > 0
        # normalise the vectors
        if self._should_normalise:
            vectors = list(map(self._normalise, vectors))
        # use SVD to reduce the dimensionality
        if self._svd_dimensions and self._svd_dimensions < len(vectors[0]):
            [u, d, vt] = linalg.svd(numpy.transpose(array(vectors)))
            # BUG FIX: numpy.Float64 does not exist (Numeric-era name);
            # the correct spelling is numpy.float64.
            S = d[:self._svd_dimensions] * \
                numpy.identity(self._svd_dimensions, numpy.float64)
            T = u[:, :self._svd_dimensions]
            Dt = vt[:self._svd_dimensions, :]
            # BUG FIX: numpy.matrixmultiply is the removed Numeric API;
            # numpy.dot is the matrix-product replacement.
            vectors = numpy.transpose(numpy.dot(S, Dt))
            self._Tt = numpy.transpose(T)
        # call abstract method to cluster the vectors
        self.cluster_vectorspace(vectors, trace)
        # assign the vectors to clusters (stray debug print removed)
        if assign_clusters:
            return [self.classify(vector) for vector in vectors]

    def cluster_vectorspace(self, vectors, trace):
        """
        Finds the clusters using the given set of vectors.
        """
        raise AssertionError()

    def classify(self, vector):
        if self._should_normalise:
            vector = self._normalise(vector)
        # BUG FIX: `!= None` on a numpy array is an elementwise comparison
        # with an ambiguous truth value; identity test is intended.
        if self._Tt is not None:
            vector = numpy.dot(self._Tt, vector)
        cluster = self.classify_vectorspace(vector)
        return self.cluster_name(cluster)

    def classify_vectorspace(self, vector):
        """
        Returns the index of the appropriate cluster for the vector.
        """
        raise AssertionError()

    def likelihood(self, vector, label):
        if self._should_normalise:
            vector = self._normalise(vector)
        if self._Tt is not None:
            vector = numpy.dot(self._Tt, vector)
        return self.likelihood_vectorspace(vector, label)

    def likelihood_vectorspace(self, vector, cluster):
        """
        Returns the likelihood of the vector belonging to the cluster.
        """
        predicted = self.classify_vectorspace(vector)
        if cluster == predicted: return 1.0
        else: return 0.0

    def vector(self, vector):
        """
        Returns the vector after normalisation and dimensionality reduction
        """
        if self._should_normalise:
            vector = self._normalise(vector)
        if self._Tt is not None:
            vector = numpy.dot(self._Tt, vector)
        return vector

    def _normalise(self, vector):
        """
        Normalises the vector to unit length.
        """
        return vector / math.sqrt(numpy.dot(vector, vector))
class _DendogramNode:
""" Tree node of a dendogram. """
def __init__(self, value, *children):
self._value = value
self._children = children
def leaves(self, values=True):
if self._children:
leaves = []
for child in self._children:
leaves.extend(child.leaves(values))
return leaves
elif values:
return [self._value]
else:
return [self]
def groups(self, n):
queue = [(self._value, self)]
while len(queue) < n:
priority, node = queue.pop()
if not node._children:
queue.push((priority, node))
break
for child in node._children:
if child._children:
queue.append((child._value, child))
else:
queue.append((0, child))
# makes the earliest merges at the start, latest at the end
queue.sort()
groups = []
for priority, node in queue:
groups.append(node.leaves())
return groups
class Dendogram:
"""
Represents a dendogram, a tree with a specified branching order. This
must be initialised with the leaf items, then iteratively call merge for
each branch. This class constructs a tree representing the order of calls
to the merge function.
"""
def __init__(self, items=[]):
"""
@param items: the items at the leaves of the dendogram
@type items: sequence of (any)
"""
self._items = [_DendogramNode(item) for item in items]
self._original_items = copy.copy(self._items)
self._merge = 1
def merge(self, *indices):
"""
Merges nodes at given indices in the dendogram. The nodes will be
combined which then replaces the first node specified. All other nodes
involved in the merge will be removed.
@param indices: indices of the items to merge (at least two)
@type indices: seq of int
"""
assert len(indices) >= 2
node = _DendogramNode(self._merge, *[self._items[i] for i in indices])
self._merge += 1
self._items[indices[0]] = node
for i in indices[1:]:
del self._items[i]
def groups(self, n):
"""
Finds the n-groups of items (leaves) reachable from a cut at depth n.
@param n: number of groups
@type n: int
"""
if len(self._items) > 1:
root = _DendogramNode(self._merge, *self._items)
else:
root = self._items[0]
return root.groups(n)
def show(self):
"""
Print the dendogram in ASCII art to standard out.
"""
# ASCII rendering characters
JOIN, HLINK, VLINK = '+', '-', '|'
# find the root (or create one)
if len(self._items) > 1:
root = _DendogramNode(self._merge, *self._items)
else:
root = self._items[0]
leaves = self._original_items
# find the bottom row and the best cell width
last_row = [str(leaf._value) for leaf in leaves]
width = max(map(len, last_row)) + 1
lhalf = width / 2
rhalf = width - lhalf - 1
# display functions
def format(centre, left=' ', right=' '):
return '%s%s%s' % (lhalf*left, centre, right*rhalf)
def display(str):
sys.stdout.write(str)
# for each merge, top down
queue = [(root._value, root)]
verticals = [ format(' ') for leaf in leaves ]
while queue:
priority, node = queue.pop()
child_left_leaf = map(lambda c: c.leaves(False)[0], node._children)
indices = map(leaves.index, child_left_leaf)
if child_left_leaf:
min_idx = min(indices)
max_idx = max(indices)
for i in range(len(leaves)):
if leaves[i] in child_left_leaf:
if i == min_idx: display(format(JOIN, ' ', HLINK))
elif i == max_idx: display(format(JOIN, HLINK, ' '))
else: display(format(JOIN, HLINK, HLINK))
verticals[i] = format(VLINK)
elif min_idx <= i <= max_idx:
display(format(HLINK, HLINK, HLINK))
else:
display(verticals[i])
display('\n')
for child in node._children:
if child._children:
queue.append((child._value, child))
queue.sort()
for vertical in verticals:
display(vertical)
display('\n')
# finally, display the last line
display(''.join([item.center(width) for item in last_row]))
display('\n')
def __repr__(self):
if len(self._items) > 1:
root = _DendogramNode(self._merge, *self._items)
else:
root = self._items[0]
leaves = root.leaves(False)
return '<Dendogram with %d leaves>' % len(leaves)
########################################################################
from kmeans import *
from gaac import *
from em import *
| Python |
#!/usr/local/bin/python
#
# Distutils setup script for the Natural Language Toolkit
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Steven Bird <sb@csse.unimelb.edu.au>
# Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
from distutils.core import setup, Extension
# Bind the package to the short name referenced throughout the setup()
# call below.  (Bug fix: "import en.parser.nltk_lite" binds only the
# top-level name "en", so every "nltk_lite.__xxx__" reference raised
# NameError.)
from en.parser import nltk_lite
setup(
    #############################################
    ## Distribution Metadata
    name = "nltk_lite",
    description = "Natural Language Toolkit",
    version = nltk_lite.__version__,
    url = nltk_lite.__url__,
    long_description = nltk_lite.__longdescr__,
    license = nltk_lite.__license__,
    keywords = nltk_lite.__keywords__,
    maintainer = nltk_lite.__maintainer__,
    maintainer_email = nltk_lite.__maintainer_email__,
    author = nltk_lite.__author__,
    author_email = nltk_lite.__author__,
    # platforms = <platforms>,
    #############################################
    ## Package List
    packages = ['nltk_lite', 'nltk_lite.corpora', 'nltk_lite.test',
                'nltk_lite.tokenize', 'nltk_lite.stem', 'nltk_lite.tag',
                'nltk_lite.parse', 'nltk_lite.chat', 'nltk_lite.draw',
                'nltk_lite.misc', 'nltk_lite.model', 'nltk_lite.cluster',
                'nltk_lite.semantics', 'nltk_lite.wordnet',
                'nltk_lite.contrib', 'nltk_lite.contrib.toolbox', 'nltk_lite.etree'],
    )
| Python |
#
# ElementTree
# $Id: ElementInclude.py 1862 2004-06-18 07:31:02Z Fredrik $
#
# limited xinclude support for element trees
#
# history:
# 2003-08-15 fl created
# 2003-11-14 fl fixed default loader
#
# Copyright (c) 2003-2004 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2004 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
##
# Limited XInclude support for the ElementTree package.
##
import copy
import ElementTree
# XInclude namespace URI, and the qualified tag names handled below.
XINCLUDE = "{http://www.w3.org/2001/XInclude}"
XINCLUDE_INCLUDE = XINCLUDE + "include"
XINCLUDE_FALLBACK = XINCLUDE + "fallback"
##
# Fatal include error.
class FatalIncludeError(SyntaxError):
    """Raised when an XInclude directive cannot be processed."""
##
# Default loader. This loader reads an included resource from disk.
#
# @param href Resource reference.
# @param parse Parse mode. Either "xml" or "text".
# @param encoding Optional text encoding.
# @return The expanded resource. If the parse mode is "xml", this
# is an ElementTree instance. If the parse mode is "text", this
# is a Unicode string. If the loader fails, it can return None
# or raise an IOError exception.
# @throws IOError If the loader fails to load the resource.
def default_loader(href, parse, encoding=None):
    """
    Default loader: reads an included resource from disk.

    @param href Resource reference (a file name).
    @param parse Parse mode.  Either "xml" or "text".
    @param encoding Optional text encoding (text mode only).
    @return An element tree root (parse mode "xml") or a string
        (parse mode "text").
    @throws IOError If the resource cannot be opened or read.
    """
    file = open(href)
    try:
        if parse == "xml":
            data = ElementTree.parse(file).getroot()
        else:
            data = file.read()
            if encoding:
                data = data.decode(encoding)
    finally:
        # close the handle even when parsing fails (the original leaked
        # the file on error)
        file.close()
    return data
##
# Expand XInclude directives.
#
# @param elem Root element.
# @param loader Optional resource loader. If omitted, it defaults
# to {@link default_loader}. If given, it should be a callable
# that implements the same interface as <b>default_loader</b>.
# @throws FatalIncludeError If the function fails to include a given
# resource, or if the tree contains malformed XInclude elements.
# @throws IOError If the function fails to load a given resource.
def include(elem, loader=None):
    """
    Expand XInclude directives in-place in the tree rooted at *elem*.

    @param elem Root element.
    @param loader Optional resource loader; defaults to default_loader.
    @throws FatalIncludeError If a resource cannot be loaded, a parse
        mode is unknown, or an xi:fallback element appears outside an
        xi:include.
    """
    if loader is None:
        loader = default_loader
    # look for xinclude elements
    i = 0
    while i < len(elem):
        e = elem[i]
        if e.tag == XINCLUDE_INCLUDE:
            # process xinclude directive
            href = e.get("href")
            parse = e.get("parse", "xml")
            if parse == "xml":
                node = loader(href, parse)
                if node is None:
                    raise FatalIncludeError(
                        "cannot load %r as %r" % (href, parse)
                        )
                # copy so the same loaded tree can be included repeatedly
                node = copy.copy(node)
                if e.tail:
                    # keep the text that followed the xi:include element
                    node.tail = (node.tail or "") + e.tail
                elem[i] = node
            elif parse == "text":
                text = loader(href, parse, e.get("encoding"))
                if text is None:
                    raise FatalIncludeError(
                        "cannot load %r as %r" % (href, parse)
                        )
                if i:
                    # append onto the previous sibling's tail text
                    node = elem[i-1]
                    node.tail = (node.tail or "") + text
                else:
                    # no previous sibling: append onto the parent's text
                    elem.text = (elem.text or "") + text + (e.tail or "")
                del elem[i]
                # the element was removed, so do not advance the index
                continue
            else:
                raise FatalIncludeError(
                    "unknown parse type in xi:include tag (%r)" % parse
                    )
        elif e.tag == XINCLUDE_FALLBACK:
            raise FatalIncludeError(
                "xi:fallback tag must be child of xi:include (%r)" % e.tag
                )
        else:
            # recurse into ordinary elements
            include(e, loader)
        i = i + 1
| Python |
#
# ElementTree
# $Id: ElementPath.py 1858 2004-06-17 21:31:41Z Fredrik $
#
# limited xpath support for element trees
#
# history:
# 2003-05-23 fl created
# 2003-05-28 fl added support for // etc
# 2003-08-27 fl fixed parsing of periods in element names
#
# Copyright (c) 2003-2004 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2004 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
##
# Implementation module for XPath support. There's usually no reason
# to import this module directly; the <b>ElementTree</b> does this for
# you, if needed.
##
import re
# Tokenizer for the supported XPath subset.  Each match yields a tuple
# (operator, tag); exactly one of the two fields is non-empty.  The raw
# string literal avoids invalid escape sequences (e.g. "\." in a plain
# string), which newer Python versions warn about.
xpath_tokenizer = re.compile(
    r"(::|\.\.|\(\)|[/.*:\[\]\(\)@=])|((?:\{[^}]+\})?[^/:\[\]\(\)@=\s]+)|\s+"
    ).findall
class xpath_descendant_or_self:
    """Marker type for a '//' (descendant-or-self) step in a path."""
##
# Wrapper for a compiled XPath.
class Path:
    """Compiled XPath expression (supports the 'path/path' subset only)."""
    ##
    # Create an Path instance from an XPath expression.
    def __init__(self, path):
        # break the expression into (op, tag) tokens
        tokens = xpath_tokenizer(path)
        # the current version supports 'path/path'-style expressions only
        self.path = []
        self.tag = None
        if tokens and tokens[0][0] == "/":
            raise SyntaxError("cannot use absolute path on element")
        while tokens:
            op, tag = tokens.pop(0)
            if tag or op == "*":
                self.path.append(tag or op)
            elif op == ".":
                # 'self' step: no-op
                pass
            elif op == "/":
                # a '/' where a tag was expected means '//'
                self.path.append(xpath_descendant_or_self())
                continue
            else:
                raise SyntaxError("unsupported path syntax (%s)" % op)
            if tokens:
                # after each step, the next token must be a separator
                op, tag = tokens.pop(0)
                if op != "/":
                    raise SyntaxError(
                        "expected path separator (%s)" % (op or tag)
                        )
        if self.path and isinstance(self.path[-1], xpath_descendant_or_self):
            raise SyntaxError("path cannot end with //")
        if len(self.path) == 1 and isinstance(self.path[0], type("")):
            # single-tag path: enables the fast paths in find/findtext
            self.tag = self.path[0]
    ##
    # Find first matching object.
    def find(self, element):
        tag = self.tag
        if tag is None:
            # general case: evaluate the whole path
            nodeset = self.findall(element)
            if not nodeset:
                return None
            return nodeset[0]
        # fast path: single tag, scan direct children only
        for elem in element:
            if elem.tag == tag:
                return elem
        return None
    ##
    # Find text for first matching object.
    def findtext(self, element, default=None):
        tag = self.tag
        if tag is None:
            nodeset = self.findall(element)
            if not nodeset:
                return default
            return nodeset[0].text or ""
        # fast path: single tag, scan direct children only
        for elem in element:
            if elem.tag == tag:
                return elem.text or ""
        return default
    ##
    # Find all matching objects.
    def findall(self, element):
        # evaluate the path step by step against a growing node set
        nodeset = [element]
        index = 0
        while 1:
            try:
                path = self.path[index]
                index = index + 1
            except IndexError:
                # all steps consumed: the node set is the result
                return nodeset
            set = []
            if isinstance(path, xpath_descendant_or_self):
                # '//' step: gather all descendants, optionally filtered
                # by the tag that immediately follows the marker
                try:
                    tag = self.path[index]
                    if not isinstance(tag, type("")):
                        tag = None
                    else:
                        index = index + 1
                except IndexError:
                    tag = None # invalid path
                for node in nodeset:
                    new = list(node.getiterator(tag))
                    if new and new[0] is node:
                        # getiterator includes the node itself; skip it
                        set.extend(new[1:])
                    else:
                        set.extend(new)
            else:
                # plain tag (or '*') step: direct children only
                for node in nodeset:
                    for node in node:
                        if path == "*" or node.tag == path:
                            set.append(node)
            if not set:
                return []
            nodeset = set
# cache of compiled Path objects, keyed by path string
_cache = {}
##
# (Internal) Compile path.
def _compile(path):
    """Return a cached Path object for *path*, compiling on a miss."""
    try:
        return _cache[path]
    except KeyError:
        pass
    compiled = Path(path)
    if len(_cache) >= 100:
        # crude bound on the cache size
        _cache.clear()
    _cache[path] = compiled
    return compiled
##
# Find first matching object.
def find(element, path):
    """Return the first subelement of *element* matching *path*, or None."""
    compiled = _compile(path)
    return compiled.find(element)
##
# Find text for first matching object.
def findtext(element, path, default=None):
    """Return the text of the first match for *path*, or *default*."""
    compiled = _compile(path)
    return compiled.findtext(element, default)
##
# Find all matching objects.
def findall(element, path):
    """Return a list of all subelements of *element* matching *path*."""
    compiled = _compile(path)
    return compiled.findall(element)
| Python |
#
# ElementTree
# $Id: ElementTree.py 2326 2005-03-17 07:45:21Z fredrik $
#
# light-weight XML support for Python 1.5.2 and later.
#
# history:
# 2001-10-20 fl created (from various sources)
# 2001-11-01 fl return root from parse method
# 2002-02-16 fl sort attributes in lexical order
# 2002-04-06 fl TreeBuilder refactoring, added PythonDoc markup
# 2002-05-01 fl finished TreeBuilder refactoring
# 2002-07-14 fl added basic namespace support to ElementTree.write
# 2002-07-25 fl added QName attribute support
# 2002-10-20 fl fixed encoding in write
# 2002-11-24 fl changed default encoding to ascii; fixed attribute encoding
# 2002-11-27 fl accept file objects or file names for parse/write
# 2002-12-04 fl moved XMLTreeBuilder back to this module
# 2003-01-11 fl fixed entity encoding glitch for us-ascii
# 2003-02-13 fl added XML literal factory
# 2003-02-21 fl added ProcessingInstruction/PI factory
# 2003-05-11 fl added tostring/fromstring helpers
# 2003-05-26 fl added ElementPath support
# 2003-07-05 fl added makeelement factory method
# 2003-07-28 fl added more well-known namespace prefixes
# 2003-08-15 fl fixed typo in ElementTree.findtext (Thomas Dartsch)
# 2003-09-04 fl fall back on emulator if ElementPath is not installed
# 2003-10-31 fl markup updates
# 2003-11-15 fl fixed nested namespace bug
# 2004-03-28 fl added XMLID helper
# 2004-06-02 fl added default support to findtext
# 2004-06-08 fl fixed encoding of non-ascii element/attribute names
# 2004-08-23 fl take advantage of post-2.1 expat features
# 2005-02-01 fl added iterparse implementation
# 2005-03-02 fl fixed iterparse support for pre-2.2 versions
#
# Copyright (c) 1999-2005 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2005 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
__all__ = [
# public symbols
"Comment",
"dump",
"Element", "ElementTree",
"fromstring",
"iselement", "iterparse",
"parse",
"PI", "ProcessingInstruction",
"QName",
"SubElement",
"tostring",
"TreeBuilder",
"VERSION", "XML",
"XMLParser", "XMLTreeBuilder",
]
##
# The <b>Element</b> type is a flexible container object, designed to
# store hierarchical data structures in memory. The type can be
# described as a cross between a list and a dictionary.
# <p>
# Each element has a number of properties associated with it:
# <ul>
# <li>a <i>tag</i>. This is a string identifying what kind of data
# this element represents (the element type, in other words).</li>
# <li>a number of <i>attributes</i>, stored in a Python dictionary.</li>
# <li>a <i>text</i> string.</li>
# <li>an optional <i>tail</i> string.</li>
# <li>a number of <i>child elements</i>, stored in a Python sequence</li>
# </ul>
#
# To create an element instance, use the {@link #Element} or {@link
# #SubElement} factory functions.
# <p>
# The {@link #ElementTree} class can be used to wrap an element
# structure, and convert it from and to XML.
##
import string, sys, re
class _SimpleElementPath:
# emulate pre-1.2 find/findtext/findall behaviour
def find(self, element, tag):
for elem in element:
if elem.tag == tag:
return elem
return None
def findtext(self, element, tag, default=None):
for elem in element:
if elem.tag == tag:
return elem.text or ""
return default
def findall(self, element, tag):
if tag[:3] == ".//":
return element.getiterator(tag[3:])
result = []
for elem in element:
if elem.tag == tag:
result.append(elem)
return result
try:
import ElementPath
except ImportError:
# FIXME: issue warning in this case?
ElementPath = _SimpleElementPath()
# TODO: add support for custom namespace resolvers/default namespaces
# TODO: add improved support for incremental parsing
VERSION = "1.2.6"
##
# Internal element class. This class defines the Element interface,
# and provides a reference implementation of this interface.
# <p>
# You should not create instances of this class directly. Use the
# appropriate factory functions instead, such as {@link #Element}
# and {@link #SubElement}.
#
# @see Element
# @see SubElement
# @see Comment
# @see ProcessingInstruction
class _ElementInterface:
    """Reference implementation of the Element interface.

    Stores a tag, an attribute dictionary, optional text/tail strings and
    an ordered list of children (self._children).  Use the Element and
    SubElement factories rather than instantiating this class directly.
    """
    # <tag attrib>text<child/>...</tag>tail
    ##
    # (Attribute) Element tag.
    tag = None
    ##
    # (Attribute) Element attribute dictionary. Where possible, use
    # {@link #_ElementInterface.get},
    # {@link #_ElementInterface.set},
    # {@link #_ElementInterface.keys}, and
    # {@link #_ElementInterface.items} to access
    # element attributes.
    attrib = None
    ##
    # (Attribute) Text before first subelement. This is either a
    # string or the value None, if there was no text.
    text = None
    ##
    # (Attribute) Text after this element's end tag, but before the
    # next sibling element's start tag. This is either a string or
    # the value None, if there was no text.
    tail = None # text after end tag, if any
    def __init__(self, tag, attrib):
        # NOTE: attrib is stored as-is (not copied); the Element factory
        # is responsible for copying caller-supplied dictionaries.
        self.tag = tag
        self.attrib = attrib
        self._children = []
    def __repr__(self):
        return "<Element %s at %x>" % (self.tag, id(self))
    ##
    # Creates a new element object of the same type as this element.
    #
    # @param tag Element tag.
    # @param attrib Element attributes, given as a dictionary.
    # @return A new element instance.
    def makeelement(self, tag, attrib):
        return Element(tag, attrib)
    ##
    # Returns the number of subelements.
    #
    # @return The number of subelements.
    def __len__(self):
        return len(self._children)
    ##
    # Returns the given subelement.
    #
    # @param index What subelement to return.
    # @return The given subelement.
    # @exception IndexError If the given element does not exist.
    def __getitem__(self, index):
        return self._children[index]
    ##
    # Replaces the given subelement.
    #
    # @param index What subelement to replace.
    # @param element The new element value.
    # @exception IndexError If the given element does not exist.
    # @exception AssertionError If element is not a valid object.
    def __setitem__(self, index, element):
        assert iselement(element)
        self._children[index] = element
    ##
    # Deletes the given subelement.
    #
    # @param index What subelement to delete.
    # @exception IndexError If the given element does not exist.
    def __delitem__(self, index):
        del self._children[index]
    ##
    # Returns a list containing subelements in the given range.
    #
    # @param start The first subelement to return.
    # @param stop The first subelement that shouldn't be returned.
    # @return A sequence object containing subelements.
    def __getslice__(self, start, stop):
        # NOTE: __getslice__/__setslice__/__delslice__ are legacy Python 2
        # slicing hooks, kept for compatibility with old interpreters.
        return self._children[start:stop]
    ##
    # Replaces a number of subelements with elements from a sequence.
    #
    # @param start The first subelement to replace.
    # @param stop The first subelement that shouldn't be replaced.
    # @param elements A sequence object with zero or more elements.
    # @exception AssertionError If a sequence member is not a valid object.
    def __setslice__(self, start, stop, elements):
        for element in elements:
            assert iselement(element)
        self._children[start:stop] = list(elements)
    ##
    # Deletes a number of subelements.
    #
    # @param start The first subelement to delete.
    # @param stop The first subelement to leave in there.
    def __delslice__(self, start, stop):
        del self._children[start:stop]
    ##
    # Adds a subelement to the end of this element.
    #
    # @param element The element to add.
    # @exception AssertionError If a sequence member is not a valid object.
    def append(self, element):
        assert iselement(element)
        self._children.append(element)
    ##
    # Inserts a subelement at the given position in this element.
    #
    # @param index Where to insert the new subelement.
    # @exception AssertionError If the element is not a valid object.
    def insert(self, index, element):
        assert iselement(element)
        self._children.insert(index, element)
    ##
    # Removes a matching subelement. Unlike the <b>find</b> methods,
    # this method compares elements based on identity, not on tag
    # value or contents.
    #
    # @param element What element to remove.
    # @exception ValueError If a matching element could not be found.
    # @exception AssertionError If the element is not a valid object.
    def remove(self, element):
        assert iselement(element)
        self._children.remove(element)
    ##
    # Returns all subelements. The elements are returned in document
    # order.
    #
    # @return A list of subelements.
    # @defreturn list of Element instances
    def getchildren(self):
        return self._children
    ##
    # Finds the first matching subelement, by tag name or path.
    #
    # @param path What element to look for.
    # @return The first matching element, or None if no element was found.
    # @defreturn Element or None
    def find(self, path):
        # delegates to the ElementPath module (or its emulation)
        return ElementPath.find(self, path)
    ##
    # Finds text for the first matching subelement, by tag name or path.
    #
    # @param path What element to look for.
    # @param default What to return if the element was not found.
    # @return The text content of the first matching element, or the
    #     default value no element was found. Note that if the element
    #     has is found, but has no text content, this method returns an
    #     empty string.
    # @defreturn string
    def findtext(self, path, default=None):
        return ElementPath.findtext(self, path, default)
    ##
    # Finds all matching subelements, by tag name or path.
    #
    # @param path What element to look for.
    # @return A list or iterator containing all matching elements,
    #    in document order.
    # @defreturn list of Element instances
    def findall(self, path):
        return ElementPath.findall(self, path)
    ##
    # Resets an element. This function removes all subelements, clears
    # all attributes, and sets the text and tail attributes to None.
    def clear(self):
        self.attrib.clear()
        self._children = []
        self.text = self.tail = None
    ##
    # Gets an element attribute.
    #
    # @param key What attribute to look for.
    # @param default What to return if the attribute was not found.
    # @return The attribute value, or the default value, if the
    #     attribute was not found.
    # @defreturn string or None
    def get(self, key, default=None):
        return self.attrib.get(key, default)
    ##
    # Sets an element attribute.
    #
    # @param key What attribute to set.
    # @param value The attribute value.
    def set(self, key, value):
        self.attrib[key] = value
    ##
    # Gets a list of attribute names. The names are returned in an
    # arbitrary order (just like for an ordinary Python dictionary).
    #
    # @return A list of element attribute names.
    # @defreturn list of strings
    def keys(self):
        return self.attrib.keys()
    ##
    # Gets element attributes, as a sequence. The attributes are
    # returned in an arbitrary order.
    #
    # @return A list of (name, value) tuples for all attributes.
    # @defreturn list of (string, string) tuples
    def items(self):
        return self.attrib.items()
    ##
    # Creates a tree iterator. The iterator loops over this element
    # and all subelements, in document order, and returns all elements
    # with a matching tag.
    # <p>
    # If the tree structure is modified during iteration, the result
    # is undefined.
    #
    # @param tag What tags to look for (default is to return all elements).
    # @return A list or iterator containing all the matching elements.
    # @defreturn list or iterator
    def getiterator(self, tag=None):
        # recursively collects matching elements into a flat list
        nodes = []
        if tag == "*":
            tag = None
        if tag is None or self.tag == tag:
            nodes.append(self)
        for node in self._children:
            nodes.extend(node.getiterator(tag))
        return nodes
# compatibility
_Element = _ElementInterface
##
# Element factory. This function returns an object implementing the
# standard Element interface. The exact class or type of that object
# is implementation dependent, but it will always be compatible with
# the {@link #_ElementInterface} class in this module.
# <p>
# The element name, attribute names, and attribute values can be
# either 8-bit ASCII strings or Unicode strings.
#
# @param tag The element name.
# @param attrib An optional dictionary, containing element attributes.
# @param **extra Additional attributes, given as keyword arguments.
# @return An element instance.
# @defreturn Element
def Element(tag, attrib={}, **extra):
    """Element factory: returns a new element with the given tag and
    attributes.  The attribute dictionary is copied, never shared."""
    attributes = dict(attrib)
    attributes.update(extra)
    return _ElementInterface(tag, attributes)
##
# Subelement factory. This function creates an element instance, and
# appends it to an existing element.
# <p>
# The element name, attribute names, and attribute values can be
# either 8-bit ASCII strings or Unicode strings.
#
# @param parent The parent element.
# @param tag The subelement name.
# @param attrib An optional dictionary, containing element attributes.
# @param **extra Additional attributes, given as keyword arguments.
# @return An element instance.
# @defreturn Element
def SubElement(parent, tag, attrib={}, **extra):
    """Subelement factory: creates an element with the given tag and
    attributes, appends it to *parent*, and returns it."""
    attributes = dict(attrib)
    attributes.update(extra)
    child = parent.makeelement(tag, attributes)
    parent.append(child)
    return child
##
# Comment element factory. This factory function creates a special
# element that will be serialized as an XML comment.
# <p>
# The comment string can be either an 8-bit ASCII string or a Unicode
# string.
#
# @param text A string containing the comment string.
# @return An element instance, representing a comment.
# @defreturn Element
def Comment(text=None):
    """Comment factory: returns an element that will be serialized as an
    XML comment (the Comment function itself is used as the tag)."""
    elem = Element(Comment)
    elem.text = text
    return elem
##
# PI element factory. This factory function creates a special element
# that will be serialized as an XML processing instruction.
#
# @param target A string containing the PI target.
# @param text A string containing the PI contents, if any.
# @return An element instance, representing a PI.
# @defreturn Element
def ProcessingInstruction(target, text=None):
    """PI factory: returns an element that will be serialized as an XML
    processing instruction with the given target and optional contents."""
    elem = Element(ProcessingInstruction)
    if text:
        elem.text = target + " " + text
    else:
        elem.text = target
    return elem
PI = ProcessingInstruction
##
# QName wrapper. This can be used to wrap a QName attribute value, in
# order to get proper namespace handling on output.
#
# @param text A string containing the QName value, in the form {uri}local,
# or, if the tag argument is given, the URI part of a QName.
# @param tag Optional tag. If given, the first argument is interpreted as
# an URI, and this argument is interpreted as a local name.
# @return An opaque object, representing the QName.
class QName:
    """Wrapper for a '{uri}local' qualified name, enabling proper
    namespace handling on output."""
    def __init__(self, text_or_uri, tag=None):
        # two-argument form: combine a URI and a local name
        if tag:
            text_or_uri = "{%s}%s" % (text_or_uri, tag)
        self.text = text_or_uri
    def __str__(self):
        return self.text
    def __hash__(self):
        return hash(self.text)
    def __cmp__(self, other):
        # Python 2 comparison hook; orders by the underlying text value
        if isinstance(other, QName):
            return cmp(self.text, other.text)
        return cmp(self.text, other)
##
# ElementTree wrapper class. This class represents an entire element
# hierarchy, and adds some extra support for serialization to and from
# standard XML.
#
# @param element Optional root element.
# @keyparam file Optional file handle or name. If given, the
# tree is initialized with the contents of this XML file.
class ElementTree:
    def __init__(self, element=None, file=None):
        # element: optional root element; file: optional XML source that,
        # when given, is parsed to populate the tree
        assert element is None or iselement(element)
        self._root = element # first node
        if file:
            self.parse(file)
##
# Gets the root element for this tree.
#
# @return An element instance.
# @defreturn Element
    def getroot(self):
        # Returns the root element (None if the tree is empty).
        return self._root
##
# Replaces the root element for this tree. This discards the
# current contents of the tree, and replaces it with the given
# element. Use with care.
#
# @param element An element instance.
    def _setroot(self, element):
        # Replaces the root element, discarding the current tree contents.
        assert iselement(element)
        self._root = element
##
# Loads an external XML document into this element tree.
#
# @param source A file name or file object.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLTreeBuilder} parser is used.
# @return The document root element.
# @defreturn Element
def parse(self, source, parser=None):
if not hasattr(source, "read"):
source = open(source, "rb")
if not parser:
parser = XMLTreeBuilder()
while 1:
data = source.read(32768)
if not data:
break
parser.feed(data)
self._root = parser.close()
return self._root
##
# Creates a tree iterator for the root element. The iterator loops
# over all elements in this tree, in document order.
#
# @param tag What tags to look for (default is to return all elements)
# @return An iterator.
# @defreturn iterator
    def getiterator(self, tag=None):
        # Delegates to the root element: all elements in document order,
        # optionally restricted to a given tag.
        assert self._root is not None
        return self._root.getiterator(tag)
##
# Finds the first toplevel element with given tag.
# Same as getroot().find(path).
#
# @param path What element to look for.
# @return The first matching element, or None if no element was found.
# @defreturn Element or None
def find(self, path):
assert self._root is not None
if path[:1] == "/":
path = "." + path
return self._root.find(path)
##
# Finds the element text for the first toplevel element with given
# tag. Same as getroot().findtext(path).
#
# @param path What toplevel element to look for.
# @param default What to return if the element was not found.
# @return The text content of the first matching element, or the
# default value no element was found. Note that if the element
# has is found, but has no text content, this method returns an
# empty string.
# @defreturn string
def findtext(self, path, default=None):
assert self._root is not None
if path[:1] == "/":
path = "." + path
return self._root.findtext(path, default)
##
# Finds all toplevel elements with the given tag.
# Same as getroot().findall(path).
#
# @param path What element to look for.
# @return A list or iterator containing all matching elements,
# in document order.
# @defreturn list of Element instances
def findall(self, path):
assert self._root is not None
if path[:1] == "/":
path = "." + path
return self._root.findall(path)
##
# Writes the element tree to a file, as XML.
#
# @param file A file name, or a file object opened for writing.
# @param encoding Optional output encoding (default is US-ASCII).
def write(self, file, encoding="us-ascii"):
assert self._root is not None
if not hasattr(file, "write"):
file = open(file, "wb")
if not encoding:
encoding = "us-ascii"
elif encoding != "utf-8" and encoding != "us-ascii":
file.write("<?xml version='1.0' encoding='%s'?>\n" % encoding)
self._write(file, self._root, encoding, {})
def _write(self, file, node, encoding, namespaces):
# write XML to file
tag = node.tag
if tag is Comment:
file.write("<!-- %s -->" % _escape_cdata(node.text, encoding))
elif tag is ProcessingInstruction:
file.write("<?%s?>" % _escape_cdata(node.text, encoding))
else:
items = node.items()
xmlns_items = [] # new namespaces in this scope
try:
if isinstance(tag, QName) or tag[:1] == "{":
tag, xmlns = fixtag(tag, namespaces)
if xmlns: xmlns_items.append(xmlns)
except TypeError:
_raise_serialization_error(tag)
file.write("<" + _encode(tag, encoding))
if items or xmlns_items:
items.sort() # lexical order
for k, v in items:
try:
if isinstance(k, QName) or k[:1] == "{":
k, xmlns = fixtag(k, namespaces)
if xmlns: xmlns_items.append(xmlns)
except TypeError:
_raise_serialization_error(k)
try:
if isinstance(v, QName):
v, xmlns = fixtag(v, namespaces)
if xmlns: xmlns_items.append(xmlns)
except TypeError:
_raise_serialization_error(v)
file.write(" %s=\"%s\"" % (_encode(k, encoding),
_escape_attrib(v, encoding)))
for k, v in xmlns_items:
file.write(" %s=\"%s\"" % (_encode(k, encoding),
_escape_attrib(v, encoding)))
if node.text or len(node):
file.write(">")
if node.text:
file.write(_escape_cdata(node.text, encoding))
for n in node:
self._write(file, n, encoding, namespaces)
file.write("</" + _encode(tag, encoding) + ">")
else:
file.write(" />")
for k, v in xmlns_items:
del namespaces[v]
if node.tail:
file.write(_escape_cdata(node.tail, encoding))
# --------------------------------------------------------------------
# helpers
##
# Checks if an object appears to be a valid element object.
#
# @param An element instance.
# @return A true value if this is an element object.
# @defreturn flag
def iselement(element):
    """Return a true value if *element* appears to be an element object."""
    # FIXME: duck-typing on a single "tag" attribute; checking for
    # tag/attrib/text attributes might be a better idea
    if isinstance(element, _ElementInterface):
        return True
    return hasattr(element, "tag")
##
# Writes an element tree or element structure to sys.stdout. This
# function should be used for debugging only.
# <p>
# The exact output format is implementation dependent. In this
# version, it's written as an ordinary XML file.
#
# @param elem An element tree or an individual element.
def dump(elem):
    """Write an element tree (or a single element) to sys.stdout as XML.

    Debugging aid only; the exact output format is implementation
    dependent.
    """
    if not isinstance(elem, ElementTree):
        elem = ElementTree(elem)
    elem.write(sys.stdout)
    trailing = elem.getroot().tail
    # make sure the dump ends with a newline
    if not (trailing and trailing.endswith("\n")):
        sys.stdout.write("\n")
def _encode(s, encoding):
try:
return s.encode(encoding)
except AttributeError:
return s # 1.5.2: assume the string uses the right encoding
if sys.version[:3] == "1.5":
_escape = re.compile(r"[&<>\"\x80-\xff]+") # 1.5.2
else:
_escape = re.compile(eval(r'u"[&<>\"\u0080-\uffff]+"'))
_escape_map = {
"&": "&",
"<": "<",
">": ">",
'"': """,
}
_namespace_map = {
# "well-known" namespace prefixes
"http://www.w3.org/XML/1998/namespace": "xml",
"http://www.w3.org/1999/xhtml": "html",
"http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
"http://schemas.xmlsoap.org/wsdl/": "wsdl",
}
def _raise_serialization_error(text):
raise TypeError(
"cannot serialize %r (type %s)" % (text, type(text).__name__)
)
def _encode_entity(text, pattern=_escape):
    """Map reserved and non-ascii characters in *text* to entity
    references and return the result as an ascii string."""
    def escape_entities(m, map=_escape_map):
        chunks = []
        for char in m.group():
            entity = map.get(char)
            if entity is None:
                # no named entity: fall back to a numeric reference
                entity = "&#%d;" % ord(char)
            chunks.append(entity)
        return string.join(chunks, "")
    try:
        return _encode(pattern.sub(escape_entities, text), "ascii")
    except TypeError:
        _raise_serialization_error(text)
#
# the following functions assume an ascii-compatible encoding
# (or "utf-16")
def _escape_cdata(text, encoding=None, replace=string.replace):
    """Escape character data: &, < and > become entity references.

    (The replacement strings had been decoded to identity mappings in
    the source -- e.g. replacing "&" with "&" -- which made this a
    no-op; restored to the standard XML entities.)
    """
    try:
        if encoding:
            try:
                text = _encode(text, encoding)
            except UnicodeError:
                # not representable in the target encoding: use entities
                return _encode_entity(text)
        # "&" must be escaped first, or it would re-escape the entities
        text = replace(text, "&", "&amp;")
        text = replace(text, "<", "&lt;")
        text = replace(text, ">", "&gt;")
        return text
    except (TypeError, AttributeError):
        _raise_serialization_error(text)
def _escape_attrib(text, encoding=None, replace=string.replace):
    """Escape an attribute value: &, quotes, < and > become entity
    references.

    (The replacement strings had been decoded to identity mappings in
    the source; restored to the standard XML entities.)
    """
    try:
        if encoding:
            try:
                text = _encode(text, encoding)
            except UnicodeError:
                return _encode_entity(text)
        # "&" must be escaped first, or it would re-escape the entities
        text = replace(text, "&", "&amp;")
        text = replace(text, "'", "&apos;") # FIXME: overkill
        text = replace(text, "\"", "&quot;")
        text = replace(text, "<", "&lt;")
        text = replace(text, ">", "&gt;")
        return text
    except (TypeError, AttributeError):
        _raise_serialization_error(text)
def fixtag(tag, namespaces):
    """Split a decorated tag (of the form {uri}local) into a prefixed
    tag, plus the xmlns declaration that introduces the prefix when it
    is new in this scope (None otherwise)."""
    if isinstance(tag, QName):
        tag = tag.text
    uri, local = string.split(tag[1:], "}", 1)
    declaration = None
    prefix = namespaces.get(uri)
    if prefix is None:
        # first occurrence of this namespace in the current scope
        prefix = _namespace_map.get(uri)
        if prefix is None:
            prefix = "ns%d" % len(namespaces)
        namespaces[uri] = prefix
        # the "xml" prefix is predeclared and never needs an xmlns
        if prefix != "xml":
            declaration = ("xmlns:%s" % prefix, uri)
    return "%s:%s" % (prefix, local), declaration
##
# Parses an XML document into an element tree.
#
# @param source A filename or file object containing XML data.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLTreeBuilder} parser is used.
# @return An ElementTree instance
def parse(source, parser=None):
    """Parse an XML document from *source* (a filename or file object)
    and return a new ElementTree instance; *parser* optionally overrides
    the standard XMLTreeBuilder."""
    doc = ElementTree()
    doc.parse(source, parser)
    return doc
##
# Parses an XML document into an element tree incrementally, and reports
# what's going on to the user.
#
# @param source A filename or file object containing XML data.
# @param events A list of events to report back. If omitted, only "end"
# events are reported.
# @return A (event, elem) iterator.
class iterparse:
    """Incremental parser: parses an XML document and reports parse
    events ("start", "end", "start-ns", "end-ns") as (event, elem)
    pairs, in document order."""

    def __init__(self, source, events=None):
        # source: a filename or file object containing XML data.
        # events: list of event names to report; defaults to ["end"].
        if not hasattr(source, "read"):
            source = open(source, "rb")
        self._file = source
        self._events = []
        self._index = 0
        self.root = self._root = None
        self._parser = XMLTreeBuilder()
        # wire up the parser for event reporting
        parser = self._parser._parser
        append = self._events.append
        if events is None:
            events = ["end"]
        for event in events:
            if event == "start":
                try:
                    # prefer expat's new-style (ordered) attribute
                    # reporting when available
                    parser.ordered_attributes = 1
                    parser.specified_attributes = 1
                    def handler(tag, attrib_in, event=event, append=append,
                                start=self._parser._start_list):
                        append((event, start(tag, attrib_in)))
                    parser.StartElementHandler = handler
                except AttributeError:
                    def handler(tag, attrib_in, event=event, append=append,
                                start=self._parser._start):
                        append((event, start(tag, attrib_in)))
                    parser.StartElementHandler = handler
            elif event == "end":
                def handler(tag, event=event, append=append,
                            end=self._parser._end):
                    append((event, end(tag)))
                parser.EndElementHandler = handler
            elif event == "start-ns":
                def handler(prefix, uri, event=event, append=append):
                    try:
                        uri = _encode(uri, "ascii")
                    except UnicodeError:
                        pass
                    append((event, (prefix or "", uri)))
                parser.StartNamespaceDeclHandler = handler
            elif event == "end-ns":
                def handler(prefix, event=event, append=append):
                    append((event, None))
                parser.EndNamespaceDeclHandler = handler

    def next(self):
        # Return the next buffered (event, elem) pair, feeding more data
        # to the parser whenever the event buffer runs dry.
        while 1:
            try:
                item = self._events[self._index]
            except IndexError:
                if self._parser is None:
                    # document fully parsed: publish the root and stop
                    self.root = self._root
                    # StopIteration does not exist before Python 2.2;
                    # evaluating the name raises NameError there, and we
                    # fall back to the sequence-protocol IndexError
                    try:
                        raise StopIteration
                    except NameError:
                        raise IndexError
                # load event buffer
                del self._events[:]
                self._index = 0
                data = self._file.read(16384)
                if data:
                    self._parser.feed(data)
                else:
                    self._root = self._parser.close()
                    self._parser = None
            else:
                self._index = self._index + 1
                return item

    # iterator protocol on Python 2.2+; older interpreters fall back to
    # the sequence protocol (__getitem__) to emulate iteration
    try:
        iter
        def __iter__(self):
            return self
    except NameError:
        def __getitem__(self, index):
            return self.next()
##
# Parses an XML document from a string constant. This function can
# be used to embed "XML literals" in Python code.
#
# @param source A string containing XML data.
# @return An Element instance.
# @defreturn Element
def XML(text):
    """Parse an XML document from a string constant and return the root
    element.  Handy for embedding "XML literals" in Python code."""
    builder = XMLTreeBuilder()
    builder.feed(text)
    return builder.close()
##
# Parses an XML document from a string constant, and also returns
# a dictionary which maps from element id:s to elements.
#
# @param source A string containing XML data.
# @return A tuple containing an Element instance and a dictionary.
# @defreturn (Element, dictionary)
def XMLID(text):
    """Parse an XML document from a string constant and return a
    (root element, id-to-element dictionary) tuple."""
    builder = XMLTreeBuilder()
    builder.feed(text)
    root = builder.close()
    by_id = {}
    # index every element that carries an "id" attribute
    for node in root.getiterator():
        node_id = node.get("id")
        if node_id:
            by_id[node_id] = node
    return root, by_id
##
# Parses an XML document from a string constant. Same as {@link #XML}.
#
# @def fromstring(text)
# @param source A string containing XML data.
# @return An Element instance.
# @defreturn Element
fromstring = XML
##
# Generates a string representation of an XML element, including all
# subelements.
#
# @param element An Element instance.
# @return An encoded string containing the XML data.
# @defreturn string
def tostring(element, encoding=None):
    """Generate an encoded string representation of *element*,
    including all subelements."""
    parts = []
    class _Sink:
        # minimal write-only file substitute
        pass
    sink = _Sink()
    sink.write = parts.append
    ElementTree(element).write(sink, encoding)
    return string.join(parts, "")
##
# Generic element structure builder. This builder converts a sequence
# of {@link #TreeBuilder.start}, {@link #TreeBuilder.data}, and {@link
# #TreeBuilder.end} method calls to a well-formed element structure.
# <p>
# You can use this class to build an element structure using a custom XML
# parser, or a parser for some other XML-like format.
#
# @param element_factory Optional element factory. This factory
# is called to create new Element instances, as necessary.
class TreeBuilder:
    """Generic element structure builder: converts a sequence of
    start/data/end method calls into a well-formed element structure.
    Usable with any custom XML (or XML-like) parser."""

    def __init__(self, element_factory=None):
        # element_factory: optional callable used to create new Element
        # instances; defaults to _ElementInterface.
        self._data = [] # data collector
        self._elem = [] # element stack
        self._last = None # last element
        self._tail = None # true if we're after an end tag
        if element_factory is None:
            element_factory = _ElementInterface
        self._factory = element_factory
    ##
    # Flushes the parser buffers, and returns the toplevel document
    # element.
    #
    # @return An Element instance.
    # @defreturn Element
    def close(self):
        assert len(self._elem) == 0, "missing end tags"
        assert self._last != None, "missing toplevel element"
        return self._last
    def _flush(self):
        # Attach accumulated character data to the last element: as
        # .tail if we're just past an end tag, as .text otherwise.
        if self._data:
            if self._last is not None:
                text = string.join(self._data, "")
                if self._tail:
                    assert self._last.tail is None, "internal error (tail)"
                    self._last.tail = text
                else:
                    assert self._last.text is None, "internal error (text)"
                    self._last.text = text
            self._data = []
    ##
    # Adds text to the current element.
    #
    # @param data A string.  This should be either an 8-bit string
    #    containing ASCII text, or a Unicode string.
    def data(self, data):
        self._data.append(data)
    ##
    # Opens a new element.
    #
    # @param tag The element name.
    # @param attrib A dictionary containing element attributes.
    # @return The opened element.
    # @defreturn Element
    def start(self, tag, attrs):
        self._flush()
        self._last = elem = self._factory(tag, attrs)
        if self._elem:
            self._elem[-1].append(elem)
        self._elem.append(elem)
        self._tail = 0
        return elem
    ##
    # Closes the current element.
    #
    # @param tag The element name.
    # @return The closed element.
    # @defreturn Element
    def end(self, tag):
        self._flush()
        self._last = self._elem.pop()
        assert self._last.tag == tag,\
               "end tag mismatch (expected %s, got %s)" % (
                   self._last.tag, tag)
        self._tail = 1
        return self._last
##
# Element structure builder for XML source data, based on the
# <b>expat</b> parser.
#
# @keyparam target Target object. If omitted, the builder uses an
# instance of the standard {@link #TreeBuilder} class.
# @keyparam html Predefine HTML entities. This flag is not supported
# by the current implementation.
# @see #ElementTree
# @see #TreeBuilder
class XMLTreeBuilder:
    """Element structure builder for XML source data, based on the
    expat parser."""

    def __init__(self, html=0, target=None):
        # html: predefine HTML entities -- not supported by this
        # implementation.
        # target: target builder object; defaults to a TreeBuilder.
        try:
            from xml.parsers import expat
        except ImportError:
            raise ImportError(
                "No module named expat; use SimpleXMLTreeBuilder instead"
                )
        # "}" as namespace separator makes expat report qualified names
        # as "uri}local"; _fixname rewrites these to "{uri}local"
        self._parser = parser = expat.ParserCreate(None, "}")
        if target is None:
            target = TreeBuilder()
        self._target = target
        self._names = {} # name memo cache
        # callbacks
        parser.DefaultHandlerExpand = self._default
        parser.StartElementHandler = self._start
        parser.EndElementHandler = self._end
        parser.CharacterDataHandler = self._data
        # let expat do the buffering, if supported
        try:
            self._parser.buffer_text = 1
        except AttributeError:
            pass
        # use new-style attribute handling, if supported
        try:
            self._parser.ordered_attributes = 1
            self._parser.specified_attributes = 1
            parser.StartElementHandler = self._start_list
        except AttributeError:
            pass
        encoding = None
        if not parser.returns_unicode:
            encoding = "utf-8"
        # target.xml(encoding, None)
        self._doctype = None # list of doctype tokens while inside one
        self.entity = {} # user-supplied map for undefined entities

    def _fixtext(self, text):
        # convert text string to ascii, if possible
        try:
            return _encode(text, "ascii")
        except UnicodeError:
            return text

    def _fixname(self, key):
        # expand qname, and convert name string to ascii, if possible
        try:
            name = self._names[key]
        except KeyError:
            name = key
            if "}" in name:
                # expat gave us "uri}local"; turn it into "{uri}local"
                name = "{" + name
            self._names[key] = name = self._fixtext(name)
        return name

    def _start(self, tag, attrib_in):
        # old-style expat callback: attributes arrive as a dict
        fixname = self._fixname
        tag = fixname(tag)
        attrib = {}
        for key, value in attrib_in.items():
            attrib[fixname(key)] = self._fixtext(value)
        return self._target.start(tag, attrib)

    def _start_list(self, tag, attrib_in):
        # new-style expat callback: attributes arrive as a flat
        # [name, value, name, value, ...] list
        fixname = self._fixname
        tag = fixname(tag)
        attrib = {}
        if attrib_in:
            for i in range(0, len(attrib_in), 2):
                attrib[fixname(attrib_in[i])] = self._fixtext(attrib_in[i+1])
        return self._target.start(tag, attrib)

    def _data(self, text):
        return self._target.data(self._fixtext(text))

    def _end(self, tag):
        return self._target.end(self._fixname(tag))

    def _default(self, text):
        # fallback handler: entity references and doctype bookkeeping
        prefix = text[:1]
        if prefix == "&":
            # deal with undefined entities
            try:
                self._target.data(self.entity[text[1:-1]])
            except KeyError:
                from xml.parsers import expat
                raise expat.error(
                    "undefined entity %s: line %d, column %d" %
                    (text, self._parser.ErrorLineNumber,
                     self._parser.ErrorColumnNumber)
                    )
        elif prefix == "<" and text[:9] == "<!DOCTYPE":
            self._doctype = [] # inside a doctype declaration
        elif self._doctype is not None:
            # parse doctype contents
            if prefix == ">":
                self._doctype = None
                return
            text = string.strip(text)
            if not text:
                return
            self._doctype.append(text)
            n = len(self._doctype)
            if n > 2:
                type = self._doctype[1]
                if type == "PUBLIC" and n == 4:
                    name, type, pubid, system = self._doctype
                elif type == "SYSTEM" and n == 3:
                    name, type, system = self._doctype
                    pubid = None
                else:
                    return
                if pubid:
                    # strip the surrounding quotes
                    pubid = pubid[1:-1]
                self.doctype(name, pubid, system[1:-1])
                self._doctype = None
    ##
    # Handles a doctype declaration.
    #
    # @param name Doctype name.
    # @param pubid Public identifier.
    # @param system System identifier.
    def doctype(self, name, pubid, system):
        pass
    ##
    # Feeds data to the parser.
    #
    # @param data Encoded data.
    def feed(self, data):
        self._parser.Parse(data, 0)
    ##
    # Finishes feeding data to the parser.
    #
    # @return An element structure.
    # @defreturn Element
    def close(self):
        self._parser.Parse("", 1) # end of data
        tree = self._target.close()
        del self._target, self._parser # get rid of circular references
        return tree
# compatibility
XMLParser = XMLTreeBuilder  # older name for the expat-based builder
| Python |
# $Id: __init__.py 1821 2004-06-03 16:57:49Z fredrik $
# elementtree package
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2004 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
| Python |
#!/usr/local/bin/python
#
# Distutils setup script for the Natural Language Toolkit
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Steven Bird <sb@csse.unimelb.edu.au>
# Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
from distutils.core import setup, Extension
import en.parser.nltk_lite
setup(
    #############################################
    ## Distribution Metadata
    name = "nltk_lite",
    description = "Natural Language Toolkit",
    # NOTE(review): the module is imported above as "en.parser.nltk_lite",
    # which binds only the name "en" -- the bare name "nltk_lite" used
    # previously was a NameError.  Use the fully-qualified path.
    version = en.parser.nltk_lite.__version__,
    url = en.parser.nltk_lite.__url__,
    long_description = en.parser.nltk_lite.__longdescr__,
    license = en.parser.nltk_lite.__license__,
    keywords = en.parser.nltk_lite.__keywords__,
    maintainer = en.parser.nltk_lite.__maintainer__,
    maintainer_email = en.parser.nltk_lite.__maintainer_email__,
    author = en.parser.nltk_lite.__author__,
    # NOTE(review): author_email deliberately left pointing at
    # __author__, matching the original metadata.
    author_email = en.parser.nltk_lite.__author__,
    # platforms = <platforms>,
    #############################################
    ## Package List
    packages = ['nltk_lite', 'nltk_lite.corpora', 'nltk_lite.test',
                'nltk_lite.tokenize', 'nltk_lite.stem', 'nltk_lite.tag',
                'nltk_lite.parse', 'nltk_lite.chat', 'nltk_lite.draw',
                'nltk_lite.misc', 'nltk_lite.model', 'nltk_lite.cluster',
                'nltk_lite.semantics', 'nltk_lite.wordnet',
                'nltk_lite.contrib', 'nltk_lite.contrib.toolbox', 'nltk_lite.etree'],
    )
| Python |
# Natural Language Toolkit: Language Models
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Steven Bird <sb@csse.unimelb.edu.au>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
class ModelI(object):
    """
    Processing interface for assigning a probability to the next word.
    Concrete language models must override every method below.
    """

    def __init__(self):
        """Create a new language model."""
        raise NotImplementedError()

    def train(self, text):
        """Train the model on the given text."""
        raise NotImplementedError()

    def probability(self, word, context):
        """Evaluate the probability of *word* in *context*."""
        raise NotImplementedError()

    def choose_random_word(self, context):
        """Randomly select a word that is likely to appear in *context*."""
        raise NotImplementedError()

    def entropy(self, text):
        """Evaluate the total entropy of a message with respect to the
        model: the sum of the log probability of each word."""
        raise NotImplementedError()
| Python |
# Marshaling code, contributed by Tiago Tresoldi
# This saves/loads models to/from plain text files.
# Unlike Python's shelve and pickle utilities,
# this is useful for inspecting or tweaking the models.
# We may incorporate this as a marshal method in each model.
# TODO: describe each tagger marshal format in the epydocs?
from itertools import islice
import re
import en.parser.nltk_lite.tag as tag
from en.parser.nltk_lite.corpora import brown
# marshal-classes
class MarshalDefault(tag.Default):
    _classname = "DefaultTagger"

    def marshal(self, filename):
        """
        Marshals (saves to a plain text file) the tagger model.  The
        entire model is just the default tag string.

        @param filename: Name of the file to which save the model (will
                         be overwritten if it already exists).
        @type filename: C{string}
        """
        out = open(filename, "w")
        out.write(self._tag)
        out.close()

    def unmarshal(self, filename):
        """
        Unmarshals (loads from a plain text file) the tagger model.  For
        safety, this operation is intended to be performed only on newly
        created taggers (i.e., without any previous model).

        @param filename: Name of the file from which the model will
                         be read.
        @type filename: C{string}
        """
        src = open(filename, "r")
        self._tag = src.read()
        src.close()
class MarshalUnigram(tag.Unigram):
    _classname = "UnigramTagger"

    def marshal(self, filename):
        """
        Marshals (saves to a plain text file) the tagger model, one
        "text:tag" entry per line.

        @param filename: Name of the file to which save the model (will
                         be overwritten if it already exists).
        @type filename: C{string}
        """
        out = open(filename, "w")
        out.writelines(["%s:%s\n" % pair for pair in self._model.iteritems()])
        out.close()

    def unmarshal(self, filename):
        """
        Unmarshals (loads from a plain text file) the tagger model.  For
        safety, this operation is intended to be performed only on newly
        created taggers (i.e., without any previous model).

        @param filename: Name of the file from which the model will
                         be read.
        @type filename: C{string}
        """
        src = open(filename, "r")
        # non-greedy tag group: the last ":" on the line separates
        # text from tag
        entry = re.compile(r'^(.+):(.+?)$', re.UNICODE)
        for line in src.readlines():
            text, model_tag = entry.match(line).groups()
            self._model[text] = model_tag
        src.close()
class MarshalAffix(tag.Affix):
    _classname = "AffixTagger"

    def marshal(self, filename):
        """
        Marshals (saves to a plain text file) the tagger model: two
        header lines (length, minlength) followed by one "text:tag"
        entry per line.

        @param filename: Name of the file to which save the model (will
                         be overwritten if it already exists).
        @type filename: C{string}
        """
        out = open(filename, "w")
        out.write("length %i\n" % self._length)
        out.write("minlength %i\n" % self._minlength)
        for text, model_tag in self._model.iteritems():
            out.write("%s:%s\n" % (text, model_tag))
        out.close()

    def unmarshal(self, filename):
        """
        Unmarshals (loads from a plain text file) the tagger model.  For
        safety, this operation is intended to be performed only on newly
        created taggers (i.e., without any previous model).

        @param filename: Name of the file from which the model will
                         be read.
        @type filename: C{string}
        """
        src = open(filename, "r")
        lines = src.readlines()
        # will fail if the "length "/"minlength " headers are missing
        self._length = int(lines[0].split("length ")[1])
        self._minlength = int(lines[1].split("minlength ")[1])
        entry = re.compile(r'^(.+):(.+?)$', re.UNICODE)
        for line in lines[2:]:
            text, model_tag = entry.match(line).groups()
            self._model[text] = model_tag
        src.close()
class MarshalNgram (tag.Ngram):
    _classname = "NgramTagger"

    def marshal (self, filename):
        """
        Marshals (saves to a plain text file) the tagger model, one
        "[context]:text:tag" entry per line.

        @param filename: Name of the file to which save the model (will
                         be overwritten if it already exists).
        @type filename: C{string}
        """
        handler = file(filename, "w")
        handler.write("n %i\n" % self._n)
        for entry in self._model:
            # model keys are (context, text) pairs; values are tags
            context, text, tag = entry[0], entry[1], self._model[entry]
            try:
                entry_str = "[%s]:%s:%s\n" % (":".join(context), text, tag)
                handler.write(entry_str)
            except TypeError:
                # None found in 'context', pass silently
                pass
        handler.close()

    def unmarshal (self, filename):
        """
        Unmarshals (loads from a plain text file) the tagger model. For
        safety, this operation is intended to be performed only on
        newly created taggers (i.e., without any previous model).

        @param filename: Name of the file from which the model will
                         be read.
        @type filename: C{string}
        """
        handler = file(filename, "r")
        lines = handler.readlines()
        # will fail if "n " is not present
        self._n = int(lines[0].split("n ")[1])
        pattern = re.compile(r'^\[(.+)\]:(.+):(.+?)$', re.UNICODE)
        # As the separator-char ":" can be used as a tag or as a text,
        # 'context_pattern' is built based on the context's size (self._n),
        # for example:
        #   self._n = 2 -> r'^(.+?)$', like 'tag1'
        #   self._n = 3 -> r'^(.+?):(.+?)$', like 'tag1:tag2'
        #   self._n = 4 -> r'^(.+?):(.+?):(.+?)$', like 'tag1:tag2:tag3'
        context_pattern_str = r'^(.+?)%s$' % ( r':(.+?)' * (self._n-2) )
        context_pattern = re.compile(context_pattern_str, re.UNICODE)
        for line in lines[1:]:
            m = re.match(pattern, line)
            context, text, tag = m.groups()
            # split the context back into its n-1 component tags
            c_m = re.match(context_pattern, context)
            key = (c_m.groups(), text)
            self._model[key] = tag
        handler.close()
def demo ():
    """Demonstrate loading a marshalled n-gram tagger model.

    NOTE(review): assumes a previously marshalled model exists in the
    file "ngram.test" -- run the commented-out train/marshal lines
    first to create it.
    """
    # load train corpus
    train_sents = list(islice(brown.tagged(), 500))
    # create taggers
    tagger = MarshalNgram(3)
    #tagger.train(train_sents)
    #tagger.marshal("ngram.test")
    tagger.unmarshal("ngram.test")
    print tagger._model
# Natural Language Toolkit: Paradigm Visualisation
#
# Copyright (C) 2005 University of Melbourne
# Author: Will Hardy
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
# Parses a paradigm query and produces an XML representation of
# that query. This is part of a Python implementation of David
# Penton's paradigm visualisation model.
#This is the query XML version of "table(person, number, content)"
#
#<?xml version="1.0"?>
#<document>
# <parse-tree>
# <operator opcode="table" instruction="1">
# <operand type="domain"
# arg="horizontal">person</operand>
# <operand type="domain"
# arg="vertical">number</operand>
# <operand type="domain"
# arg="cell">content</operand>
# </operator>
# </parse-tree>
#</document>
from en.parser.nltk_lite import tokenize
from en.parser.nltk_lite import parse
from en.parser.nltk_lite.parse import cfg
from re import *
class ParadigmQuery(object):
    """
    Class to read and parse a paradigm visualisation query
    """
    def __init__(self, p_string=None):
        """
        Construct a query.
        Setup various attributes and parse given string

        @param p_string: optional query string; if given it is parsed
            immediately.
        """
        self.nltktree = None
        self.string = p_string
        self.parseList = None
        self.nltkTree = None
        self.parseTree = None
        self.xml = None
        # If p_string was given, parse it
        # ("<>" is the Python 2 spelling of "!=")
        if p_string <> None:
            self.parse(p_string)

    def parse(self, p_string):
        """
        Parses a string and stores the resulting hierarchy of "domains"
        "hierarchies" and "tables"

        For the sake of NLP I've parsed the string using the nltk_lite
        context free grammar library.

        A query is a "sentence" and can either be a domain, hierarchy or a table.
        A domain is simply a word.
        A hierarchy is expressed as "domain/domain"
        A table is exressed as "table(sentence, sentence, sentence)"

        Internally the query is represented as a nltk_lite.parse.tree

        Process:
          1. string is tokenized
          2. develop a context free grammar
          3. parse
          4. convert to a tree representation
        """
        self.nltktree = None
        # Store the query string
        self.string = p_string
        """
        1. Tokenize
        ------------------------------------------------------------------------
        """
        # Tokenize the query string, allowing only strings, parentheses,
        # forward slashes and commas.
        re_all = r'table[(]|\,|[)]|[/]|\w+'
        data_tokens = tokenize.regexp(self.string, re_all)
        """
        2. Develop a context free grammar
        ------------------------------------------------------------------------
        """
        # Develop a context free grammar
        # S = sentence, T = table, H = hierarchy, D = domain
        O, T, H, D = cfg.nonterminals('O, T, H, D')
        # Specify the grammar
        productions = (
            # A sentence can be either a table, hierarchy or domain
            cfg.Production(O, [D]), cfg.Production(O, [H]), cfg.Production(O, [T]),
            # A table must be the following sequence:
            # "table(", sentence, comma, sentence, comma, sentence, ")"
            cfg.Production(T, ['table(', O, ',', O, ',', O, ')']),
            # A hierarchy must be the following sequence:
            # domain, forward slash, domain
            cfg.Production(H, [D, '/', D]),
            # domain, forward slash, another operator
            cfg.Production(H, [D, '/', O])
            )
        # Add domains to the cfg productions
        # A domain is a token that is entirely word chars
        re_domain = compile(r'^\w+$')
        # Try every token and add if it matches the above regular expression
        for tok in data_tokens:
            if re_domain.match(tok):
                # trailing comma makes a one-element tuple for "+" below
                prod = cfg.Production(D,[tok]),
                productions = productions + prod
        # Make a grammar out of our productions
        grammar = cfg.Grammar(O, productions)
        rd_parser = parse.RecursiveDescent(grammar)
        # Tokens need to be redefined.
        # It disappears after first use, and I don't know why.
        # (tokenize.regexp presumably returns a one-shot iterator)
        tokens = tokenize.regexp(self.string, re_all)
        toklist = list(tokens)
        """
        3. Parse using the context free grammar
        ------------------------------------------------------------------------
        """
        # Store the parsing.
        # Only the first one, as the grammar should be completely nonambiguous.
        try:
            self.parseList = rd_parser.get_parse_list(toklist)[0]
        except IndexError:
            print "Could not parse query."
            return
        """
        4. Refine and convert to a Tree representation
        ------------------------------------------------------------------------
        """
        # Set the nltk_lite.parse.tree tree for this query to the global sentence
        # (note: the local name "string" shadows any string module here)
        string = str(self.parseList)
        string2 = string.replace(":","").replace("')'","").replace("table(","").replace("','","").replace("'","").replace("/","")
        self.nltktree = parse.tree.bracket_parse(string2)
        # Store the resulting nltk_lite.parse.tree tree
        self.parseTree = QuerySentence(self.nltktree)
        self.xml = self.parseTree.toXML()

    def getTree(self):
        """
        Returns the results from the CFG parsing
        """
        if self.string == None:
            print "No string has been parsed. Please use parse(string)."
            return None
        return self.nltktree

    def getXML(self):
        """
        This XML is written without the use of SAX or DOM, it is a straight
        translation of the parsed string. This may be slightly dangerous, but
        the document is very simple. If I have time, this may be reimplemented.
        """
        if self.string == None:
            print "No string has been parsed. Please use parse(string)."
            return None
        return '<?xml version="1.0"?>\n<document><parse-tree>' + self.xml \
               + "</parse-tree></document>"
# Additional Classes for handling The various types of recursive operations
class QuerySentence(object):
    """
    Handles the XML export of sentences
    """
    def __init__(self, tree):
        self.tree = tree
        # First character of the subtree's node label ("O", "D", "H" or
        # "T") selects the wrapper class.  NOTE: the local name "type"
        # shadows the builtin.
        type = str(tree[0])[1:2]
        # Move on, nothing to see here
        if type == "O":
            self.child = QuerySentence(tree[0])
            self.content = self.child.content
        # Get the child and replicate the data
        elif type == "D":
            self.child = QueryDomain(tree[0])
            self.content = self.child.content
        elif type == "H":
            self.child = QueryHierarchy(tree[0])
            self.root = self.child.root
            self.leaf = self.child.leaf
        elif type == "T":
            self.child = QueryTable(tree[0])
            self.horizontal = self.child.horizontal
            self.vertical = self.child.vertical
        # Otherwise, must simply be a domain...
        else:
            self.child = QueryDomain(tree[0])
            self.content = self.child.content
        # Mirror the child's type so enclosing operators can inspect it.
        # NOTE(review): placed after the if/elif chain so it runs for
        # every branch -- confirm against the original layout.
        self.type = self.child.type

    def __str__(self):
        return str(self.tree[0])

    def toXML(self):
        """
        Export this class to an xml string
        """
        return self.child.toXML()
class QueryDomain(object):
    """
    Handles the XML export of the domain operation.  A domain is a
    leaf: its only payload is the domain word itself.
    """
    def __init__(self, tree):
        self.type = 'domain'
        self.content = tree[0]

    def __str__(self):
        # BUG FIX: the original referenced the undefined name `tree`
        # (NameError at call time); the stored content is that value.
        return self.content

    def toXML(self):
        """
        Export this class to an xml string
        """
        return self.content
class QueryHierarchy(object):
    """
    Handles the XML export of the hierarchy operation
    """
    def __init__(self, tree):
        self.type = 'hierarchy'
        # BUG FIX: keep the subtree so __str__ has something to render;
        # the original __str__ referenced the undefined name `tree`.
        self.tree = tree
        # First argument must be a Domain
        self.root = QueryDomain(tree[0])
        # Second argument can conceivably be anything
        self.leaf = QuerySentence(tree[1])

    def __str__(self):
        # Mirrors QuerySentence.__str__: render the first subtree.
        return str(self.tree[0])

    def toXML(self):
        """
        Export this class to an xml string
        """
        return '<operator opcode="hierarchy">' \
            + '<operand type="' + self.root.type + '" arg="root">' \
            + self.root.toXML() + "</operand>" \
            + '<operand type="' + self.leaf.type + '" arg="leaf">' \
            + self.leaf.toXML() + "</operand>" \
            + '</operator>'
class QueryTable(object):
    """
    Handles the XML export of the table operation
    """
    def __init__(self, tree):
        """
        Simply stores attributes, passing off handling of attributes to the
        QuerySentence class
        """
        self.type = 'table'
        # Keep the subtree so __str__ can reference it later.
        self.tree = tree
        self.horizontal = QuerySentence(tree[0])
        self.vertical = QuerySentence(tree[1])
        self.content = QuerySentence(tree[2])
    def __str__(self):
        # Fixed: previously returned `tree[0]` with `tree` undefined in this
        # scope (NameError); use the stored subtree.
        return str(self.tree[0])
    def toXML(self):
        """
        Export this class to an xml string
        """
        return '<operator opcode="table">' \
            + '<operand type="' + self.horizontal.type + '" arg="horizontal">' \
            + self.horizontal.toXML() + "</operand>" \
            + '<operand type="' + self.vertical.type + '" arg="vertical">' \
            + self.vertical.toXML() + "</operand>" \
            + '<operand type="' + self.content.type + '" arg="cell">' \
            + self.content.toXML() + "</operand>" \
            + '</operator>'
def demo():
    """
    A demonstration of the use of this class: parse a sample query, then
    show its tree and XML representations.
    """
    query = r'table(one/two/three, four, five)'
    # Print the query
    print """
================================================================================
Query: ParadigmQuery(query)
================================================================================
"""
    a = ParadigmQuery(query)
    print query
    # Print the Tree representation
    print """
================================================================================
Tree: getTree()
  O is an operator
  T is a table
  H is a hierarchy
  D is a domain
================================================================================
"""
    print a.getTree()
    # Print the XML representation
    print """
================================================================================
XML: getXML()
================================================================================
"""
    print a.getXML()
    # Some space
    print

if __name__ == '__main__':
    demo()
| Python |
# Natural Language Toolkit: Finite State Automata
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Steven Bird <sb@ldc.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
FSA class - deliberately simple so that the operations are easily understood.
Operations are based on Aho, Sethi & Ullman (1986) Chapter 3.
"""
from en.parser.nltk_lite import tokenize
from en.parser.nltk_lite.parse.tree import Tree
from en.parser.nltk_lite.parse import cfg, pcfg, pchart
epsilon = None
# some helper functions
# inserting and deleting elements from sets stored in hashes
def _hashed_set_insert(hash, key, item):
if hash.has_key(key):
hash[key].add(item)
else:
hash[key] = set([item])
def _hashed_set_delete(hash, key, item):
new = hash[key].difference(set([item]))
if len(new) > 0:
hash[key] = new
else:
del hash[key]
# TODO - check that parse was complete, and report error otherwise
# TODO - change parser to limit scope of unary operators
# to the most recent symbol
class FSA:
    """
    A finite state automaton over the alphabet ``sigma``.

    States are integers allocated by new_state(); transitions are kept in
    three parallel hashes: _forward (s1 -> set of s2), _reverse
    (s2 -> set of s1) and _labels ((s1, s2) -> set of labels).  The module
    constant ``epsilon`` (None) labels epsilon transitions.
    """
    # default fsa accepts the empty language
    def __init__(self, sigma):
        self._num = -1
        self._forward = {} # forward transitions
        self._reverse = {} # reverse transitions
        self._labels = {}
        self._finals = set()
        self._sigma = sigma
    # the fsa accepts the empty string
    # only call this right after initializing
    def empty(self):
        self._num = 0
        self._finals = set([0])
    def sigma(self):
        # Accessor for the alphabet this automaton is defined over.
        return self._sigma
    def check_in_sigma(self, label):
        # Epsilon (None/falsy) is always permitted; real labels must be
        # members of the alphabet.
        if label and label not in self._sigma:
            raise ValueError('Label "%s" not in alphabet: %s' % (label, str(self._sigma)))
    def new_state(self):
        # Allocate and return a fresh state identifier.
        self._num += 1
        return self._num
    def start(self):
        # By convention state 0 is the start state.
        return 0
    def finals(self):
        return tuple(self._finals)
    def states(self):
        return range(self._num+1)
    def add_final(self, state):
        self._finals.add(state)
    def delete_final(self, state):
        self._finals = self._finals.difference(set([state]))
        # del self._finals[state]
    def set_final(self, states):
        self._finals = set(states)
    def in_finals(self, list):
        # True iff any state in `list` is final.
        # NOTE(review): the parameter name shadows the builtin `list`.
        return [state for state in list
                if state in self.finals()] != []
    def insert(self, s1, label, s2):
        # Add the transition s1 --label--> s2, validating the label first.
        self.check_in_sigma(label)
        _hashed_set_insert(self._forward, s1, s2)
        _hashed_set_insert(self._reverse, s2, s1)
        _hashed_set_insert(self._labels, (s1,s2), label)
    def inserts(self, state_set, label, s2):
        # NOTE(review): FSA defines no `add` method (only a commented-out
        # one with different semantics below); this most likely should call
        # self.insert -- confirm before relying on this method.
        for s1 in tuple(state_set):
            self.add(s1, label, s2)
    def delete(self, s1, label, s2):
        # Remove a single labelled transition.
        _hashed_set_delete(self._forward, s1, s2)
        _hashed_set_delete(self._reverse, s2, s1)
        _hashed_set_delete(self._labels, (s1,s2), label)
    def delete_all(self, s1, s2):
        # Remove the s1 -> s2 edge together with its whole label set.
        _hashed_set_delete(self._forward, s1, s2)
        _hashed_set_delete(self._reverse, s2, s1)
        del self._labels[(s1,s2)]
    def delete_state(self, state):
        # Delete `state` and renumber the highest state into the gap so the
        # state identifiers stay contiguous.
        for (s1,label,s2) in self.incident_transitions(state):
            self.delete_all(s1, s2)
        self._relabel_state(self._num, state)
        self._num -= 1
    def _relabel_state(self, orig, new):
        # Rewrite all transition bookkeeping so that state `orig` is now
        # known as `new` (used by delete_state).
        for forward in self.forward_traverse(orig):
            _hashed_set_delete(self._forward, orig, forward)
            _hashed_set_insert(self._forward, new, forward)
            _hashed_set_delete(self._reverse, forward, orig)
            _hashed_set_insert(self._reverse, forward, new)
            self._labels[(new,forward)] = self._labels[(orig,forward)]
            del self._labels[(orig,forward)]
        for reverse in self.reverse_traverse(orig):
            _hashed_set_delete(self._reverse, orig, reverse)
            _hashed_set_insert(self._reverse, new, reverse)
            _hashed_set_delete(self._forward, reverse, orig)
            _hashed_set_insert(self._forward, reverse, new)
            self._labels[(reverse,new)] = self._labels[(reverse,orig)]
            del self._labels[(reverse,orig)]
        if orig in self.finals():
            self.delete_final(orig)
            self.add_final(new)
    def incident_transitions(self, state):
        # All transitions that enter or leave `state`.
        return [(s1,label,s2)
                for (s1,label,s2) in self.transitions()
                if s1 == state or s2 == state]
    def transitions(self):
        # Flatten the label hash into (s1, labelset, s2) triples.
        return [(s1,label,s2)
                for ((s1,s2),label) in self._labels.items()]
    def forward_traverse(self, state):
        # Successor states of `state`; empty tuple when there are none.
        if self._forward.has_key(state):
            return tuple(self._forward[state])
        else:
            return ()
    def reverse_traverse(self, state):
        # Predecessor states of `state`; empty tuple when there are none.
        if self._reverse.has_key(state):
            return tuple(self._reverse[state])
        else:
            return ()
    def next(self, s1, label):
        # States reachable from s1 via one transition carrying `label`.
        states = []
        for s2 in self.forward_traverse(s1):
            if label in self._labels[(s1,s2)]:
                states.append(s2)
        return tuple(states)
        # if self._table.has_key((state, label)):
        #     return tuple(self._table[(state, label)])
        # else:
        #     return ()
    def move(self, states, label):
        # Union of next() over a set of states (ASU's "move" operation).
        moves = []
        for state in states:
            moves.extend(self.next(state, label))
        return tuple(moves)
    def outgoing_transitions(self, state):
        # NOTE(review): as written this raises NameError -- `s1` and
        # `labels` are undefined and self._labels is called like a
        # function.  The commented-out comprehension below appears to be
        # the intended implementation; confirm before use.
        transitions = []
        if self._forward.has_key(s1):
            s2 = self._forward[s1]
            label = self._labels((s1,s2))
            transitions.append((s1, labels, s2))
        return transitions
        # return [(s1,labels,s2)
        #     for (s1, labels, s2) in self.transitions()
        #     if s1 == state]
    # delete inaccessible nodes and unused transitions
    def prune(self):
        acc = self.accessible()
        for state in self.states():
            if state not in acc:
                self.delete_state(state)
    # mark accessible nodes
    def accessible(self):
        # A state is kept iff it is reachable from the start state AND can
        # reach some final state (intersection of the two searches).
        acc = set()
        for final in self.finals():
            reverse_acc = set([final])
            self.reverse_accessible(final, reverse_acc)
            acc = acc.union(reverse_acc)
        forward_acc = set([self.start()])
        self.forward_accessible(self.start(), forward_acc)
        acc = acc.intersection(forward_acc)
        return tuple(acc)
    def forward_accessible(self, s1, visited):
        # Depth-first search along forward edges, accumulating into `visited`.
        for s2 in self.forward_traverse(s1):
            if not s2 in visited:
                visited.add(s2)
                self.forward_accessible(s2, visited)
    def reverse_accessible(self, s1, visited):
        # Depth-first search along reverse edges, accumulating into `visited`.
        for s2 in self.reverse_traverse(s1):
            if not s2 in visited:
                visited.add(s2)
                self.reverse_accessible(s2, visited)
    # From ASU page 119
    def e_closure(self, states):
        # All states reachable from `states` using only epsilon transitions.
        stack = list(states)
        closure = list(states)
        while stack:
            s1 = stack.pop()
            for s2 in self.next(s1, epsilon):
                if s2 not in closure:
                    closure.append(s2)
                    stack.append(s2)
        return tuple(closure)
    # return the corresponding DFA using subset construction (ASU p118)
    # NB representation of (a*) still isn't minimal; should have 1 state not 2
    def dfa(self):
        # `map` is used in both directions at once: int DFA state -> tuple of
        # NFA states, and tuple -> int (ints and tuples cannot collide as keys).
        dfa = FSA(self.sigma())
        dfa_initial = dfa.new_state()
        nfa_initial = self.e_closure((self.start(),))
        map = {}
        map[dfa_initial] = nfa_initial
        map[nfa_initial] = dfa_initial
        if nfa_initial in self.finals():
            dfa.add_final(dfa_initial)
        unmarked = [dfa_initial]
        marked = []
        while unmarked:
            dfa_state = unmarked.pop()
            marked.append(dfa_state)
            # is a final state accessible via epsilon transitions?
            if self.in_finals(self.e_closure(map[dfa_state])):
                dfa.add_final(dfa_state)
            for label in self.sigma():
                nfa_next = self.e_closure(self.move(map[dfa_state], label))
                if map.has_key(nfa_next):
                    dfa_next = map[nfa_next]
                else:
                    dfa_next = dfa.new_state()
                    map[dfa_next] = nfa_next
                    map[nfa_next] = dfa_next
                    if self.in_finals(nfa_next):
                        dfa.add_final(dfa_next)
                    unmarked.append(dfa_next)
                dfa.insert(dfa_state, label, dfa_next)
        return dfa
    # # add num to every state identifier
    # def add(self, num):
    #     newtable = {}
    #     for ((s1, label), s2) in self.transitions():
    #         newtable[(s1+num, label)] = map(lambda x,num:x+num, s2)
    #     self._table = newtable
    #
    # def concat(self, fsa):
    #     fsa.add(self._count) # relabel states for uniqueness
    #
    #     # TODO - add epsilon transition from finals to initials
    #     for final in self._finals:
    #         self.add(final, epsilon, self._count)
    #     self._table.extend(fsa._table)
    # generate all strings in the language up to length maxlen
    def generate(self, maxlen, state=0, prefix=""):
        # NOTE(review): depends on outgoing_transitions(), which is broken as
        # written, and appears to call a label set (`labels()`) -- the demo
        # docstring below confirms this path is known-broken; verify before use.
        if maxlen > 0:
            if state in self._finals:
                print prefix
            for (s1, labels, s2) in self.outgoing_transitions(state):
                for label in labels():
                    self.generate(maxlen-1, s2, prefix+label)
    def pp(self):
        # Pretty-print all transitions in sorted order, then the final states.
        t = self.transitions()
        t.sort()
        for (s1, label, s2) in t:
            print s1, ':', label, '->', s2
        print "Final:", self._finals
### FUNCTIONS TO BUILD FSA FROM REGEXP
# the grammar of regular expressions
# (probabilities ensure that unary operators
# have stronger associativity than juxtaposition)
def grammar(terminals):
    """
    Build the probabilistic grammar of regular expressions over the given
    terminal symbols.  (The probabilities ensure that unary operators have
    stronger associativity than juxtaposition.)
    """
    (S, Star, Plus, Qmk, Paren) = [cfg.Nonterminal(s) for s in 'S*+?(']
    rules = [
        pcfg.Production(S, [Star], prob=0.2),
        pcfg.Production(S, [Plus], prob=0.2),
        pcfg.Production(S, [Qmk], prob=0.2),
        pcfg.Production(S, [Paren], prob=0.2),
        pcfg.Production(S, [S, S], prob=0.1),
        pcfg.Production(Star, [S, '*'], prob=1),
        pcfg.Production(Plus, [S, '+'], prob=1),
        pcfg.Production(Qmk, [S, '?'], prob=1),
        pcfg.Production(Paren, ['(', S, ')'], prob=1),
    ]
    # Divide the remaining probability mass evenly among the terminals.
    terminal_prob = 0.1/len(terminals)
    rules.extend(pcfg.Production(S, [t], prob=terminal_prob) for t in terminals)
    return pcfg.Grammar(S, rules)
# Module-level regexp parser, built once from the grammar over the
# terminal alphabet 'abcde'; used by re2nfa() below.
_parser = pchart.InsideParse(grammar('abcde'))
# create NFA from regexp (Thompson's construction)
# assumes unique start and final states
def re2nfa(fsa, re):
    """
    Populate `fsa` with an NFA for the regular expression `re` using
    Thompson's construction; the build starts at the FSA's start state and
    the resulting exit state becomes the single final state.
    """
    char_tokens = tokenize.regexp(re, pattern=r'.')
    parse_tree = _parser.parse(char_tokens)
    if parse_tree is None:
        raise ValueError('Bad Regexp')
    final_state = re2nfa_build(fsa, fsa.start(), parse_tree)
    fsa.set_final([final_state])
    # fsa.minimize()
def re2nfa_build(fsa, node, tree):
    """
    Recursively translate one parse-tree node into NFA states/transitions,
    starting at `node`; return the exit state of the generated fragment.
    """
    if not isinstance(tree, Tree):
        # Leaf: a literal character.
        return re2nfa_char(fsa, node, tree)
    if len(tree) == 1:
        # Unary wrapper node: descend.
        return re2nfa_build(fsa, node, tree[0])
    symbol = tree.node
    if symbol == '(':
        return re2nfa_build(fsa, node, tree[1])
    if symbol == '*':
        return re2nfa_star(fsa, node, tree[0])
    if symbol == '+':
        return re2nfa_plus(fsa, node, tree[0])
    if symbol == '?':
        return re2nfa_qmk(fsa, node, tree[0])
    # Juxtaposition: chain the two subexpressions.
    node = re2nfa_build(fsa, node, tree[0])
    return re2nfa_build(fsa, node, tree[1])
def re2nfa_char(fsa, node, char):
    """Add a transition labelled `char` from `node` to a fresh state and
    return that state."""
    new = fsa.new_state()
    # Fixed: FSA defines insert(s1, label, s2), not add(); calling the
    # nonexistent fsa.add raised AttributeError.
    fsa.insert(node, char, new)
    return new
def re2nfa_qmk(fsa, node, tree):
    """Build the NFA fragment for `tree?` starting at `node`; return its
    exit state (reachable both through the subexpression and by skipping it)."""
    node1 = fsa.new_state()
    node2 = re2nfa_build(fsa, node1, tree)
    node3 = fsa.new_state()
    # Fixed: FSA defines insert(), not add(), for creating transitions.
    fsa.insert(node, epsilon, node1)
    fsa.insert(node, epsilon, node3)
    fsa.insert(node2, epsilon, node3)
    return node3
def re2nfa_plus(fsa, node, tree):
    """Build the NFA fragment for `tree+` starting at `node`; return its
    exit state, with an epsilon edge back to `node` for repetition."""
    # Fixed: (1) FSA defines insert(), not add(); (2) pass `tree` itself to
    # re2nfa_build -- the caller already unwrapped the subtree, and indexing
    # tree[0] again dropped part of a juxtaposed subexpression (compare
    # re2nfa_star/re2nfa_qmk, which both pass `tree`).
    node1 = re2nfa_build(fsa, node, tree)
    fsa.insert(node1, epsilon, node)
    return node1
def re2nfa_star(fsa, node, tree):
    """Build the NFA fragment for `tree*` starting at `node`; return its
    exit state (skippable, and repeatable via the back epsilon edge)."""
    node1 = fsa.new_state()
    node2 = re2nfa_build(fsa, node1, tree)
    node3 = fsa.new_state()
    # Fixed: FSA defines insert(), not add(), for creating transitions.
    fsa.insert(node, epsilon, node1)
    fsa.insert(node, epsilon, node3)
    fsa.insert(node2, epsilon, node1)
    fsa.insert(node2, epsilon, node3)
    return node3
#################################################################
# Demonstration
#################################################################
def demo():
    """
    A demonstration showing how FSAs can be created and used.
    NB: This demo is broken.
    """
    # Define an alphabet.
    alphabet = "ab"
    # Create a new FSA.
    fsa = FSA(alphabet)
    # Use a regular expression to initialize the FSA.
    re = 'ab*'
    print 'Regular Expression:', re
    # empty() must be called right after construction (see FSA.empty).
    fsa.empty()
    re2nfa(fsa, re)
    print "NFA:"
    fsa.pp()
    # Convert the (nondeterministic) FSA to a deterministic FSA.
    dfa = fsa.dfa()
    print "DFA:"
    dfa.pp()
    # Prune the DFA
    dfa.prune()
    print "PRUNED DFA:"
    dfa.pp()
    # Use the FSA to generate all strings of length less than 3
    # (broken)
    #fsa.generate(3)
if __name__ == '__main__': demo()
| Python |
from grammarfile import GrammarFile
from featurechart import *
from en.parser.nltk_lite import tokenize
"""
An interactive interface to the feature-based parser. Run "featuredemo.py -h" for
command-line options.
This interface will read a grammar from a *.cfg file, in the format of
test.cfg. It will prompt for a filename for the grammar (unless one is given on
the command line) and for a sentence to parse, then display the edges being
generated and any resulting parse trees.
"""
def text_parse(grammar, sent, trace=2, drawtrees=False, latex=False):
    """
    Parse `sent` with the Earley parser built from `grammar`, then either
    display the resulting trees in a GUI window (drawtrees=True) or print
    them, optionally in LaTeX qtree format (latex=True).
    """
    parser = grammar.earley_parser(trace=trace)
    print parser._grammar
    # Whitespace tokenization; get_parse_list returns all complete parses.
    tokens = list(tokenize.whitespace(sent))
    trees = parser.get_parse_list(tokens)
    if drawtrees:
        # Imported lazily so batch/text mode needs no GUI dependencies.
        from treeview import TreeView
        TreeView(trees)
    else:
        for tree in trees:
            if latex: print tree.latex_qtree()
            else: print tree
def main():
    """
    Command-line driver: parse options, load a grammar file (prompting for
    the filename if not given), then parse a single sentence interactively
    or every line of a batch file.
    """
    import sys
    from optparse import OptionParser, OptionGroup
    usage = """%%prog [options] [grammar_file]

by Rob Speer

Distributed under the GPL. See LICENSE.TXT for information.""" % globals()
    opts = OptionParser(usage=usage)
    opts.add_option("-b", "--batch",
        metavar="FILE", dest="batchfile", default=None,
        help="Batch test: parse all the lines in a file")
    opts.add_option("-v", "--verbose",
        action="count", dest="verbosity", default=0,
        help="show more information during parse")
    opts.add_option("-q", "--quiet",
        action="count", dest="quietness", default=0,
        help="show only the generated parses (default in batch mode)")
    opts.add_option("-l", "--latex",
        action="store_true", dest="latex",
        help="output parses as LaTeX trees (using qtree.sty)")
    opts.add_option("-d", "--drawtrees",
        action="store_true", dest="drawtrees",
        help="show parse trees in a GUI window")
    (options, args) = opts.parse_args()

    # Batch mode forces quiet tracing and is incompatible with the GUI.
    trace = 0
    batch = False
    if options.batchfile is not None:
        trace = 0
        batch = True
        if options.drawtrees:
            sys.stderr.write("Cannot use --drawtrees and --batch simultaneously.")
            sys.exit(1)
    # --quiet zeroes the trace level; each -v then raises it again.
    if options.quietness > 0: trace = 0
    trace += options.verbosity

    # The grammar file may come from argv or an interactive prompt.
    if len(args): filename = args[0]
    else: filename = None

    if filename is None:
        sys.stderr.write("Load rules from file: ")
        filename = sys.stdin.readline()[:-1]
        if filename == '': return

    grammar = GrammarFile.read_file(filename)
    if not batch:
        sys.stderr.write("Sentence: ")
        sentence = sys.stdin.readline()[:-1]
        if sentence == '': return
        text_parse(grammar, sentence, trace, options.drawtrees, options.latex)
    else:
        # Batch: one sentence per line; blank lines and '#' comments skipped.
        infile = open(options.batchfile)
        for line in infile.readlines():
            sentence = line.strip()
            if sentence == '': continue
            if sentence[0] == '#': continue
            print "Sentence: %s" % sentence
            text_parse(grammar, sentence, trace, False, options.latex)
        infile.close()

if __name__ == '__main__':
    main()
| Python |
# Natural Language Toolkit: Shoebox Lexicon
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Stuart Robinson <stuart@zapata.org>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
This modules provides functionality for parsing and manipulating the
contents of a Shoebox lexicon without reference to its metadata.
"""
import os, re, sys
from en.parser.nltk_lite.corpora import get_basedir
from en.parser.nltk_lite.corpora.shoebox import ShoeboxFile
from utilities import Field, SequentialDictionary
class Lexicon(ShoeboxFile):
    """
    This class represents a Shoebox lexicon, which consists of an
    optional header and one or more Entry objects, saved in a dictionary
    whose keys are passed as a parameter to the parse() method.
    """
    def __init__(self, file):
        """
        This method constructs a Lexicon object with an empty header and an
        empty dictionary of entries for the given file path.
        @param file: path to the Shoebox lexicon file
        @type file: string
        """
        self._key_fields = ['lx']
        self._header = ''
        self._entries = {}
        self._file = file

    def __str__(self):
        """
        This method defines the string representation of a Lexicon object:
        the header followed by every entry in key order.
        @rtype: string
        """
        s = "%s\n" % self.get_header()
        for e in self.get_entries():
            s = "%s%s\n" % (s, e)
        return s

    def set_header(self, header):
        """
        This method sets the raw text of the header.
        @param header: header (as raw text)
        @type header: string
        """
        self._header = header

    def get_header(self):
        """
        This method obtains the raw text of the header.
        @return: raw header
        @rtype: string
        """
        return self._header

    def get_entries(self):
        """
        This method obtains all of the entries found in a
        parsed Shoebox lexicon, sorted by entry key.
        @return: all of the entries in the Lexicon
        @rtype: generator of Entry objects
        """
        # sorted() works under both Python 2 and 3 (in Python 3,
        # dict.keys() is a view with no .sort() method).
        for key in sorted(self._entries.keys()):
            for entry in self._entries[key]:
                yield entry

    def add_entry(self, entry, unique=False):
        """
        This method adds an Entry object to a Lexicon object. It adds the
        entry to the Lexicon keyed by the values of the fields specified
        by the I{key_fields} argument.
        @param entry: a parsed entry from a Shoebox lexicon
        @type entry: Entry object
        @param unique: raise exception if entry key already exists
        @type unique: boolean
        """
        key = ""
        for field_marker in self._key_fields:
            f = entry.get_field(field_marker)
            if f:
                values = f.get_values("/")
                key = key + "-" + values
            # A key field with no values contributes nothing to the key
            # (unchanged behavior; arguably it should be an error).
        if key in self._entries:
            if unique:
                msg = "Non-unique entry! \nEntry: \n%s\nKey Fields: %s\nKey: '%s'\n" % (entry, self._key_fields, key)
                raise ValueError(msg)
        else:
            self._entries[key] = []
        # Now append entry to list of entries for key
        self._entries[key].append(entry)

    def parse(self,
              head_field_marker = 'lx',
              subentry_field_marker = None,
              key_fields = None,
              unique_entry = True,
              unique_subentry = False):
        """
        This method parses a Shoebox file in a Lexicon object. It will also parse
        subentries provided that the field marker identifying subentries is passed to it.
        @param head_field_marker: field marker that identifies the start of an entry
        @type head_field_marker: string
        @param key_fields: the field(s) to which entries are keyed
        @type key_fields: list of strings
        @param subentry_field_marker: field marker that identifies subentries
        @type subentry_field_marker: string
        @param unique_entry: raise warning if entries are non-unique according
        to I{key_fields} parameter
        @type unique_entry: boolean
        @param unique_subentry: raise warning if entries are non-unique according to
        I{key_fields} parameter (currently unused)
        @type unique_subentry: boolean
        @return: a parsed Lexicon object
        @rtype: dictionary object
        """
        if key_fields:
            self._key_fields = key_fields
        # Set up variables
        inside_entry = False
        inside_subentry = False
        e = None
        se = None
        # Use low-level functionality to get raw fields and walk through them
        self.open(self._file)
        for fmarker, fvalue in self.raw_fields():
            # What kind of field marker is it?
            if fmarker.startswith("_"):
                # TODO: Add field to header
                pass
            elif fmarker == head_field_marker:
                inside_entry = True
                inside_subentry = False
                if e:
                    # Fixed: flush any open subentry into the entry it
                    # belongs to before starting the next entry; previously
                    # it was attached to the *following* entry.
                    if se:
                        e.add_subentry(se)
                        se = None
                    self.add_entry(e, unique_entry)
                e = Entry()
            elif subentry_field_marker and fmarker == subentry_field_marker:
                inside_subentry = True
                if se:
                    e.add_subentry(se)
                se = Entry()
            # Add field to entry or subentry
            if inside_subentry:
                se.add_field(fmarker, fvalue)
            elif inside_entry:
                e.add_field(fmarker, fvalue)
        # Deal with last subentry and entry
        if e:
            if se:
                e.add_subentry(se)
            self.add_entry(e, unique_entry)
        self.close()
class Entry:
    """
    This class represents an entry (record) from a Shoebox lexicon. Each entry
    consists of a collection of fields, stored as a special type of dictionary
    which keeps track of the sequence in which its keys were entered.
    """
    def __init__(self):
        """
        This method constructs a new, empty Entry object.
        """
        self._fields = SequentialDictionary()
        self._rawText = ""
        self._number = None
        self._subentries = None

    def __str__(self):
        """
        This method defines the string representation of an entry.
        @rtype: string
        @return: an entry as a string in Standard Format
        """
        s = ""
        for fm, fvs in self._fields.items():
            for fv in fvs:
                s = s + "\n\\%s %s" % (fm, fv)
        return s

    def set_raw_text(self, rawText):
        """
        This method stores the raw text from which the
        Entry object was parsed.
        @param rawText: raw Shoebox text from which entry was parsed
        @type rawText: string
        """
        self._rawText = rawText

    def get_raw_text(self):
        """
        This method returns the raw text from which the Entry object was parsed.
        @rtype: string
        """
        return self._rawText

    def get_subentries(self):
        """
        This method obtains all of the subentries for an entry.
        @rtype: list of Entry objects (or None if there are none)
        @returns: all of the subentries of an entry
        """
        return self._subentries

    def add_subentry(self, subentry):
        """
        This method adds to an entry a subentry, which is simply another
        Entry object.
        @param subentry: subentry
        @type subentry: Entry object
        """
        # The subentry list is created lazily on first use.
        if not self._subentries:
            self._subentries = []
        self._subentries.append(subentry)

    def set_number(self, number):
        """
        This method sets the position of the entry in
        the dictionary as a cardinal number.
        @param number: number of entry
        @type number: integer
        """
        self._number = number

    def get_number(self):
        """
        This method obtains the position of the entry in the dictionary
        as a cardinal number.
        @rtype: integer
        """
        return self._number

    def get_fields(self):
        """
        This method obtains all of the field values found in the Entry object.
        @rtype: list of lists of field values, in insertion order
        """
        return self._fields.values()

    def get_field_markers(self):
        """
        This method obtains the field markers found in the Entry object.
        @return: the field markers of an entry
        @rtype: list
        """
        return self._fields.keys()

    def get_values_by_marker(self, field_marker, sep=None):
        """Convenience alias for get_field_values_by_field_marker()."""
        return self.get_field_values_by_field_marker(field_marker, sep)

    def get_field_values_by_field_marker(self, field_marker, sep=None):
        """
        This method returns all of the field values for a given field marker.
        If I{sep} is set, it will return a string; otherwise, it will
        return a list of field values.
        @param field_marker: marker of desired field
        @type field_marker: string
        @param sep: separator for field values
        @type sep: string
        @rtype: string (if sep); otherwise, list of field values
        """
        try:
            values = self._fields[field_marker]
            if sep is None:
                return values
            else:
                return sep.join(values)
        except KeyError:
            return None

    def get_field_as_string(self,
                            field_marker,
                            join_string=""):
        """
        This method returns a particular field's values joined into a string.
        Returns a blank string if the field is not found.
        @param field_marker: marker of desired field
        @type field_marker: string
        @param join_string: string used to join field values (default to blank string)
        @type join_string: string
        @rtype: string
        """
        try:
            return join_string.join(self._fields[field_marker])
        except KeyError:
            return ""

    def get_field(self, fieldMarker):
        """
        This method returns a particular field given a field marker.
        @param fieldMarker: marker of desired field
        @type fieldMarker: string
        @rtype: Field object (or None if the field is absent)
        """
        try:
            return Field(fieldMarker, self._fields[fieldMarker])
        except KeyError:
            return None

    def set_field(self, fieldMarker, field):
        """
        This method sets a field, given a marker and its associated data.
        @param fieldMarker: field marker to set
        @type fieldMarker: string
        @param field : field value associated with field marker
        @type field : string
        """
        # Fixed: previously appended the undefined name `fieldData`,
        # which raised NameError whenever this method was called.
        self._fields[fieldMarker] = [field]

    def set_field_values(self, fieldMarker, fieldValues):
        """
        This method sets all of the values associated with a field.
        @param fieldMarker: field marker to set
        @type fieldMarker: string
        @param fieldValues: list of field values
        @type fieldValues: list
        """
        self._fields[fieldMarker] = fieldValues

    def add_field(self, marker, value):
        """
        This method adds a field to an entry if it does not already exist
        and adds a new value to the field of an entry if it does.
        @param marker: field marker
        @type marker: string
        @param value : field value
        @type value : string
        """
        # `in` replaces the Python-2-only has_key() with identical behavior.
        if marker in self._fields:
            self._fields[marker].append(value)
        else:
            self._fields[marker] = [value]

    def remove_field(self, fieldMarker):
        """
        This method removes from an entry every field for a given
        field marker. It will not raise an error if the specified field
        does not exist.
        @param fieldMarker: field marker to be deleted
        @type fieldMarker: string
        """
        if fieldMarker in self._fields:
            del self._fields[fieldMarker]
def demo() :
    """Parse the sample rotokas.dic lexicon and print each entry's
    lx/ps/sn fields (entries keyed, non-uniquely, on those three fields)."""
    path = os.path.join(get_basedir(), "shoebox", "rotokas.dic")
    l = Lexicon(path)
    l.parse(key_fields=['lx','ps','sn'], unique_entry=False)
    h = l.get_header()
    for e in l.get_entries() :
        print "<%s><%s><%s>" % (e.get_field_as_string("lx", ""),
                                e.get_field_as_string("ps", ""),
                                e.get_field_as_string("sn", ""))

if __name__ == '__main__':
    demo()
| Python |
# Natural Language Toolkit: Shoebox Utilities
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Stuart Robinson <stuart@zapata.org>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
This module provides basic functionality for handling shoebox format files.
These feed into the more sophisticated Shoebox tools available in the
modules I{lexicon}, I{text}, and I{metadata}.
"""
import re
from UserDict import UserDict
def parse_field(line):
    """
    This function returns the field marker and field value of a Shoebox field.
    @return: parses field as string and returns tuple with field marker and
    field value, or None when the line is not a field
    @rtype: tuple
    """
    match = re.match(r"\\(.*?) (.*)", line)
    if not match:
        return None
    return (match.group(1), match.group(2))
class Field:
    """
    Class used to represent a standard format field. A field
    consists of a field marker and its value, stored as a tuple.
    """
    def __init__(self, fieldMarker, fieldValue):
        """
        This method constructs a Field object as a tuple of a field
        marker and a field value.
        @param fieldMarker: a field's marker
        @type fieldMarker: string
        @param fieldValue : a field's value (the actual data)
        @type fieldValue : string or list of strings
        """
        self._field = (fieldMarker, fieldValue)

    def __str__(self):
        """
        This method returns the string representation of a Field object.
        @return: a Field object formatted as a string
        @rtype: string
        """
        # Fixed: previously called the undefined methods getMarker() and
        # getValue() (AttributeError).  A list of values is joined with
        # spaces so the result is readable standard format text.
        marker, value = self._field
        if isinstance(value, list):
            value = " ".join(value)
        return "\\%s %s" % (marker, value)

    def get_marker(self):
        """
        This method returns the marker for a field.
        @return: a field's marker
        @rtype: string
        """
        return self._field[0]

    def has_unique_value(self):
        """
        This method checks whether a field has a single value, in
        which case it returns true, or multiple values, in which
        case it returns false.
        @return: whether the value for a given field is unique
        @rtype: boolean
        """
        # Fixed: the original condition was inverted with respect to this
        # docstring -- it returned True for empty or multi-valued fields
        # and False for single-valued ones.
        values = self.get_values()
        return bool(values) and len(values) == 1

    def has_value(self):
        """
        This method checks whether a field has a value or not.
        @return: whether a given field has a value
        @rtype: boolean
        """
        if self.get_values():
            return True
        else:
            return False

    def get_values(self, sep=None):
        """
        This method returns the values for a field, either as a raw list of
        values or, if a separator string is provided, as a formatted string.
        @return: the values for a field; if sep provided, formatted as string
        @rtype: a list of values or a string of these values joined by I{sep}
        """
        values = self._field[1]
        if sep is None:
            return values
        else:
            return sep.join(values)
# class FieldParser:
# """
# Parses raw Shoebox field into a field object.
# """
# def __init__(self, rawText):
# self._rawText = rawText
# def getRawText(self):
# """
# This method returns the raw text to be parsed as a field by the parser.
# @return: string
# @rtype: a string with a standard format field as raw text
# """
# return self._rawText
# def setRawText(self, rawtext):
# """
# This method constructs a Field object as a tuple of a field
# marker and a field value.
# @param rawtext: the raw text to be parsed into a field object
# @type rawtext: string
# """
# self._rawtext = rawtext
# return self._rawtext
# def parse(self):
# regex = r"\\([A-Za-z][A-Za-z0-9\_\-]*) (.*)"
# mo = re.search(regex,
# self.getRawText())
# fm = mo.group(1)
# fv = mo.group(2)
# return Field(fm, fv)
class SequentialDictionary(UserDict):
    """
    Dictionary that retains the order in which keys were added to it.
    """
    def __init__(self, dict=None):
        # _keys must exist before UserDict.__init__ runs, because it calls
        # update()/__setitem__ when an initial mapping is supplied.
        self._keys = []
        UserDict.__init__(self, dict)

    def __delitem__(self, key):
        UserDict.__delitem__(self, key)
        self._keys.remove(key)

    def __setitem__(self, key, item):
        UserDict.__setitem__(self, key, item)
        # Re-assignment keeps the key's original position.
        if key not in self._keys:
            self._keys.append(key)

    def clear(self):
        UserDict.clear(self)
        self._keys = []

    def copy(self):
        dict = UserDict.copy(self)
        # Fixed: was `self.keys[:]`, which slices the bound method object
        # and raised TypeError; the key *list* is self._keys.
        dict._keys = self._keys[:]
        return dict

    def items(self):
        # list() preserves the Python 2 list return type on Python 3,
        # where zip() yields an iterator.
        return list(zip(self._keys, self.values()))

    def keys(self):
        return self._keys

    def popitem(self):
        # Pop the most recently added key, LIFO-style.
        try:
            key = self._keys[-1]
        except IndexError:
            raise KeyError('dictionary is empty')
        val = self[key]
        del self[key]
        return (key, val)

    def setdefault(self, key, failobj=None):
        if key not in self._keys:
            self._keys.append(key)
        return UserDict.setdefault(self, key, failobj)

    def update(self, dict):
        UserDict.update(self, dict)
        for key in dict.keys():
            if key not in self._keys:
                self._keys.append(key)

    def values(self):
        # Fixed for Python 3 compatibility: map() returns a lazy iterator
        # there; a list comprehension returns a list on both versions.
        return [self.get(key) for key in self._keys]
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Natural Language Toolkit: Toolbox data file parser
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Greg Aumann <greg_aumann@sil.org>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""module for reading Toolbox data files
"""
from en.parser.nltk_lite.etree.ElementTree import TreeBuilder
from en.parser.nltk_lite.corpora import toolbox
class ToolboxData(toolbox.ToolboxData):
def __init__(self):
super(toolbox.ToolboxData, self).__init__()
def _make_parse_table(self, grammar):
"""
Return parsing state information used by tree_parser.
"""
first = dict()
gram = dict()
for sym, value in grammar.items():
first[sym] = value[0]
gram[sym] = value[0] + value[1]
parse_table = dict()
for state in gram.keys():
parse_table[state] = dict()
for to_sym in gram[state]:
if to_sym in grammar:
# is a nonterminal
# assume all firsts are terminals
for i in first[to_sym]:
parse_table[state][i] = to_sym
else:
parse_table[state][to_sym] = to_sym
return (parse_table, first)
def grammar_parse(self, startsym, grammar, **kwargs):
"""
Returns an element tree structure corresponding to a toolbox data file
parsed according to the grammar.
@type startsym: string
@param startsym: Start symbol used for the grammar
@type grammar: dictionary of tuple of tuples
@param grammar: Contains the set of rewrite rules used to parse the
database. See the description below.
@param kwargs: Keyword arguments passed to L{toolbox.StandardFormat.fields()}
@type kwargs: keyword arguments dictionary
@rtype: ElementTree._ElementInterface
@return: Contents of toolbox data parsed according to rules in grammar
The rewrite rules in the grammar look similar to those usually used in
computer languages. The difference is that the ordering constraints
that are usually present are relaxed in this parser. The reason is that
toolbox databases seldom have consistent ordering of fields. Hence the
right side of each rule consists of a tuple with two parts. The
fields in the first part mark the start of nonterminal.
Each of them can occur only once and all those must
occur before any of the fields in the second part of that nonterminal.
Otherwise they are interpreted as marking the start
of another one of the same nonterminal. If there is more than one
in the first part of the tuple they do not need to all appear in a parse.
The fields in the second part of the tuple can occur in any order.
Sample grammar::
grammar = {
'toolbox': (('_sh',), ('_DateStampHasFourDigitYear', 'entry')),
'entry': (('lx',), ('hm', 'sense', 'dt')),
'sense': (('sn', 'ps'), ('pn', 'gv', 'dv',
'gn', 'gp', 'dn', 'rn',
'ge', 'de', 're',
'example', 'lexfunc')),
'example': (('rf', 'xv',), ('xn', 'xe')),
'lexfunc': (('lf',), ('lexvalue',)),
'lexvalue': (('lv',), ('ln', 'le')),
}
"""
parse_table, first = self._make_parse_table(grammar)
builder = TreeBuilder()
pstack = list()
state = startsym
first_elems = list()
pstack.append((state, first_elems))
builder.start(state, {})
field_iter = self.fields(**kwargs)
loop = True
try:
mkr, value = field_iter.next()
except StopIteration:
loop = False
while loop:
(state, first_elems) = pstack[-1]
if mkr in parse_table[state]:
next_state = parse_table[state][mkr]
if next_state == mkr:
if mkr in first[state]:
# may be start of a new nonterminal
if mkr not in first_elems:
# not a new nonterminal
first_elems.append(mkr)
add = True
else:
# a new nonterminal, second or subsequent instance
add = False
if len(pstack) > 1:
builder.end(state)
pstack.pop()
else:
raise ValueError, \
'Line %d: syntax error, unexpected marker %s.' % (self.line_num, mkr)
else:
# start of terminal marker
add = True
if add:
if value:
builder.start(mkr, dict())
builder.data(value)
builder.end(mkr)
try:
mkr, value = field_iter.next()
except StopIteration:
loop = False
else:
# a non terminal, first instance
first_elems = list()
builder.start(next_state, dict())
pstack.append((next_state, first_elems))
else:
if len(pstack) > 1:
builder.end(state)
pstack.pop()
else:
raise ValueError, \
'Line %d: syntax error, unexpected marker %s.' % (self.line_num, mkr)
for state, first_elems in reversed(pstack):
builder.end(state)
return builder.close()
def indent(elem, level=0):
    """
    Recursive function to indent an ElementTree._ElementInterface
    used for pretty printing.  Code from
    U{http://www.effbot.org/zone/element-lib.htm}.  To use, run indent
    on elem and then output in the normal way.

    @param elem: element to be indented.  will be modified in place.
    @type elem: ElementTree._ElementInterface
    @param level: level of indentation for this element
    @type level: nonnegative integer
    @rtype: NoneType
    @return: C{None}; elem is modified in place
    """
    i = "\n" + level*" "
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = i + " "
        # Give this element a tail at its own level.  This line is in the
        # original effbot recipe but was dropped here, which left non-final
        # branch elements without any tail (their parent only fixes up the
        # *last* child below), producing mis-indented output.
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
        for elem in elem:
            indent(elem, level+1)
        # 'elem' is now the last child: pull its tail back to this level so
        # the parent's closing tag lines up.
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i
def demo_flat():
    """Parse the sample Toolbox database and write it as flat XML to stdout."""
    import sys
    from en.parser.nltk_lite.etree.ElementTree import ElementTree
    parsed = toolbox.parse_corpus('iu_mien_samp.db', key='lx', encoding='utf8')
    ElementTree(parsed).write(sys.stdout)
# Command-line entry point: dump the sample corpus as XML.
if __name__ == '__main__':
    demo_flat()
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Natural Language Toolkit: Toolbox data file parser
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Greg Aumann <greg_aumann@sil.org>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""module for reading Toolbox data files
"""
from en.parser.nltk_lite.etree.ElementTree import TreeBuilder
from en.parser.nltk_lite.corpora import toolbox
class ToolboxData(toolbox.ToolboxData):
def __init__(self):
super(toolbox.ToolboxData, self).__init__()
def _make_parse_table(self, grammar):
"""
Return parsing state information used by tree_parser.
"""
first = dict()
gram = dict()
for sym, value in grammar.items():
first[sym] = value[0]
gram[sym] = value[0] + value[1]
parse_table = dict()
for state in gram.keys():
parse_table[state] = dict()
for to_sym in gram[state]:
if to_sym in grammar:
# is a nonterminal
# assume all firsts are terminals
for i in first[to_sym]:
parse_table[state][i] = to_sym
else:
parse_table[state][to_sym] = to_sym
return (parse_table, first)
def grammar_parse(self, startsym, grammar, **kwargs):
"""
Returns an element tree structure corresponding to a toolbox data file
parsed according to the grammar.
@type startsym: string
@param startsym: Start symbol used for the grammar
@type grammar: dictionary of tuple of tuples
@param grammar: Contains the set of rewrite rules used to parse the
database. See the description below.
@param kwargs: Keyword arguments passed to L{toolbox.StandardFormat.fields()}
@type kwargs: keyword arguments dictionary
@rtype: ElementTree._ElementInterface
@return: Contents of toolbox data parsed according to rules in grammar
The rewrite rules in the grammar look similar to those usually used in
computer languages. The difference is that the ordering constraints
that are usually present are relaxed in this parser. The reason is that
toolbox databases seldom have consistent ordering of fields. Hence the
right side of each rule consists of a tuple with two parts. The
fields in the first part mark the start of nonterminal.
Each of them can occur only once and all those must
occur before any of the fields in the second part of that nonterminal.
Otherwise they are interpreted as marking the start
of another one of the same nonterminal. If there is more than one
in the first part of the tuple they do not need to all appear in a parse.
The fields in the second part of the tuple can occur in any order.
Sample grammar::
grammar = {
'toolbox': (('_sh',), ('_DateStampHasFourDigitYear', 'entry')),
'entry': (('lx',), ('hm', 'sense', 'dt')),
'sense': (('sn', 'ps'), ('pn', 'gv', 'dv',
'gn', 'gp', 'dn', 'rn',
'ge', 'de', 're',
'example', 'lexfunc')),
'example': (('rf', 'xv',), ('xn', 'xe')),
'lexfunc': (('lf',), ('lexvalue',)),
'lexvalue': (('lv',), ('ln', 'le')),
}
"""
parse_table, first = self._make_parse_table(grammar)
builder = TreeBuilder()
pstack = list()
state = startsym
first_elems = list()
pstack.append((state, first_elems))
builder.start(state, {})
field_iter = self.fields(**kwargs)
loop = True
try:
mkr, value = field_iter.next()
except StopIteration:
loop = False
while loop:
(state, first_elems) = pstack[-1]
if mkr in parse_table[state]:
next_state = parse_table[state][mkr]
if next_state == mkr:
if mkr in first[state]:
# may be start of a new nonterminal
if mkr not in first_elems:
# not a new nonterminal
first_elems.append(mkr)
add = True
else:
# a new nonterminal, second or subsequent instance
add = False
if len(pstack) > 1:
builder.end(state)
pstack.pop()
else:
raise ValueError, \
'Line %d: syntax error, unexpected marker %s.' % (self.line_num, mkr)
else:
# start of terminal marker
add = True
if add:
if value:
builder.start(mkr, dict())
builder.data(value)
builder.end(mkr)
try:
mkr, value = field_iter.next()
except StopIteration:
loop = False
else:
# a non terminal, first instance
first_elems = list()
builder.start(next_state, dict())
pstack.append((next_state, first_elems))
else:
if len(pstack) > 1:
builder.end(state)
pstack.pop()
else:
raise ValueError, \
'Line %d: syntax error, unexpected marker %s.' % (self.line_num, mkr)
for state, first_elems in reversed(pstack):
builder.end(state)
return builder.close()
def indent(elem, level=0):
    """
    Recursive function to indent an ElementTree._ElementInterface
    used for pretty printing.  Code from
    U{http://www.effbot.org/zone/element-lib.htm}.  To use, run indent
    on elem and then output in the normal way.

    @param elem: element to be indented.  will be modified in place.
    @type elem: ElementTree._ElementInterface
    @param level: level of indentation for this element
    @type level: nonnegative integer
    @rtype: NoneType
    @return: C{None}; elem is modified in place
    """
    i = "\n" + level*" "
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = i + " "
        # Give this element a tail at its own level.  This line is in the
        # original effbot recipe but was dropped here, which left non-final
        # branch elements without any tail (their parent only fixes up the
        # *last* child below), producing mis-indented output.
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
        for elem in elem:
            indent(elem, level+1)
        # 'elem' is now the last child: pull its tail back to this level so
        # the parent's closing tag lines up.
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i
def demo_flat():
    """Parse the sample Toolbox database and write it as flat XML to stdout."""
    import sys
    from en.parser.nltk_lite.etree.ElementTree import ElementTree
    parsed = toolbox.parse_corpus('iu_mien_samp.db', key='lx', encoding='utf8')
    ElementTree(parsed).write(sys.stdout)
# Command-line entry point: dump the sample corpus as XML.
if __name__ == '__main__':
    demo_flat()
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Natural Language Toolkit: Toolbox Settings Parser
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Greg Aumann <greg_aumann@sil.org>/Stuart Robinson <stuart@zapata.org>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
This module provides functionality for reading settings files for Toolbox.
Settings files provide information (metadata) concerning lexicons and texts,
such as which fields are found within them and what kind of values those
fields can have.
"""
from elementtree import ElementTree
from en.parser.nltk_lite.corpora.toolbox import StandardFormat
#from en.parser.nltk_lite.parse.tree import Tree
class ToolboxSettings(StandardFormat):
    """Base class for Toolbox settings files."""

    def __init__(self):
        super(ToolboxSettings, self).__init__()

    def parse(self, encoding=None, errors='strict', **kwargs):
        """Parse a settings file into an ElementTree.

        @param encoding: encoding used by settings file
        @type encoding: string
        @param errors: Error handling scheme for codec.  Same as the C{.decode}
            inbuilt method.
        @type errors: string
        @param kwargs: Keyword arguments passed to L{StandardFormat.fields()}
        @type kwargs: keyword arguments dictionary
        @rtype: ElementTree._ElementInterface
        @return: contents of toolbox settings file with a nested structure
        """
        builder = ElementTree.TreeBuilder()
        for mkr, value in self.fields(encoding=encoding, errors=errors, **kwargs):
            # A leading '+' opens a nested block, a leading '-' closes one;
            # any other marker is a plain field.
            prefix = mkr[0]
            if prefix == "+":
                builder.start(mkr[1:], {})
                builder.data(value)
            elif prefix == "-":
                builder.end(mkr[1:])
            else:
                # Plain field: emit a complete leaf element.
                builder.start(mkr, {})
                builder.data(value)
                builder.end(mkr)
        return ElementTree.ElementTree(builder.close())
def to_settings_string(tree, encoding=None, errors='strict', unicode_fields=None):
    """Serialize a settings ElementTree back to Toolbox standard-format text.

    @param tree: settings tree to serialize
    @type tree: ElementTree.ElementTree
    @param encoding: output encoding, forwarded to the recursive serializer
    @param errors: codec error scheme, forwarded to the recursive serializer
    @param unicode_fields: fields to treat as unicode, forwarded likewise
    @rtype: string
    @return: standard-format rendering of the tree
    """
    l = list()
    # Pass the options as keywords: _to_settings_string accepts only
    # (node, l, **kwargs), so the previous positional call raised TypeError.
    _to_settings_string(tree.getroot(), l, encoding=encoding, errors=errors,
                        unicode_fields=unicode_fields)
    return ''.join(l)
def _to_settings_string(node, l, **kwargs):
    """Recursively append the standard-format rendering of *node* to *l*.

    Leaf elements become a single ``\\marker value`` line; elements with
    children are wrapped in ``\\+marker`` ... ``\\-marker`` lines.  Returns
    the joined text accumulated so far, though callers mostly rely on the
    side effect on *l*.
    """
    tag, text = node.tag, node.text
    if len(node):
        # Branch node: blank line, open marker, children, close marker.
        l.append('\n')
        if text:
            l.append('\\+%s %s\n' % (tag, text))
        else:
            l.append('\\+%s\n' % tag)
        for child in node:
            _to_settings_string(child, l, **kwargs)
        l.append('\\-%s\n' % tag)
    else:
        # Leaf node: one marker line, with the value when present.
        if text:
            l.append('\\%s %s\n' % (tag, text))
        else:
            l.append('\\%s\n' % tag)
    return ''.join(l)
class MarkerSet :
    """This class is a container for FieldMetadata objects. A marker set
    contains a list of the fields in a database together with information
    about those fields.

    The raw SFB looks like this::
      \\+mkrset
      \\lngDefault Default
      \\mkrRecord lx
      \\+mkr dt
      \\nam Date Last Edited
      \\lng Default
      \\mkrOverThis lx
      \\-mkr
      \\+mkr lx
      \\nam Rotokas Word
      \\lng Rotokas
      \\-mkr
      \\-mkrset
    """
    def __init__(self) :
        # Maps field marker (string) -> FieldMetadata.
        self._dict = {}

    def get_markers(self) :
        """Obtain a list of all of the field markers for the marker set.

        @returns: list of field markers
        @rtype: list of strings"""
        # NOTE(review): Python 2 dict.keys() (a list); build_tree() sorts the
        # result in place, which would fail on a Python 3 view object.
        return self._dict.keys()

    def add_field_metadata(self, fmeta) :
        """Add FieldMetadata object to dictionary of marker sets, keyed by field marker.

        @param fmeta: field metadata to be added to collection for marker set
        @type fmeta: FieldMetadata"""
        self._dict[fmeta.get_marker()] = fmeta

    def get_metadata_by_marker(self, mkr) :
        """Obtain a FieldMetadata object for the field marker provided.

        @param mkr: field to obtain metadata for
        @type mkr: string
        @returns: metadata for field type associated with marker
        @rtype: FieldMetadata"""
        return self._dict[mkr]

    def get_field_marker_hierarchy(self) :
        """Build an ElementTree mirroring the parent/child relations of the markers.

        @returns: tree rooted at the marker that has no parent marker
        @rtype: ElementTree.ElementTree
        """
        # Find root field marker (the one with no mkrOverThis).
        # NOTE(review): if every marker has a parent, root stays None and
        # builder.start(None, {}) below will misbehave -- confirm inputs.
        root = None
        for fm in self.get_markers() :
            fmmd = self.get_metadata_by_marker(fm)
            if not fmmd.get_parent_marker() :
                root = fm
        # Build tree for field markers
        builder = ElementTree.TreeBuilder()
        builder.start(root, {})
        self.build_tree(root, builder)
        builder.end(root)
        return ElementTree.ElementTree(builder.close())

    def build_tree(self, mkr, builder) :
        """Recursively emit the children of field marker mkr into builder.

        @param mkr: marker whose children should be emitted
        @type mkr: string
        @param builder: tree builder being filled in
        @type builder: ElementTree.TreeBuilder
        @returns: the builder passed in
        """
        markers = self.get_markers()
        markers.sort()
        for tmpmkr in markers :
            fmmd = self.get_metadata_by_marker(tmpmkr)
            # Field is child of current field
            if fmmd.get_parent_marker() == mkr :
                # Emit the field's range set (if any) as a nested element.
                rangeset = fmmd.get_rangeset()
                if rangeset :
                    builder.start("rangeset", {})
                    for rsi in rangeset :
                        builder.start("value", {})
                        builder.data(rsi)
                        builder.end("value")
                    builder.end("rangeset")
                # Element attributes: fall back to "" because attribute
                # values may not be None.
                name = fmmd.get_name()
                if not name :
                    name = ""
                desc = fmmd.get_description()
                if not desc :
                    desc = ""
                d = {"name" : name,
                     "desc" : desc}
                #print fmmd.get_language()
                #print fmmd.is_multiword()
                #print fmmd.requires_value()
                builder.start(tmpmkr, d)
                self.build_tree(tmpmkr, builder)
                builder.end(tmpmkr)
        return builder
class FieldMetadata :
    """Metadata describing a single Toolbox field type.

    Bundles a field's marker, human-readable name, description, language,
    range set (the values the field may take), multiword/required flags,
    and the marker of its parent field.

    The raw field metadata looks like this::
      \\+mkr dx
      \\nam Dialect
      \\desc dialects in which lexeme is found
      \\lng Default
      \\rngset Aita Atsilima Central Pipipaia
      \\mkrOverThis lx
      \\-mkr
    """
    def __init__(self,
                 marker=None,
                 name=None,
                 desc=None,
                 lang=None,
                 rangeset=None,
                 multiword=None,
                 required=None,
                 parent_mkr=None) :
        self._marker = marker
        self._name = name
        self._desc = desc
        self._lang = lang
        self._rangeset = rangeset
        self._multiword = multiword
        self._required = required
        self._parent_mkr = parent_mkr

    def get_marker(self) :
        """Return the marker for this field (e.g. 'dx').

        @returns: marker for field
        @rtype: string
        """
        return self._marker

    def get_name(self) :
        """Return the name for this field (e.g. 'Dialect').

        @returns: name of field
        @rtype: string
        """
        return self._name

    def get_description(self) :
        """Return the description of this field (e.g. 'dialects in which lexeme is found').

        @returns: description of field
        @rtype: string
        """
        return self._desc

    def get_language(self) :
        """Return the language in which the field is encoded (e.g. 'Default').

        @returns: name of language used for field
        @rtype: string
        """
        return self._lang

    def get_rangeset(self) :
        """Return the range set for the field (e.g. ['Aita', 'Atsilima']).

        @returns: list of possible values for field
        @rtype: list of strings
        """
        return self._rangeset

    def set_rangeset(self, rangeset) :
        """Replace the list of valid values for the field.

        @param rangeset: list of valid values for the field
        @type rangeset: list
        """
        self._rangeset = rangeset

    def get_parent_marker(self) :
        """Return the marker of this field's parent (e.g. 'lx').

        @returns: marker for parent field
        @rtype: string
        """
        return self._parent_mkr

    def is_multiword(self) :
        """Report whether the field's value may consist of multiple words.

        @returns: whether field values can be multiword
        @rtype: boolean
        """
        return self._multiword

    def requires_value(self) :
        """Report whether the field must carry a value.

        @returns: whether field requires a value
        @rtype: boolean
        """
        return self._required
class LexiconSettings(ToolboxSettings) :
    """This class is used to parse and manipulate settings file for
    lexicons."""
    def __init__(self, file):
        # NOTE(review): does not chain to ToolboxSettings.__init__ -- confirm
        # whether StandardFormat state needs initializing here.
        self._file = file
        self._markerset = MarkerSet()
        self._tree = None

    def parse(self, encoding=None) :
        """Parse a settings file with lexicon metadata."""
        # NOTE(review): 'Settings' is not defined or imported in this module,
        # so this raises NameError as written; presumably it should be a
        # ToolboxSettings (or similar) instance.  Verify against callers.
        s = Settings()
        s.open(self._file)
        self._tree = s.parse(encoding=encoding)
        s.close()
        # Handle metadata for field markers (aka, marker set)
        for mkr in self._tree.findall('mkrset/mkr') :
            rangeset = None
            if self.__parse_value(mkr, "rngset") :
                rangeset = self.__parse_value(mkr, "rngset").split()
            fm = FieldMetadata(marker = mkr.text,
                               name = self.__parse_value(mkr, "nam"),
                               desc = self.__parse_value(mkr, "desc"),
                               lang = self.__parse_value(mkr, "lng"),
                               rangeset = rangeset,
                               multiword = self.__parse_boolean(mkr, "MultipleWordItems"),
                               required = self.__parse_boolean(mkr, "MustHaveData"),
                               parent_mkr = self.__parse_value(mkr, "mkrOverThis"))
            self._markerset.add_field_metadata(fm)
        # Handle range sets defined outside of marker set
        # WARNING: Range sets outside the marker set override those inside the
        # marker set
        for rs in self._tree.findall("rngset") :
            mkr = rs.findtext("mkr")
            fm = self._markerset.get_metadata_by_marker(mkr)
            fm.set_rangeset([d.text for d in rs.findall("dat") ])
            self._markerset.add_field_metadata(fm)

    def get_record_marker(self) :
        # Marker that introduces each record (e.g. 'lx').
        return self._tree.find('mkrset/mkrRecord').text

    def get_marker_set(self) :
        # MarkerSet accumulated by parse().
        return self._markerset

    def __parse_boolean(self, mkr, name) :
        # Presence of the child element is the flag; its content is ignored.
        if mkr.find(name) == None :
            return False
        else :
            return True

    def __parse_value(self, mkr, name) :
        # Returns the child element's text, or None when the child is missing.
        # (The bare except also hides any other error -- intentional best-effort.)
        try :
            return mkr.find(name).text
        except :
            return None
class InterlinearProcess :
    """Configuration of one step in text interlinearization.

    Records which marker the process reads, which marker it produces, and
    assorted display options.  Some option semantics were undocumented in
    the original source.
    """
    def __init__(self,
                 from_mkr=None,
                 to_mkr=None,
                 out_mkr=None,
                 gloss_sep=None,
                 fail_mark=None,
                 parse_proc=None,
                 show_fail_mark=None,
                 show_root_guess=None) :
        self.__from_mkr = from_mkr
        self.__to_mkr = to_mkr
        self.__out_mkr = out_mkr
        self.__gloss_sep = gloss_sep
        self.__fail_mark = fail_mark
        self.__parse_proc = parse_proc
        self.__show_fail_mark = show_fail_mark
        self.__show_root_guess = show_root_guess

    def get_from_marker(self) :
        """The marker searched for in the lookup process."""
        return self.__from_mkr

    def get_to_marker(self) :
        """The marker found in the lookup process."""
        return self.__to_mkr

    def get_output_marker(self) :
        """The marker under which results are written."""
        return self.__out_mkr

    def get_gloss_separator(self) :
        """Separator between multiple glosses (undocumented upstream)."""
        return self.__gloss_sep

    def get_failure_marker(self) :
        """The string used in the case of lookup failure."""
        return self.__fail_mark

    def is_parse_process(self) :
        """Whether this is a parse process (as opposed to a lookup process)."""
        return self.__parse_proc

    def show_failure_marker(self) :
        """Whether failure markers are displayed (undocumented upstream)."""
        return self.__show_fail_mark

    def show_root_guess(self) :
        """Whether root guesses are displayed (undocumented upstream)."""
        return self.__show_root_guess
class LookupProcess(InterlinearProcess) :
    """Interlinearization step performed by dictionary lookup (no parsing)."""
    pass
class ParseProcess(InterlinearProcess) :
    """Interlinearization step performed by morphological parsing."""
    pass
class TextSettings(ToolboxSettings) :
    """This class is used to parse and manipulate settings file for
    lexicons."""
    def __init__(self, file):
        # NOTE(review): does not chain to ToolboxSettings.__init__ -- confirm
        # whether StandardFormat state needs initializing here.
        self._file = file
        self._markerset = MarkerSet()
        self._tree = None

    def parse(self, encoding=None) :
        """Parse a settings file with lexicon metadata."""
        # NOTE(review): 'Settings' is not defined or imported in this module,
        # so this raises NameError as written; presumably it should be a
        # ToolboxSettings (or similar) instance.  Verify against callers.
        s = Settings()
        s.open(self._file)
        self._tree = s.parse(encoding=encoding)
        s.close()
        # Handle interlinear process list
        for proc in self._tree.findall("intprclst/intprc") :
            parseProcess = self.__parse_boolean(proc, "bParseProc")
            showRootGuess = self.__parse_boolean(proc, "bShowRootGuess")
            showFailMark = self.__parse_boolean(proc, "bShowFailMark")
            fromMkr = self.__parse_value(proc, "mkrFrom")
            outMkr = self.__parse_value(proc, "mkrOut")
            # NOTE(review): __parse_value may return None, in which case
            # .strip() raises AttributeError -- confirm mkrTo is always present.
            toMkr = self.__parse_value(proc, "mkrTo").strip()
            glossSep = self.__parse_value(proc, "GlossSeparator")
            failMark = self.__parse_value(proc, "FailMark")
            ip = ParseProcess(from_mkr = fromMkr,
                              to_mkr = toMkr,
                              gloss_sep = glossSep,
                              fail_mark = failMark,
                              parse_proc = parseProcess,
                              show_fail_mark = showFailMark,
                              show_root_guess = showRootGuess,
                              out_mkr = outMkr)
            # NOTE(review): ip is built but never stored; the branches below
            # are placeholders and the rest of this loop only prints debug
            # output -- confirm intended behavior.
            if parseProcess :
                pass
            else :
                pass
            print "----- Interlinear Process -----"
            print "            FROM: [%s]" % ip.get_from_marker()
            print "              TO: [%s]" % ip.get_to_marker()
            print "       GLOSS SEP: [%s]" % ip.get_gloss_separator()
            print "       FAIL MARK: [%s]" % ip.get_failure_marker()
            print "  SHOW FAIL MARK: [%s]" % ip.show_failure_marker()
            print " SHOW ROOT GUESS: [%s]" % ip.show_root_guess()
            print "   PARSE PROCESS: [%s]" % ip.is_parse_process()
            trilook = proc.find("triLook")
            # NOTE(review): 'if element:' is False for an element with no
            # children even when it exists; 'is not None' is the safe test.
            if trilook :
                print "  -- trilook --"
                print "  DB TYPE: [%s]" % self.__parse_value(trilook, "dbtyp")
                print "  MKR OUTPUT: [%s]" % self.__parse_value(trilook, "mkrOut")
            tripref = proc.find("triPref")
            if tripref :
                print "  -- tripref --"
                print "  DB TYPE: [%s]" % self.__parse_value(tripref, "dbtyp")
                print "  MKR OUTPUT: [%s]" % self.__parse_value(tripref, "mkrOut")
                try :
                    for d in tripref.findall("drflst/drf") :
                        print "  DB: [%s]" % self.__parse_value(d, "File")
                except :
                    pass
                try :
                    for d in tripref.find("mrflst") :
                        print "  MKR: [%s]" % d.text
                except :
                    pass
            triroot = proc.find("triRoot")
            if triroot :
                print "  -- triroot --"
                print "  DB TYPE: [%s]" % self.__parse_value(triroot, "dbtyp")
                print "  MKR OUTPUT: [%s]" % self.__parse_value(triroot, "mkrOut")
                try :
                    for d in triroot.findall("drflst/drf") :
                        print "  DB: [%s]" % self.__parse_value(d, "File")
                except :
                    pass
                try :
                    for d in triroot.find("mrflst") :
                        print "  MKR: [%s]" % d.text
                except :
                    pass
            print ""
        # Handle metadata for field markers (aka, marker set)
        for mkr in self._tree.findall('mkrset/mkr') :
            rangeset = None
            if self.__parse_value(mkr, "rngset") :
                rangeset = self.__parse_value(mkr, "rngset").split()
            fm = FieldMetadata(marker = mkr.text,
                               name = self.__parse_value(mkr, "nam"),
                               desc = self.__parse_value(mkr, "desc"),
                               lang = self.__parse_value(mkr, "lng"),
                               rangeset = rangeset,
                               multiword = self.__parse_boolean(mkr, "MultipleWordItems"),
                               required = self.__parse_boolean(mkr, "MustHaveData"),
                               parent_mkr = self.__parse_value(mkr, "mkrOverThis"))
            self._markerset.add_field_metadata(fm)
        # Handle range sets defined outside of marker set
        # WARNING: Range sets outside the marker set override those inside the
        # marker set
        for rs in self._tree.findall("rngset") :
            mkr = rs.findtext("mkr")
            fm = self._markerset.get_metadata_by_marker(mkr)
            fm.set_rangeset([d.text for d in rs.findall("dat") ])
            self._markerset.add_field_metadata(fm)

    def get_record_marker(self) :
        # Marker that introduces each record (e.g. 'ref').
        return self._tree.find('mkrset/mkrRecord').text

    def get_version(self) :
        # Toolbox version string from the settings file header.
        return self._tree.find('ver').text

    def get_description(self) :
        # Free-text description from the settings file header.
        return self._tree.find('desc').text

    def get_marker_set(self) :
        # MarkerSet accumulated by parse().
        return self._markerset

    def __parse_boolean(self, mkr, name) :
        # Presence of the child element is the flag; its content is ignored.
        if mkr.find(name) == None :
            return False
        else :
            return True

    def __parse_value(self, mkr, name) :
        # Returns the child element's text, or None when the child is missing.
        # (The bare except also hides any other error -- intentional best-effort.)
        try :
            return mkr.find(name).text
        except :
            return None
def demo():
    """Round-trip demo: parse an MDF settings file, inspect one value,
    write it out as XML, and re-serialize it to standard format."""
    settings = ToolboxSettings()
    settings.open('demos/MDF_AltH.typ')
    # unwrap=False keeps wrapped lines intact; the sample file is GBK-encoded.
    tree = settings.parse(unwrap=False, encoding='gbk')
    print tree.find('expset/expMDF/rtfPageSetup/paperSize').text
    tree.write('test.xml')
    print to_settings_string(tree).encode('gbk')
# Run the settings demo when executed as a script.
if __name__ == '__main__':
    demo()
| Python |
# Natural Language Toolkit: Shoebox Errors
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Stuart Robinson <Stuart.Robinson@mpi.nl>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
This module provides Shoebox exceptions.
"""
# ---------------------------------------------------------------------
# CLASS: ShoeboxError
# DESC: ???
# ---------------------------------------------------------------------
class ShoeboxError(Exception):
    """Root of the Shoebox exception hierarchy.

    Subclasses may fill in ``_msg`` with a human-readable message.
    """
    def __init__(self):
        # Start with an empty message; subclasses populate it as needed.
        self._msg = ""
# ---------------------------------------------
# CLASS: ValidationError
# DESC: ???
# ---------------------------------------------
class NonUniqueEntryError(ShoeboxError):
    """
    Raised when an entry expected to be unique turns out not to be.
    (Original docstring was a placeholder; semantics inferred from the
    class name -- confirm against raising sites.)
    """
    def __init__(self) :
        pass
class ValidationError(ShoeboxError):
    """Base class for errors found while validating field data."""

    def __init__(self):
        pass

    def setField(self, field):
        """Record the field object this error pertains to."""
        self._field = field

    def getField(self):
        """Return the field object this error pertains to."""
        return self._field
# ---------------------------------------------
# CLASS: NoMetadataFound
# DESC: ???
# ---------------------------------------------
class NoMetadataFound(ValidationError):
    """Raised when no metadata is available for a field."""

    def __init__(self, field):
        # The offending field, retrievable via getField().
        self._field = field
class FieldError(ShoeboxError):
    """Base class for errors concerning a particular field."""
    def __init__(self):
        pass
    def __str__(self) :
        # NOTE(review): get_message() is not defined on this class or any
        # visible ancestor, so str() on a FieldError raises AttributeError
        # unless a subclass provides it -- confirm intended contract.
        return self.get_message()
class NonUniqueFieldError(FieldError):
    """Raised when a field expected to be unique has more than one value."""

    def __init__(self, entry):
        self._entry = entry

    def setEntry(self, entry):
        """Replace the entry associated with this error."""
        self._entry = entry

    def getEntry(self):
        """Return the entry associated with this error."""
        return self._entry
# ---------------------------------------------
# CLASS: BadFieldValue
# DESC: ???
# ---------------------------------------------
class BadFieldValueError(ValidationError, FieldError):
    """Raised when a field's value violates the constraints declared in its
    metadata (range set, word wrap, empty value, or single word)."""

    # Error-type codes.
    FIELD_VALUE_ERROR_RANGE_SET = '1'
    FIELD_VALUE_ERROR_NO_WORD_WRAP = '2'
    FIELD_VALUE_ERROR_EMPTY_VALUE = '3'
    FIELD_VALUE_ERROR_SINGLE_WORD = '4'

    # Human-readable description for each error-type code.
    errorTypes = {
        '1': "Range Set",
        '2': "No Word Wrap",
        '3': "Empty Value",
        '4': "Single Word"
        }

    def __init__(self, errorType, entry, field, fmMetadata):
        """
        @param errorType: one of the FIELD_VALUE_ERROR_* codes
        @param entry: entry in which the bad value was found
        @param field: field holding the bad value
        @param fmMetadata: metadata for the field's marker
        """
        self._entry = entry
        self._errorType = errorType
        self._field = field
        self._fmMetadata = fmMetadata

    def __str__(self):
        e = self.getEntry()
        f = self.getField()
        typ = self.getErrorDescription()
        s = "'%s' error in '\\%s' field of record %i!\nRecord:\n%s" % (typ, f.getMarker(), e.getNumber(), e.getRawText())
        return s

    def getFieldMarkerMetadata(self):
        return self._fmMetadata

    def setFieldMarkerMetadata(self, fmMetadata):
        self._fmMetadata = fmMetadata

    def getErrorDescription(self):
        # dict.get() yields None for unknown codes, replacing the bare
        # 'except' that previously swallowed *any* error raised here.
        return self.errorTypes.get(self.getErrorType())

    def getErrorType(self):
        return self._errorType

    def setErrorType(self, errorType):
        self._errorType = errorType

    def getEntry(self):
        return self._entry

    def setEntry(self, entry):
        self._entry = entry
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.