code stringlengths 1 1.72M | language stringclasses 1
value |
|---|---|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Natural Language Toolkit: Toolbox Settings Parser
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Greg Aumann <greg_aumann@sil.org>/Stuart Robinson <stuart@zapata.org>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
This module provides functionality for reading settings files for Toolbox.
Settings files provide information (metadata) concerning lexicons and texts,
such as which fields are found within them and what kind of values those
fields can have.
"""
from elementtree import ElementTree
from en.parser.nltk_lite.corpora.toolbox import StandardFormat
#from en.parser.nltk_lite.parse.tree import Tree
class ToolboxSettings(StandardFormat):
    """Base class for readers of Toolbox settings files."""

    def __init__(self):
        super(ToolboxSettings, self).__init__()

    def parse(self, encoding=None, errors='strict', **kwargs):
        """Parse a settings file into an ElementTree.

        Field markers beginning with ``+`` open a nested block, markers
        beginning with ``-`` close one, and all other markers become leaf
        elements holding their value.

        @param encoding: encoding used by settings file
        @type encoding: string
        @param errors: Error handling scheme for codec. Same as C{.decode} inbuilt method.
        @type errors: string
        @param kwargs: Keyword arguments passed to L{StandardFormat.fields()}
        @type kwargs: keyword arguments dictionary
        @rtype: ElementTree._ElementInterface
        @return: contents of toolbox settings file with a nested structure
        """
        builder = ElementTree.TreeBuilder()
        for mkr, value in self.fields(encoding=encoding, errors=errors, **kwargs):
            prefix = mkr[0]
            if prefix == "+":
                # Open a new block and record its value.
                builder.start(mkr[1:], {})
                builder.data(value)
            elif prefix == "-":
                # Close the currently open block.
                builder.end(mkr[1:])
            else:
                # Ordinary field: a self-contained leaf element.
                builder.start(mkr, {})
                builder.data(value)
                builder.end(mkr)
        return ElementTree.ElementTree(builder.close())
def to_settings_string(tree, encoding=None, errors='strict', unicode_fields=None):
    """Serialize a settings ElementTree back into Toolbox standard-format text.

    @param tree: parsed settings file (as produced by ToolboxSettings.parse)
    @type tree: ElementTree.ElementTree
    @param encoding: encoding to use (currently unused by the serializer)
    @type encoding: string
    @param errors: codec error-handling scheme (currently unused)
    @type errors: string
    @param unicode_fields: fields to treat as unicode (currently unused)
    @rtype: string
    @return: standard-format text for the whole tree
    """
    l = list()
    # Bug fix: the original passed encoding/errors/unicode_fields positionally,
    # but _to_settings_string only accepts extra arguments as **kwargs, so the
    # call raised TypeError.  Pass them by keyword instead.
    _to_settings_string(tree.getroot(), l,
                        encoding=encoding, errors=errors,
                        unicode_fields=unicode_fields)
    return ''.join(l)

def _to_settings_string(node, l, **kwargs):
    """Recursively append standard-format lines for C{node} to list C{l}.

    Leaf elements become ``\\mkr value`` lines; elements with children are
    wrapped in ``\\+mkr ... \\-mkr``.
    NOTE(review): the encoding/errors/unicode_fields keyword arguments are
    accepted but currently unused -- confirm whether encoding was intended
    to be applied here.
    """
    tag = node.tag
    text = node.text
    if len(node) == 0:
        # Leaf element: one field line, with or without a value.
        if text:
            l.append('\\%s %s\n' % (tag, text))
        else:
            l.append('\\%s\n' % tag)
    else:
        # Block element: blank line, opening marker, children, closing marker.
        l.append('\n')
        if text:
            l.append('\\+%s %s\n' % (tag, text))
        else:
            l.append('\\+%s\n' % tag)
        for n in node:
            _to_settings_string(n, l, **kwargs)
        l.append('\\-%s\n' % tag)
    return ''.join(l)
class MarkerSet :
    """This class is a container for FieldMetadata objects.  A marker set
    contains a list of the fields in a database together with information
    about those fields.

    The raw SFB looks like this::

        \\+mkrset
        \\lngDefault Default
        \\mkrRecord lx
        \\+mkr dt
        \\nam Date Last Edited
        \\lng Default
        \\mkrOverThis lx
        \\-mkr
        \\+mkr lx
        \\nam Rotokas Word
        \\lng Rotokas
        \\-mkr
        \\-mkrset
    """

    def __init__(self) :
        # Maps field marker (string) -> FieldMetadata
        self._dict = {}

    def get_markers(self) :
        """Obtain a list of all of the field markers for the marker set.
        @returns: list of field markers
        @rtype: list of strings"""
        return self._dict.keys()

    def add_field_metadata(self, fmeta) :
        """Add FieldMetadata object to dictionary of marker sets, keyed by field marker.
        @param fmeta: field metadata to be added to collection for marker set
        @type fmeta: FieldMetadata"""
        self._dict[fmeta.get_marker()] = fmeta

    def get_metadata_by_marker(self, mkr) :
        """Obtain a FieldMetadata object for the field marker provided.
        @param mkr: field to obtain metadata for
        @type mkr: string
        @returns: metadata for field type associated with marker
        @rtype: FieldMetadata"""
        return self._dict[mkr]

    def get_field_marker_hierarchy(self) :
        """Build an ElementTree whose nesting mirrors the parent/child
        relationships between field markers.
        @returns: tree of field markers rooted at the parentless marker
        @rtype: ElementTree.ElementTree"""
        # Find root field marker (the one with no parent).  NOTE(review): if
        # no parentless marker exists, root stays None and building fails.
        root = None
        for fm in self.get_markers() :
            fmmd = self.get_metadata_by_marker(fm)
            if not fmmd.get_parent_marker() :
                root = fm
        # Build tree for field markers
        builder = ElementTree.TreeBuilder()
        builder.start(root, {})
        self.build_tree(root, builder)
        builder.end(root)
        return ElementTree.ElementTree(builder.close())

    def build_tree(self, mkr, builder) :
        """Recursively add the children of C{mkr} to C{builder}, in sorted
        marker order.
        @param mkr: marker whose children should be added
        @type mkr: string
        @param builder: tree builder being populated
        @returns: the same builder, for chaining"""
        # Bug fix: get_markers() returned a list copy in Python 2, but under
        # Python 3 dict.keys() is a view with no .sort(); sorted() handles
        # both without mutating anything.
        markers = sorted(self.get_markers())
        for tmpmkr in markers :
            fmmd = self.get_metadata_by_marker(tmpmkr)
            # Field is child of current field
            if fmmd.get_parent_marker() == mkr :
                # Handle rangeset.  NOTE(review): the rangeset elements are
                # emitted as siblings *before* the child element itself is
                # started -- confirm this is the intended layout.
                rangeset = fmmd.get_rangeset()
                if rangeset :
                    builder.start("rangeset", {})
                    for rsi in rangeset :
                        builder.start("value", {})
                        builder.data(rsi)
                        builder.end("value")
                    builder.end("rangeset")
                # Handle name and description, defaulting to empty strings
                name = fmmd.get_name()
                if not name :
                    name = ""
                desc = fmmd.get_description()
                if not desc :
                    desc = ""
                d = {"name" : name,
                     "desc" : desc}
                builder.start(tmpmkr, d)
                self.build_tree(tmpmkr, builder)
                builder.end(tmpmkr)
        return builder
class FieldMetadata :
    """Container for the metadata describing a single field: its marker,
    name, description, language, range set (valid values), multiword and
    required flags, and the marker of its parent field.

    The raw field metadata looks like this::

        \\+mkr dx
        \\nam Dialect
        \\desc dialects in which lexeme is found
        \\lng Default
        \\rngset Aita Atsilima Central Pipipaia
        \\mkrOverThis lx
        \\-mkr
    """

    def __init__(self, marker=None, name=None, desc=None, lang=None,
                 rangeset=None, multiword=None, required=None,
                 parent_mkr=None) :
        self._marker = marker
        self._name = name
        self._desc = desc
        self._lang = lang
        self._rangeset = rangeset
        self._multiword = multiword
        self._required = required
        self._parent_mkr = parent_mkr

    def get_marker(self) :
        """Return the marker for this field (e.g., 'dx').
        @rtype: string"""
        return self._marker

    def get_name(self) :
        """Return the name for this field (e.g., 'Dialect').
        @rtype: string"""
        return self._name

    def get_description(self) :
        """Return the description for this field (e.g., 'dialects in which
        lexeme is found').
        @rtype: string"""
        return self._desc

    def get_language(self) :
        """Return the language in which the field is encoded (e.g., 'Default').
        @rtype: string"""
        return self._lang

    def get_rangeset(self) :
        """Return the range set for the field (e.g., ['Aita', 'Atsilima',
        'Central', 'Pipipaia']).
        @rtype: list of strings"""
        return self._rangeset

    def set_rangeset(self, rangeset) :
        """Replace the list of valid values for the field.
        @param rangeset: list of valid values for the field
        @type rangeset: list"""
        self._rangeset = rangeset

    def get_parent_marker(self) :
        """Return the marker of this field's parent (e.g., 'lx').
        @rtype: string"""
        return self._parent_mkr

    def is_multiword(self) :
        """Return whether field values may consist of multiple words.
        @rtype: boolean"""
        return self._multiword

    def requires_value(self) :
        """Return whether the field must have a value.
        @rtype: boolean"""
        return self._required
class LexiconSettings(ToolboxSettings) :
    """This class is used to parse and manipulate settings file for
    lexicons."""

    def __init__(self, file):
        # file: path to the lexicon settings (.typ) file
        self._file = file
        # Populated by parse()
        self._markerset = MarkerSet()
        self._tree = None

    def parse(self, encoding=None) :
        """Parse a settings file with lexicon metadata.
        @param encoding: encoding used by the settings file
        @type encoding: string
        """
        # Bug fix: the original instantiated `Settings()`, a name not defined
        # anywhere in this module (NameError at runtime).  ToolboxSettings
        # provides the open()/parse()/close() API used here (cf. demo()).
        s = ToolboxSettings()
        s.open(self._file)
        self._tree = s.parse(encoding=encoding)
        s.close()
        # Handle metadata for field markers (aka, marker set)
        for mkr in self._tree.findall('mkrset/mkr') :
            rangeset = None
            if self.__parse_value(mkr, "rngset") :
                rangeset = self.__parse_value(mkr, "rngset").split()
            fm = FieldMetadata(marker = mkr.text,
                               name = self.__parse_value(mkr, "nam"),
                               desc = self.__parse_value(mkr, "desc"),
                               lang = self.__parse_value(mkr, "lng"),
                               rangeset = rangeset,
                               multiword = self.__parse_boolean(mkr, "MultipleWordItems"),
                               required = self.__parse_boolean(mkr, "MustHaveData"),
                               parent_mkr = self.__parse_value(mkr, "mkrOverThis"))
            self._markerset.add_field_metadata(fm)
        # Handle range sets defined outside of marker set
        # WARNING: Range sets outside the marker set override those inside the
        # marker set
        for rs in self._tree.findall("rngset") :
            mkr = rs.findtext("mkr")
            fm = self._markerset.get_metadata_by_marker(mkr)
            fm.set_rangeset([d.text for d in rs.findall("dat")])
            self._markerset.add_field_metadata(fm)

    def get_record_marker(self) :
        """Return the marker that identifies records (text of mkrset/mkrRecord)."""
        return self._tree.find('mkrset/mkrRecord').text

    def get_marker_set(self) :
        """Return the MarkerSet built by parse()."""
        return self._markerset

    def __parse_boolean(self, mkr, name) :
        """Return True iff C{mkr} has a child element called C{name}."""
        # `is None`: identity comparison is the correct way to test the
        # result of Element.find() (elements without children are falsy).
        return mkr.find(name) is not None

    def __parse_value(self, mkr, name) :
        """Return the text of child C{name} of C{mkr}, or None if absent."""
        # Narrowed from a bare except: find() returns None when the child is
        # missing, so .text raises AttributeError in exactly that case.
        try :
            return mkr.find(name).text
        except AttributeError :
            return None
class InterlinearProcess :
    """This class represents a process for text interlinearization."""

    def __init__(self, from_mkr=None, to_mkr=None, out_mkr=None,
                 gloss_sep=None, fail_mark=None, parse_proc=None,
                 show_fail_mark=None, show_root_guess=None) :
        """Create an interlinear process description.  All arguments are
        optional and default to None."""
        self.__from_mkr = from_mkr
        self.__to_mkr = to_mkr
        self.__out_mkr = out_mkr
        self.__gloss_sep = gloss_sep
        self.__fail_mark = fail_mark
        self.__parse_proc = parse_proc
        self.__show_fail_mark = show_fail_mark
        self.__show_root_guess = show_root_guess

    def get_output_marker(self) :
        """Return the marker written by this process."""
        return self.__out_mkr

    def get_from_marker(self) :
        """Return the marker searched for in the lookup process."""
        return self.__from_mkr

    def get_to_marker(self) :
        """Return the marker found in the lookup process."""
        return self.__to_mkr

    def get_gloss_separator(self) :
        """Return the gloss separator string for this process."""
        return self.__gloss_sep

    def get_failure_marker(self) :
        """Return the string used in the case of lookup failure."""
        return self.__fail_mark

    def is_parse_process(self) :
        """Return whether this is a parse process (as opposed to a lookup
        process)."""
        return self.__parse_proc

    def show_failure_marker(self) :
        """Return the show-failure-marker flag for this process."""
        return self.__show_fail_mark

    def show_root_guess(self) :
        """Return the show-root-guess flag for this process."""
        return self.__show_root_guess
class LookupProcess(InterlinearProcess) :
    """Marker subclass of InterlinearProcess for lookup processes; adds no
    behaviour of its own."""
    pass
class ParseProcess(InterlinearProcess) :
    """Marker subclass of InterlinearProcess for parse processes; adds no
    behaviour of its own."""
    pass
class TextSettings(ToolboxSettings) :
    """This class is used to parse and manipulate settings file for
    lexicons."""
    def __init__(self, file):
        # file: path to the Toolbox text settings file
        self._file = file
        # Collection of FieldMetadata objects, populated by parse()
        self._markerset = MarkerSet()
        # ElementTree of the parsed settings file; None until parse() runs
        self._tree = None
    def parse(self, encoding=None) :
        """Parse a settings file with lexicon metadata.
        @param encoding: encoding used by the settings file
        @type encoding: string
        """
        # NOTE(review): `Settings` is not defined anywhere in this module --
        # presumably ToolboxSettings was meant (cf. demo()); confirm.
        s = Settings()
        s.open(self._file)
        self._tree = s.parse(encoding=encoding)
        s.close()
        # Handle interlinear process list
        for proc in self._tree.findall("intprclst/intprc") :
            parseProcess = self.__parse_boolean(proc, "bParseProc")
            showRootGuess = self.__parse_boolean(proc, "bShowRootGuess")
            showFailMark = self.__parse_boolean(proc, "bShowFailMark")
            fromMkr = self.__parse_value(proc, "mkrFrom")
            outMkr = self.__parse_value(proc, "mkrOut")
            # NOTE(review): .strip() raises AttributeError when mkrTo is
            # absent (__parse_value returns None) -- confirm mkrTo is
            # guaranteed to be present.
            toMkr = self.__parse_value(proc, "mkrTo").strip()
            glossSep = self.__parse_value(proc, "GlossSeparator")
            failMark = self.__parse_value(proc, "FailMark")
            ip = ParseProcess(from_mkr = fromMkr,
                              to_mkr = toMkr,
                              gloss_sep = glossSep,
                              fail_mark = failMark,
                              parse_proc = parseProcess,
                              show_fail_mark = showFailMark,
                              show_root_guess = showRootGuess,
                              out_mkr = outMkr)
            # NOTE(review): dead code -- both branches are `pass`; presumably
            # a LookupProcess was meant to be built in the non-parse case.
            if parseProcess :
                pass
            else :
                pass
            # Debug dump of the process just built (Python 2 print syntax)
            print "----- Interlinear Process -----"
            print " FROM: [%s]" % ip.get_from_marker()
            print " TO: [%s]" % ip.get_to_marker()
            print " GLOSS SEP: [%s]" % ip.get_gloss_separator()
            print " FAIL MARK: [%s]" % ip.get_failure_marker()
            print " SHOW FAIL MARK: [%s]" % ip.show_failure_marker()
            print " SHOW ROOT GUESS: [%s]" % ip.show_root_guess()
            print " PARSE PROCESS: [%s]" % ip.is_parse_process()
            # Optional lookup sub-section of the process.
            # NOTE(review): truth-testing an Element checks for children;
            # `is not None` is probably what is meant here (and below).
            trilook = proc.find("triLook")
            if trilook :
                print " -- trilook --"
                print " DB TYPE: [%s]" % self.__parse_value(trilook, "dbtyp")
                print " MKR OUTPUT: [%s]" % self.__parse_value(trilook, "mkrOut")
            # Optional prefix sub-section of the process
            tripref = proc.find("triPref")
            if tripref :
                print " -- tripref --"
                print " DB TYPE: [%s]" % self.__parse_value(tripref, "dbtyp")
                print " MKR OUTPUT: [%s]" % self.__parse_value(tripref, "mkrOut")
                # Bare excepts tolerate a missing drflst/mrflst sub-element
                try :
                    for d in tripref.findall("drflst/drf") :
                        print " DB: [%s]" % self.__parse_value(d, "File")
                except :
                    pass
                try :
                    for d in tripref.find("mrflst") :
                        print " MKR: [%s]" % d.text
                except :
                    pass
            # Optional root sub-section of the process
            triroot = proc.find("triRoot")
            if triroot :
                print " -- triroot --"
                print " DB TYPE: [%s]" % self.__parse_value(triroot, "dbtyp")
                print " MKR OUTPUT: [%s]" % self.__parse_value(triroot, "mkrOut")
                try :
                    for d in triroot.findall("drflst/drf") :
                        print " DB: [%s]" % self.__parse_value(d, "File")
                except :
                    pass
                try :
                    for d in triroot.find("mrflst") :
                        print " MKR: [%s]" % d.text
                except :
                    pass
            print ""
        # Handle metadata for field markers (aka, marker set)
        for mkr in self._tree.findall('mkrset/mkr') :
            rangeset = None
            if self.__parse_value(mkr, "rngset") :
                rangeset = self.__parse_value(mkr, "rngset").split()
            fm = FieldMetadata(marker = mkr.text,
                               name = self.__parse_value(mkr, "nam"),
                               desc = self.__parse_value(mkr, "desc"),
                               lang = self.__parse_value(mkr, "lng"),
                               rangeset = rangeset,
                               multiword = self.__parse_boolean(mkr, "MultipleWordItems"),
                               required = self.__parse_boolean(mkr, "MustHaveData"),
                               parent_mkr = self.__parse_value(mkr, "mkrOverThis"))
            self._markerset.add_field_metadata(fm)
        # Handle range sets defined outside of marker set
        # WARNING: Range sets outside the marker set override those inside the
        # marker set
        for rs in self._tree.findall("rngset") :
            mkr = rs.findtext("mkr")
            fm = self._markerset.get_metadata_by_marker(mkr)
            fm.set_rangeset([d.text for d in rs.findall("dat") ])
            self._markerset.add_field_metadata(fm)
    def get_record_marker(self) :
        # Marker identifying records (text of mkrset/mkrRecord)
        return self._tree.find('mkrset/mkrRecord').text
    def get_version(self) :
        # Settings-file version string
        return self._tree.find('ver').text
    def get_description(self) :
        # Settings-file description
        return self._tree.find('desc').text
    def get_marker_set(self) :
        # MarkerSet built by parse()
        return self._markerset
    def __parse_boolean(self, mkr, name) :
        # True iff `mkr` has a child element called `name`
        if mkr.find(name) == None :
            return False
        else :
            return True
    def __parse_value(self, mkr, name) :
        # Text of child `name`, or None if the child is missing
        try :
            return mkr.find(name).text
        except :
            return None
def demo():
    """Demonstrate parsing a Toolbox settings file and round-tripping it
    back to standard-format text (uses the GBK-encoded demo file
    demos/MDF_AltH.typ)."""
    settings = ToolboxSettings()
    settings.open('demos/MDF_AltH.typ')
    # unwrap=False is forwarded to StandardFormat.fields(); the demo file
    # is GBK-encoded
    tree = settings.parse(unwrap=False, encoding='gbk')
    print tree.find('expset/expMDF/rtfPageSetup/paperSize').text
    # Dump the parsed tree as XML, then re-serialize to standard format
    tree.write('test.xml')
    print to_settings_string(tree).encode('gbk')
if __name__ == '__main__':
    demo()
| Python |
# Natural Language Toolkit: Shoebox Text
#
# Author: Stuart Robinson <stuart@zapata.org>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
This module provides tools for parsing and manipulating the contents
of a Shoebox text without reference to its metadata.
"""
import re
from utilities import Field, SequentialDictionary
from en.parser.nltk_lite.corpora.shoebox import ShoeboxFile
# --------------------------------------------------------
# CLASS: Word
# DESC: Object that represents a word.
# --------------------------------------------------------
class Word:
    """A single word of interlinear text, bundling a surface form, a gloss,
    a part of speech, and a list of morphemes, together with the raw
    (alignment-preserving) gloss, morpheme, and part-of-speech strings.
    """

    def __init__(self, form=None, gloss=None, morphemes=None, partOfSpeech=None):
        """Create a Word.

        @param form: the surface form for a word
        @type form: string
        @param gloss: the gloss for a word
        @type gloss: string
        @param morphemes: list of Morpheme objects for a word
        @type morphemes: list
        @param partOfSpeech: the part of speech for a word
        @type partOfSpeech: string
        """
        self._form = form
        self._gloss = gloss
        self._morphemes = morphemes
        self._partOfSpeech = partOfSpeech
        # Raw tiers preserve original spacing; None until explicitly set.
        self._rawGloss = None
        self._rawMorphemes = None
        self._rawPartOfSpeech = None

    def get_form(self):
        """Return the surface form of the word."""
        return self._form

    def set_form(self, form):
        """Replace the surface form of the word."""
        self._form = form

    def get_gloss(self):
        """Return the gloss for the word (without alignment spacing)."""
        return self._gloss

    def set_gloss(self, gloss):
        """Replace the gloss for the word."""
        self._gloss = gloss

    def get_morphemes(self):
        """Return the list of Morpheme objects for the word."""
        return self._morphemes

    def set_morphemes(self, morphemes):
        """Replace the list of Morpheme objects for the word."""
        self._morphemes = morphemes

    def get_part_of_speech(self):
        """Return the part of speech for the word (without alignment spacing)."""
        return self._partOfSpeech

    def set_part_of_speech(self, partOfSpeech):
        """Replace the part of speech for the word."""
        self._partOfSpeech = partOfSpeech

    def get_raw_gloss(self):
        """Return the raw (alignment-preserving) gloss string."""
        return self._rawGloss

    def set_raw_gloss(self, rawGloss):
        """Replace the raw gloss string."""
        self._rawGloss = rawGloss

    def get_raw_morphemes(self):
        """Return the raw (alignment-preserving) morpheme string."""
        return self._rawMorphemes

    def set_raw_morphemes(self, rawMorphemes):
        """Replace the raw morpheme string."""
        self._rawMorphemes = rawMorphemes

    def get_raw_part_of_speech(self):
        """Return the raw (alignment-preserving) part-of-speech string."""
        return self._rawPartOfSpeech

    def set_raw_part_of_speech(self, rawPartOfSpeech):
        """Replace the raw part-of-speech string."""
        self._rawPartOfSpeech = rawPartOfSpeech
# --------------------------------------------------------
# CLASS: Morpheme
# DESC: Object that represents a morpheme.
# --------------------------------------------------------
class Morpheme:
    """A single morpheme, holding a form, a gloss, and a part of speech."""

    def __init__(self, form=None, gloss=None, partOfSpeech=None):
        """Create a Morpheme; all attributes default to None."""
        self._form = form
        self._gloss = gloss
        self._partOfSpeech = partOfSpeech

    def get_form(self):
        """Return the form of the morpheme."""
        return self._form

    def set_form(self, form):
        """Replace the form of the morpheme."""
        self._form = form

    def get_gloss(self):
        """Return the gloss of the morpheme."""
        return self._gloss

    def set_gloss(self, gloss):
        """Replace the gloss of the morpheme."""
        self._gloss = gloss

    def get_part_of_speech(self):
        """Return the part of speech of the morpheme."""
        return self._partOfSpeech

    def set_part_of_speech(self, partOfSpeech):
        """Replace the part of speech of the morpheme."""
        self._partOfSpeech = partOfSpeech
# --------------------------------------------------------
# CLASS: Line
# DESC: Object that represents a line from an interlinear
# text.
# --------------------------------------------------------
class Line:
    """This class defines a line of interlinear glossing, such as::

        \\ref 9
        \\t Vigei avapaviei atarisia.
        \\m vigei ava -pa -vi -ei atari -sia
        \\g 1.PL.INC go -PROG -1.PL.INCL -PRES fish -PURP
        \\p PRO.PERS V.I -SUFF.V.3 -SUFF.VI.4 -SUFF.VI.5 V.I -SUFF.V.4
        \\fp Yumi bai go kisim pis.
        \\fe We're going fishing.

    The tiers of a line are saved as a sequential dictionary with
    all of its associated fields.  Identified by the field marker \\ref
    by default."""

    def __init__(self, label=None):
        """Constructor that initializes Line object.
        @param label: identifier for the line (e.g., the \\ref value)
        @type label: string
        """
        self._fields = SequentialDictionary()
        self._label = label

    def add_field(self, field):
        """Add a field to the line, keyed by its field marker.
        @param field: field to add
        @type field: Field
        """
        fm = field.get_marker()
        fv = field.get_values()
        self._fields[fm] = fv

    def get_field_markers(self):
        """Obtain list of unique field markers for the line."""
        return self._fields.keys()

    def get_field_as_string(self, field_marker, join_string=""):
        """Return a particular field given a field marker, or a blank string
        if the field is not found.
        @param field_marker: marker of desired field
        @type field_marker: string
        @param join_string: string used to join field values (defaults to blank string)
        @type join_string: string
        @rtype: string
        """
        try:
            return join_string.join(self._fields[field_marker])
        except KeyError:
            return ""

    def get_field_values_by_field_marker(self, field_marker, sep=None):
        """Obtain all field values for a line, given a field marker.
        @param field_marker: marker of desired field
        @type field_marker: string
        @param sep: if given, join the values with this separator
        @returns: the values (joined if sep is given), or None if absent
        """
        try:
            values = self._fields[field_marker]
            if sep is None:
                return values
            else:
                return sep.join(values)
        except KeyError:
            return None

    def get_field_values(self):
        """Obtain list of field values for the line."""
        return self._fields.values()

    def get_label(self):
        """Obtain identifier for line."""
        return self._label

    def get_raw_text(self):
        """Obtain original line of text (only valid after set_raw_text)."""
        return self._rawtext

    def set_label(self, label):
        """Set identifier for line."""
        self._label = label

    def set_raw_text(self, rawtext):
        """Set original line of text."""
        self._rawtext = rawtext

    def get_morphemes(self):
        """Obtain a list of Morpheme objects for the line, by slicing the
        aligned morpheme ('m') and gloss ('g') tiers at the morpheme
        boundaries of the 'm' tier."""
        # Bug fix: the original called the removed camelCase method
        # getFieldValueByFieldMarker() (AttributeError) and printed the
        # indices as leftover debug output.
        morphemes = []
        morphemeFormField = self.get_field_values_by_field_marker("m")
        morphemeGlossField = self.get_field_values_by_field_marker("g")
        indices = get_indices(morphemeFormField)
        morphemeFormSlices = get_slices_by_indices(morphemeFormField, indices)
        morphemeGlossSlices = get_slices_by_indices(morphemeGlossField, indices)
        for i in range(0, len(morphemeFormSlices)):
            m = Morpheme()
            m.set_form(morphemeFormSlices[i].strip(" ").strip("-"))
            m.set_gloss(morphemeGlossSlices[i].strip(" ").strip("-"))
            morphemes.append(m)
        return morphemes

    def get_words(self, flagParseMorphemes=True):
        """Obtain a list of Word objects for the line.
        @param flagParseMorphemes: whether each word should also be inflated
            with Morpheme objects parsed from its slice of the m/g/p tiers
        @type flagParseMorphemes: boolean
        @rtype: list of Word
        """
        words = []
        # Obtain raw field values (assumes the t/m/g/p tiers are present and
        # character-aligned -- TODO confirm against input data)
        lineWordFormField = self.get_field_values_by_field_marker("t")
        lineMorphemeFormField = self.get_field_values_by_field_marker("m")
        lineMorphemeGlossField = self.get_field_values_by_field_marker("g")
        linePOSField = self.get_field_values_by_field_marker("p")
        wordIndices = get_indices(lineWordFormField)
        # Slice raw field values at the word boundaries of the 't' tier
        lineWordFormSlices = get_slices_by_indices(lineWordFormField, wordIndices)
        lineMorphemeFormSlices = get_slices_by_indices(lineMorphemeFormField, wordIndices)
        lineMorphemeGlossSlices = get_slices_by_indices(lineMorphemeGlossField, wordIndices)
        linePOSSlices = get_slices_by_indices(linePOSField, wordIndices)
        # Go through each word-sized slice
        for i in range(0, len(lineWordFormSlices)):
            wordForm = lineWordFormSlices[i]
            wordMorphemeForms = lineMorphemeFormSlices[i]
            wordMorphemeGlosses = lineMorphemeGlossSlices[i]
            wordPOS = linePOSSlices[i]
            # Initialize word object and set raw fields
            w = Word()
            w.set_form(wordForm.strip(" ").strip("-"))
            w.set_raw_morphemes(wordMorphemeForms.strip(" ").strip("-"))
            w.set_raw_gloss(wordMorphemeGlosses.strip(" ").strip("-"))
            w.set_part_of_speech(wordPOS.strip(" ").strip("-"))
            # Should the word be inflated with morpheme objects?
            # If so, build a morpheme object for each morpheme in the word
            if flagParseMorphemes:
                morphemes = []
                # Get indices from the morpheme-breakdown slice in order to
                # make morpheme-sized sub-slices
                morphemeIndices = get_indices(wordMorphemeForms)
                morphemeFormSlices = get_slices_by_indices(wordMorphemeForms, morphemeIndices)
                morphemeGlossSlices = get_slices_by_indices(wordMorphemeGlosses, morphemeIndices)
                morphemePOSSlices = get_slices_by_indices(wordPOS, morphemeIndices)
                # Go through each morpheme (j, to avoid shadowing the word index)
                for j in range(0, len(morphemeFormSlices)):
                    morphemeForm = morphemeFormSlices[j].strip(" ")
                    morphemeGloss = morphemeGlossSlices[j].strip(" ")
                    morphemePOS = morphemePOSSlices[j].strip(" ")
                    # Construct morpheme object from slices
                    m = Morpheme()
                    m.set_form(morphemeForm)
                    m.set_gloss(morphemeGloss)
                    m.set_part_of_speech(morphemePOS)
                    # Add cooked morpheme to temporary collection for word
                    morphemes.append(m)
                # Inflate word with cooked morphemes
                w.set_morphemes(morphemes)
            words.append(w)
        return words

    def get_field_value_by_field_marker_and_column(self, field_marker, columnIndex):
        """Get the value for the line at a given field and column index.
        @param field_marker: marker of desired field
        @type field_marker: string
        @param columnIndex: 1-based column number
        @type columnIndex: integer
        """
        # Bug fix: the original called the removed camelCase methods
        # getFieldValueByFieldMarker/getFieldMarkers and the undefined
        # getIndices; it also computed an unused sliceFieldMarker.
        fv = self.get_field_values_by_field_marker(field_marker)
        indices = get_indices(fv)
        slices = get_slices_by_indices(fv, indices)
        return slices[columnIndex - 1]
# --------------------------------------------------------
# CLASS: Paragraph
# DESC: Object that represents a paragraph (i.e., a unit
# larger than a line) from an interlinear text.
# --------------------------------------------------------
class Paragraph:
    """A unit of analysis above the line and below the text: an ordered
    collection of Line objects plus an identifying label.  Every text has
    at least one paragraph; identified by the field marker \\id by default.
    """

    def __init__(self, label=None):
        """Create an empty paragraph with an optional identifier."""
        self._label = label
        self._lines = []

    def add_line(self, line):
        """Append a Line object to this paragraph."""
        self._lines.append(line)

    def get_label(self):
        """Return the identifier of this paragraph."""
        return self._label

    def get_lines(self):
        """Return the list of Line objects for this paragraph."""
        return self._lines

    def set_label(self, label):
        """Replace the identifier of this paragraph."""
        self._label = label
# --------------------------------------------------------
# CLASS: InterlinearText
# DESC: Object that represents an interlinear text and
# provides functionality for its querying and
# manipulation.
# --------------------------------------------------------
class Text(ShoeboxFile) :
    """
    This class defines an interlinearized text, which consists of a collection of Paragraph objects.
    """

    def __init__(self,
                 file = None,
                 fm_line = "ref",
                 fm_paragraph = "id",
                 fm_morpheme = "m",
                 fm_morpheme_gloss = "g",
                 fm_word = "w"):
        """Constructor for Text object. All arguments are optional. By default,
        the fields used to parse the Shoebox file are the following:
        @param file: filepath
        @type file: str
        @param fm_line: field marker identifying line (default: 'ref')
        @type fm_line: str
        @param fm_paragraph: field marker identifying paragraph (default: 'id')
        @type fm_paragraph: str
        @param fm_morpheme: field marker identifying morpheme tier (default: 'm')
        @type fm_morpheme: str
        @param fm_morpheme_gloss: field marker identifying morpheme gloss tier (default: 'g')
        @type fm_morpheme_gloss: str
        @param fm_word: field marker identifying word tier (default: 'w')
        @type fm_word: str
        """
        self._file = file
        self._fm_line = fm_line
        self._fm_paragraph = fm_paragraph
        # Bug fix: these three were previously hard-coded to "m"/"g"/"w",
        # silently ignoring the constructor arguments.
        self._fm_morpheme = fm_morpheme
        self._fm_morpheme_gloss = fm_morpheme_gloss
        self._fm_word = fm_word
        self._paragraphs = []

    def get_lines(self):
        """Obtain a list of line objects (ignoring paragraph structure)."""
        lines = []
        for p in self.get_paragraphs():
            for l in p.get_lines():
                lines.append(l)
        return lines

    def get_paragraphs(self):
        """Obtain a list of paragraph objects."""
        return self._paragraphs

    def add_paragraph(self, paragraph):
        """Add paragraph object to list of paragraph objects.
        @param paragraph: paragraph to be added to text
        @type paragraph: Paragraph
        """
        self._paragraphs.append(paragraph)

    def getLineFM(self):
        """Get field marker that identifies a new line."""
        return self._fm_line

    def setLineFM(self, lineHeadFieldMarker):
        """Change default field marker that identifies new line."""
        self._fm_line = lineHeadFieldMarker

    def getParagraphFM(self):
        """Get field marker that identifies a new paragraph."""
        return self._fm_paragraph

    def setParagraphFM(self, paragraphHeadFieldMarker):
        """Change default field marker that identifies new paragraph."""
        self._fm_paragraph = paragraphHeadFieldMarker

    def getWordFM(self):
        """Get field marker that identifies word tier."""
        # Bug fix: previously read self._wordFieldMarker, which was never
        # initialized in __init__ (AttributeError until setWordFM was called).
        return self._fm_word

    def setWordFM(self, wordFieldMarker):
        """Change default field marker that identifies word tier."""
        self._fm_word = wordFieldMarker

    def getMorphemeFM(self):
        """Get field marker that identifies morpheme tier."""
        # Bug fix: unified on self._fm_morpheme (the attribute set by
        # __init__) instead of the uninitialized _morphemeFieldMarker.
        return self._fm_morpheme

    def setMorphemeFM(self, morphemeFieldMarker):
        """Change default field marker that identifies morpheme tier."""
        self._fm_morpheme = morphemeFieldMarker

    def getMorphemeGlossFM(self):
        """Get field marker that identifies morpheme gloss tier."""
        return self._fm_morpheme_gloss

    def setMorphemeGlossFM(self, morphemeGlossFieldMarker):
        """Change default field marker that identifies morpheme gloss tier."""
        self._fm_morpheme_gloss = morphemeGlossFieldMarker

    def get_file(self):
        """Get file path as string."""
        return self._file

    def set_file(self, file):
        """Change file path set upon initialization."""
        self._file = file

    def parse(self) :
        """Parse the Shoebox file set on this object into paragraphs of lines.

        Assumes the file begins with a paragraph marker and that every line
        marker is preceded by a paragraph marker -- TODO confirm for all
        input files; otherwise p/l is still None when first used below.
        """
        # Use low-level functionality to get raw fields and walk through them
        self.open(self._file)
        p, l = None, None
        for f in self.raw_fields() :
            fmarker, fvalue = f
            if fmarker == self.getParagraphFM() :
                # New paragraph: flush the previous one, if any
                if p :
                    self.add_paragraph(p)
                p = Paragraph(fvalue)
            elif fmarker == self.getLineFM() :
                # New line: flush the previous one into the current paragraph
                if l :
                    p.add_line(l)
                l = Line(fvalue)
            else :
                # Ordinary tier: attach to the current line
                if l :
                    l.add_field(Field(fmarker, fvalue))
        # Flush the trailing line and paragraph
        p.add_line(l)
        self.add_paragraph(p)
# -------------------------------------------------------------
# FUNCTION: get_indices
# ------------------------------------------------------------
def get_indices(str):
    """Return the indices of the leftmost boundary of each
    space-separated unit in a line of aligned text.

    Given the field \\um, this function will find the indices identifying
    leftmost word boundaries, as follows::

        0    5  8   12      <- indices
        |    |  |   |
        \\sf dit is een goede    <- surface form
        \\um dit is een goed -e  <- underlying morphemes

    For example, ``get_indices("dit is een goed -e")`` returns
    ``[0, 4, 7, 11, 16]``.

    @param str: aligned text
    @type str: string
    @returns: leftmost index of every unit
    @rtype: list of integers
    """
    indices = []
    at_boundary = True
    for i, c in enumerate(str):
        if c == ' ':
            # A space ends the current unit; the next non-space starts one.
            at_boundary = True
        elif at_boundary:
            indices.append(i)
            at_boundary = False
    return indices
# -------------------------------------------------------------
# FUNCTION: get_slices_by_indices
# -------------------------------------------------------------
def get_slices_by_indices(str, indices):
    """Given a string and a list of leftmost-boundary indices, return the
    list of substrings those indices define.  For example, given the
    arguments::

        str='antidisestablishmentarianism', indices=[0, 4, 7, 16, 20, 25]

    this function returns the list::

        ['anti', 'dis', 'establish', 'ment', 'arian', 'ism']

    (Bug fix in docs only: the original example omitted the leading 0, which
    would have dropped the 'anti' slice, and was missing a quote.)

    @param str: text
    @type str: string
    @param indices: leftmost index of each slice
    @type indices: list of integers
    @returns: substrings of C{str} delimited by the indices
    @rtype: list of strings
    """
    slices = []
    for i in range(0, len(indices)):
        start = indices[i]
        if i == len(indices) - 1:
            # Last index: the slice runs to the end of the string.
            piece = str[start:]
        else:
            piece = str[start:indices[i + 1]]
        slices.append(piece)
    return slices
| Python |
# __all__ = ["data", "errors", "lexicon", "settings", "text", "utilities"]
from data import *
| Python |
# Natural Language Toolkit: Brill Tagger
#
# Copyright (C) 2001-2005 University of Pennsylvania
# Authors: Christopher Maloof <cjmaloof@gradient.cis.upenn.edu>
# Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@ldc.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Brill's transformational rule-based tagger.
"""
from en.parser.nltk_lite.tag import TagI
import bisect # for binary search through a subset of indices
import os # for finding WSJ files
import random # for shuffling WSJ files
import sys # for getting command-line arguments
import re # for performing regular expression matching
######################################################################
## The Brill Tagger
######################################################################
class Brill(TagI):
    """
    Brill's transformational rule-based tagger.  Brill taggers use an
    X{initial tagger} (such as L{tag.Default}) to assign an intial
    tag sequence to a text; and then apply an ordered list of
    transformational rules to correct the tags of individual tokens.
    These transformation rules are specified by the L{BrillRuleI}
    interface.

    Brill taggers can be created directly, from an initial tagger and
    a list of transformational rules; but more often, Brill taggers
    are created by learning rules from a training corpus, using either
    L{BrillTrainer} or L{FastBrillTrainer}.
    """
    # TODO: move into __init__() when all marshalling classes will be moved into
    # standard tree
    _classname = "BrillTagger"

    def __init__(self, initial_tagger, rules):
        """
        @param initial_tagger: The initial tagger
        @type initial_tagger: L{TagI}
        @param rules: An ordered list of transformation rules that
            should be used to correct the initial tagging.
        @type rules: C{list} of L{BrillRuleI}
        """
        self._initial_tagger = initial_tagger
        self._rules = rules

    def rules(self):
        """
        @return: a copy of the ordered list of transformation rules.
        @rtype: C{list} of L{BrillRuleI}
        """
        return self._rules[:]

    def tag(self, tokens):
        # Inherit documentation from TagI

        # Run the initial tagger.
        tagged_tokens = list(self._initial_tagger.tag(tokens))

        # Create a dictionary that maps each tag to the set of
        # indices of tokens that currently have that tag.
        tag_to_positions = {}
        for i, (token, tag) in enumerate(tagged_tokens):
            tag_to_positions.setdefault(tag, set()).add(i)

        # Apply each rule, in order.  Only try to apply rules at
        # positions that have the desired original tag.
        for rule in self._rules:
            # Find the positions where it might apply
            positions = tag_to_positions.get(rule.original_tag(), [])
            # Apply the rule at those positions.
            changed = rule.apply_at(tagged_tokens, positions)
            # Move each retagged index from the old tag's position set
            # to the new tag's position set.
            for i in changed:
                tag_to_positions[rule.original_tag()].remove(i)
                tag_to_positions.setdefault(rule.replacement_tag(), set()).add(i)

        for t in tagged_tokens:
            yield t

    # marshal() and unmarshal() methods by Tiago Tresoldi <tresoldi@users.sf.net>
    def marshal(self, filename):
        """
        Marshals (saves to a plain text file) the tagger model.

        @param filename: Name of the file to which save the model (will
            be overwritten if it already exists).
        @type filename: C{string}
        """
        # Bug fix: use open() instead of the deprecated file() builtin,
        # and guarantee the handle is closed even if a write fails.
        handler = open(filename, "w")
        try:
            for rule in self.rules():
                handler.write("%s\n" % rule)
        finally:
            handler.close()

    def unmarshal(self, filename):
        """
        Unmarshals (loads from a plain text file) the tagger model. This
        operation will override any previously stored rules.

        @param filename: Name of the file from which the model will
            be read.
        @type filename: C{string}
        """
        # Patterns matching the two textual forms produced by rule __str__:
        # an explicit index-range form (rule_a) and a "preceding"/"following"
        # single-word form (rule_b).
        rule_a = re.compile(r"^(.+) -> (.+) if the (.+) of words i([+-]\d+)...i([+-]\d+) is '(.+)'$", re.UNICODE)
        rule_b = re.compile(r"^(.+) -> (.+) if the (.+) of the (.+) word is '(.+)'$", re.UNICODE)

        # erase any previous rules
        self._rules = []

        # Bug fix: use open() instead of the deprecated file() builtin,
        # and guarantee the handle is closed.
        handler = open(filename, "r")
        try:
            lines = handler.readlines()
        finally:
            handler.close()

        # remove '\n's, even though $ would catch them
        lines = [line[:-1] for line in lines]

        # remove empty lines
        lines = [line for line in lines if len(line) > 0]

        # parse rules
        for rule in lines:
            match = re.match(rule_b, rule)
            if match:
                groups = list(match.groups())
                # Translate the textual position into numeric start/end
                # offsets: "preceding" -> (-1, -1), otherwise (1, 1).
                if groups[3] == "preceding":
                    groups.pop(3)
                    groups.insert(3, "-1")
                    groups.insert(4, "-1")
                else:
                    groups.pop(3)
                    groups.insert(3, "1")
                    groups.insert(4, "1")
            else:
                match = re.match(rule_a, rule)
                groups = list(match.groups())

            conditions = (int(groups[3]), int(groups[4]), groups[5])

            if groups[2] == "tag":
                r = ProximateTagsRule(groups[0], groups[1], conditions)
            else:
                r = ProximateWordsRule(groups[0], groups[1], conditions)

            self._rules.append(r)
######################################################################
## Brill Rules
######################################################################
class BrillRuleI(object):
    """
    An interface for tag transformations on a tagged corpus, as
    performed by brill taggers.  Each transformation finds all tokens
    in the corpus that are tagged with a specific X{original tag} and
    satisfy a specific X{condition}, and replaces their tags with a
    X{replacement tag}.  For any given transformation, the original
    tag, replacement tag, and condition are fixed.  Conditions may
    depend on the token under consideration, as well as any other
    tokens in the corpus.

    Brill rules must be comparable and hashable.
    """
    def apply_to(self, tokens):
        """
        Apply this rule everywhere it applies in the corpus.  I.e.,
        for each token in the corpus that is tagged with this rule's
        original tag, and that satisfies this rule's condition, set
        its tag to be this rule's replacement tag.

        @param tokens: The tagged corpus
        @type tokens: C{list} of C{tuple}
        @return: The indices of tokens whose tags were changed by this
            rule.
        @rtype: C{list} of C{int}
        """
        return self.apply_at(tokens, range(len(tokens)))

    def apply_at(self, tokens, positions):
        """
        Apply this rule at every position in C{positions} where it
        applies to the corpus.  I.e., for each position M{p} in
        C{positions}, if C{tokens[M{p}]} is tagged with this rule's
        original tag, and satisfies this rule's condition, then set
        its tag to be this rule's replacement tag.

        @param tokens: The tagged corpus
        @type tokens: list of Token
        @type positions: C{list} of C{int}
        @param positions: The positions where the transformation is to
            be tried.
        @return: The indices of tokens whose tags were changed by this
            rule.
        @rtype: C{int}
        """
        assert False, "BrillRuleI is an abstract interface"

    def applies(self, tokens, index):
        """
        @return: True if the rule would change the tag of
            C{tokens[index]}, False otherwise
        @rtype: Boolean

        @param tokens: A tagged corpus
        @type tokens: list of Token
        @param index: The index to check
        @type index: int
        """
        assert False, "BrillRuleI is an abstract interface"

    def original_tag(self):
        """
        @return: The tag which this C{BrillRuleI} may cause to be
            replaced.
        @rtype: any
        """
        assert False, "BrillRuleI is an abstract interface"

    def replacement_tag(self):
        """
        @return: the tag with which this C{BrillRuleI} may replace
            another tag.
        @rtype: any
        """
        assert False, "BrillRuleI is an abstract interface"

    # Rules must be comparable and hashable for the algorithm to work
    def __eq__(self, other):
        # Bug fix: __eq__ must accept the object being compared against.
        # The original signature omitted `other`, so any equality test
        # raised TypeError instead of this deliberate AssertionError.
        assert False, "Brill rules must be comparable"

    def __hash__(self):
        assert False, "Brill rules must be hashable"
class ProximateTokensRule(BrillRuleI):
    """
    An abstract base class for brill rules whose condition checks for
    the presence of tokens with given properties at given ranges of
    positions, relative to the token.

    Each subclass of proximate tokens brill rule defines a method
    M{extract_property}, which extracts a specific property from the
    the token, such as its text or tag.  Each instance is
    parameterized by a set of tuples, specifying ranges of positions
    and property values to check for in those ranges:

      - (M{start}, M{end}, M{value})

    The brill rule is then applicable to the M{n}th token iff:

      - The M{n}th token is tagged with the rule's original tag; and
      - For each (M{start}, M{end}, M{value}) triple:
        - The property value of at least one token between
          M{n+start} and M{n+end} (inclusive) is M{value}.

    For example, a proximate token brill template with M{start=end=-1}
    generates rules that check just the property of the preceding
    token.  Note that multiple properties may be included in a single
    rule; the rule applies if they all hold.
    """
    def __init__(self, original_tag, replacement_tag, *conditions):
        """
        Construct a new brill rule that changes a token's tag from
        C{original_tag} to C{replacement_tag} if all of the properties
        specified in C{conditions} hold.

        @type conditions: C{tuple} of C{(int, int, *)}
        @param conditions: A list of 3-tuples C{(start, end, value)},
            each of which specifies that the property of at least one
            token between M{n}+C{start} and M{n}+C{end} (inclusive) is
            C{value}.
        @raise ValueError: If C{start}>C{end} for any condition.
        """
        assert self.__class__ != ProximateTokensRule, \
               "ProximateTokensRule is an abstract base class"

        self._original = original_tag
        self._replacement = replacement_tag
        self._conditions = conditions
        for (s, e, v) in conditions:
            if s > e:
                raise ValueError('Condition %s has an invalid range' %
                                 ((s, e, v),))

    def extract_property(token): # [staticmethod]
        """
        Returns some property characterizing this token, such as its
        base lexical item or its tag.

        Each implentation of this method should correspond to an
        implementation of the method with the same name in a subclass
        of L{ProximateTokensTemplate}.

        @param token: The token
        @type token: Token
        @return: The property
        @rtype: any
        """
        assert False, "ProximateTokensRule is an abstract interface"
    extract_property = staticmethod(extract_property)

    def apply_at(self, tokens, positions):
        # Inherit docs from BrillRuleI

        # Find all locations where the rule is applicable
        change = []
        for i in positions:
            if self.applies(tokens, i):
                change.append(i)

        # Make the changes.  Note: this must be done in a separate
        # step from finding applicable locations, since we don't want
        # the rule to interact with itself.
        for i in change:
            (token, tag) = tokens[i]
            tokens[i] = (token, self._replacement)

        return change

    def applies(self, tokens, index):
        # Inherit docs from BrillRuleI

        # Does the given token have this rule's "original tag"?
        if tokens[index][1] != self._original:
            return False

        # Check to make sure that every condition holds.
        for (start, end, val) in self._conditions:
            # Find the (absolute) start and end indices.
            s = max(0, index+start)
            e = min(index+end+1, len(tokens))

            # Look for *any* token that satisfies the condition.
            for i in range(s, e):
                if self.extract_property(tokens[i]) == val:
                    break
            else:
                # No token satisfied the condition; return false.
                return False

        # Every condition checked out, so the rule is applicable.
        return True

    def original_tag(self):
        # Inherit docs from BrillRuleI
        return self._original

    def replacement_tag(self):
        # Inherit docs from BrillRuleI
        return self._replacement

    def __eq__(self, other):
        # Bug fix: test against None with "is not" rather than "!=";
        # "!= None" needlessly invokes the operand's comparison machinery.
        return (other is not None and
                other.__class__ == self.__class__ and
                self._original == other._original and
                self._replacement == other._replacement and
                self._conditions == other._conditions)

    def __ne__(self, other):
        # Bug fix: define __ne__ so that "!=" agrees with "==" (Python 2
        # does not derive __ne__ from __eq__).
        return not self.__eq__(other)

    def __hash__(self):
        # Needs to include extract_property in order to distinguish subclasses
        # A nicer way would be welcome.
        # Bug fix: func.__code__ is the portable spelling (Python 2.6+ and
        # Python 3) of the legacy func_code attribute.
        return hash( (self._original, self._replacement, self._conditions,
                      self.extract_property.__code__) )

    def __repr__(self):
        conditions = ' and '.join(['%s in %d...%d' % (v,s,e)
                                   for (s,e,v) in self._conditions])
        return '<%s: %s->%s if %s>' % (self.__class__.__name__,
                                       self._original, self._replacement,
                                       conditions)

    def __str__(self):
        replacement = '%s -> %s' % (self._original,
                                    self._replacement)
        if len(self._conditions) == 0:
            conditions = ''
        else:
            conditions = ' if '+ ', and '.join([self._condition_to_str(c)
                                                for c in self._conditions])
        return replacement+conditions

    def _condition_to_str(self, condition):
        """
        Return a string representation of the given condition.
        This helper method is used by L{__str__}.
        """
        (start, end, value) = condition
        return ('the %s of %s is %r' %
                (self.PROPERTY_NAME, self._range_to_str(start, end), value))

    def _range_to_str(self, start, end):
        """
        Return a string representation for the given range.  This
        helper method is used by L{__str__}.
        """
        if start == end == 0:
            return 'this word'
        if start == end == -1:
            return 'the preceding word'
        elif start == end == 1:
            return 'the following word'
        elif start == end and start < 0:
            return 'word i-%d' % -start
        elif start == end and start > 0:
            return 'word i+%d' % start
        else:
            if start >= 0: start = '+%d' % start
            if end >= 0: end = '+%d' % end
            return 'words i%s...i%s' % (start, end)
class ProximateTagsRule(ProximateTokensRule):
    """
    A rule which examines the tags of nearby tokens.
    @see: superclass L{ProximateTokensRule} for details.
    @see: L{ProximateTagsTemplate}, which generates these rules.
    """
    PROPERTY_NAME = 'tag' # for printing.

    @staticmethod
    def extract_property(token):
        """@return: The given token's tag (its second element)."""
        (_word, tag) = token
        return tag
class ProximateWordsRule(ProximateTokensRule):
    """
    A rule which examines the base types of nearby tokens.
    @see: L{ProximateTokensRule} for details.
    @see: L{ProximateWordsTemplate}, which generates these rules.
    """
    PROPERTY_NAME = 'text' # for printing.

    @staticmethod
    def extract_property(token):
        """@return: The given token's text (its first element)."""
        (word, _tag) = token
        return word
######################################################################
## Brill Templates
######################################################################
class BrillTemplateI(object):
    """
    An interface for generating lists of transformational rules that
    apply at given corpus positions.  C{BrillTemplateI} is used by
    C{Brill} training algorithms to generate candidate rules.
    """
    def __init__(self):
        # Bug fix: use the call form of raise; the legacy
        # "raise AssertionError, msg" statement syntax is obsolete and
        # invalid in Python 3.
        raise AssertionError("BrillTemplateI is an abstract interface")

    def applicable_rules(self, tokens, i, correctTag):
        """
        Return a list of the transformational rules that would correct
        the C{i}th subtoken's tag in the given token.  In particular,
        return a list of zero or more rules that would change
        C{tagged_tokens[i][1]} to C{correctTag}, if applied
        to C{token}.

        If the C{i}th subtoken already has the correct tag (i.e., if
        C{tagged_tokens[i][1]} == C{correctTag}), then
        C{applicable_rules} should return the empty list.

        @param tokens: The tagged tokens being tagged.
        @type tokens: C{list} of C{tuple}
        @param i: The index of the token whose tag should be corrected.
        @type i: C{int}
        @param correctTag: The correct tag for the C{i}th token.
        @type correctTag: (any)
        @rtype: C{list} of L{BrillRuleI}
        """
        raise AssertionError("BrillTemplateI is an abstract interface")

    def get_neighborhood(self, token, index):
        """
        Returns the set of indices C{i} such that
        C{applicable_rules(token, index, ...)} depends on the value of
        the C{i}th subtoken of C{token}.

        This method is used by the "fast" Brill tagger trainer.

        @param token: The tokens being tagged.
        @type token: C{list} of C{tuple}
        @param index: The index whose neighborhood should be returned.
        @type index: C{int}
        @rtype: C{Set}
        """
        raise AssertionError("BrillTemplateI is an abstract interface")
class ProximateTokensTemplate(BrillTemplateI):
    """
    A brill template that generates a list of
    L{ProximateTokensRule}s that apply at a given corpus
    position.  In particular, each C{ProximateTokensTemplate} is
    parameterized by a proximate token brill rule class and a list of
    boundaries, and generates all rules that:

      - use the given brill rule class
      - use the given list of boundaries as the C{start} and C{end}
        points for their conditions
      - are applicable to the given token.
    """
    def __init__(self, rule_class, *boundaries):
        """
        Construct a template for generating proximate token brill
        rules.

        @type rule_class: C{class}
        @param rule_class: The proximate token brill rule class that
            should be used to generate new rules.  This class must be a
            subclass of L{ProximateTokensRule}.
        @type boundaries: C{tuple} of C{(int, int)}
        @param boundaries: A list of tuples C{(start, end)}, each of
            which specifies a range for which a condition should be
            created by each rule.
        @raise ValueError: If C{start}>C{end} for any boundary.
        """
        self._rule_class = rule_class
        self._boundaries = boundaries
        for (lo, hi) in boundaries:
            if lo > hi:
                raise ValueError('Boundary %s has an invalid range' %
                                 ((lo, hi),))

    def applicable_rules(self, tokens, index, correct_tag):
        # inherit docs from BrillTemplateI
        if tokens[index][1] == correct_tag:
            return []

        # Gather, for each boundary, the set of conditions that actually
        # hold in the corpus around this position.
        per_boundary = [self._applicable_conditions(tokens, index, lo, hi)
                        for (lo, hi) in self._boundaries]

        # Cartesian product: every way of picking one condition per
        # boundary yields one candidate rule's condition list.
        combos = [[]]
        for candidates in per_boundary:
            combos = [partial + [extra]
                      for partial in combos
                      for extra in candidates]

        # Instantiate a rule for each combination of conditions.
        return [self._rule_class(tokens[index][1], correct_tag, *combo)
                for combo in combos]

    def _applicable_conditions(self, tokens, index, start, end):
        """
        @return: A set of all conditions for proximate token rules
        that are applicable to C{tokens[index]}, given boundaries of
        C{(start, end)}.  I.e., return a list of all tuples C{(start,
        end, M{value})}, such the property value of at least one token
        between M{index+start} and M{index+end} (inclusive) is
        M{value}.
        """
        found = set()
        first = max(0, index + start)
        last = min(index + end + 1, len(tokens))
        for pos in range(first, last):
            found.add((start, end,
                       self._rule_class.extract_property(tokens[pos])))
        return found

    def get_neighborhood(self, tokens, index):
        # inherit docs from BrillTemplateI
        sites = set([index])
        for (start, end) in self._boundaries:
            first = max(0, index + start)
            last = min(index + end + 1, len(tokens))
            sites.update(range(first, last))
        return sites
class SymmetricProximateTokensTemplate(BrillTemplateI):
    """
    Simulates two L{ProximateTokensTemplate}s which are symmetric
    across the location of the token.  For rules of the form "If the
    M{n}th token is tagged C{A}, and any tag preceding B{or} following
    the M{n}th token by a distance between M{x} and M{y} is C{B}, and
    ... , then change the tag of the nth token from C{A} to C{C}."

    One C{ProximateTokensTemplate} is formed by passing in the
    same arguments given to this class's constructor: tuples
    representing intervals in which a tag may be found.  The other
    C{ProximateTokensTemplate} is constructed with the negative
    of all the arguments in reversed order.  For example, a
    C{SymmetricProximateTokensTemplate} using the pair (-2,-1) and the
    constructor C{ProximateTagsTemplate} generates the same rules as a
    C{ProximateTagsTemplate} using (-2,-1) plus a second
    C{ProximateTagsTemplate} using (1,2).

    This is useful because we typically don't want templates to
    specify only "following" or only "preceding"; we'd like our
    rules to be able to look in either direction.
    """
    def __init__(self, rule_class, *boundaries):
        """
        Construct a template for generating proximate token brill
        rules.

        @type rule_class: C{class}
        @param rule_class: The proximate token brill rule class that
            should be used to generate new rules.  This class must be a
            subclass of L{ProximateTokensRule}.
        @type boundaries: C{tuple} of C{(int, int)}
        @param boundaries: A list of tuples C{(start, end)}, each of
            which specifies a range for which a condition should be
            created by each rule.
        @raise ValueError: If C{start}>C{end} for any boundary.
        """
        self._ptt1 = ProximateTokensTemplate(rule_class, *boundaries)
        # Bug fix: the mirrored interval list no longer shadows the
        # builtin reversed().  Each (start, end) interval is reflected
        # to the other side of the token: (s, e) -> (-e, -s).
        mirrored = [(-e, -s) for (s, e) in boundaries]
        self._ptt2 = ProximateTokensTemplate(rule_class, *mirrored)

    # Generates lists of a subtype of ProximateTokensRule.
    def applicable_rules(self, tokens, index, correctTag):
        """
        See L{BrillTemplateI} for full specifications.

        @rtype: list of ProximateTokensRule
        """
        return (self._ptt1.applicable_rules(tokens, index, correctTag) +
                self._ptt2.applicable_rules(tokens, index, correctTag))

    def get_neighborhood(self, tokens, index):
        # inherit docs from BrillTemplateI
        n1 = self._ptt1.get_neighborhood(tokens, index)
        n2 = self._ptt2.get_neighborhood(tokens, index)
        return n1.union(n2)
######################################################################
## Brill Tagger Trainer
######################################################################
class BrillTrainer(object):
    """
    A trainer for brill taggers.

    On each training iteration, every candidate rule proposed by the
    templates is scored against the whole corpus, and the best-scoring
    rule is kept.  (See L{FastBrillTrainer} for an incremental variant.)
    """
    def __init__(self, initial_tagger, templates, trace=0):
        # initial_tagger: tagger used to produce the starting tag sequence
        # templates: BrillTemplateI objects that propose candidate rules
        # trace: verbosity (0 = silent; >0 progress; >1 chosen rules;
        #        >2 a per-rule score table)
        self._initial_tagger = initial_tagger
        self._templates = templates
        self._trace = trace

    #////////////////////////////////////////////////////////////
    # Training
    #////////////////////////////////////////////////////////////

    def train(self, train_tokens, max_rules=200, min_score=2):
        """
        Trains the Brill tagger on the corpus C{train_token},
        producing at most C{max_rules} transformations, each of which
        reduces the net number of errors in the corpus by at least
        C{min_score}.

        @type train_tokens: C{list} of L{tuple}
        @param train_tokens: The corpus of tagged tokens
        @type max_rules: C{int}
        @param max_rules: The maximum number of transformations to be created
        @type min_score: C{int}
        @param min_score: The minimum acceptable net error reduction
            that each transformation must produce in the corpus.
        """
        if self._trace > 0: print ("Training Brill tagger on %d tokens..." %
                                   len(train_tokens))

        # Create a new copy of the training token, and run the initial
        # tagger on this.  We will progressively update this test
        # token to look more like the training token.
        test_tokens = list(self._initial_tagger.tag(t[0] for t in train_tokens))

        if self._trace > 2: self._trace_header()

        # Look for useful rules.
        rules = []
        try:
            while len(rules) < max_rules:
                # NOTE(review): old_tags is computed but never used below.
                old_tags = [t[1] for t in test_tokens]
                (rule, score, fixscore) = self._best_rule(test_tokens,
                                                          train_tokens)
                if rule is None or score < min_score:
                    if self._trace > 1:
                        print 'Insufficient improvement; stopping'
                    break
                else:
                    # Add the rule to our list of rules.
                    rules.append(rule)
                    # Use the rules to update the test token.
                    k = rule.apply_to(test_tokens)
                    # Display trace output.
                    if self._trace > 1:
                        self._trace_rule(rule, score, fixscore, len(k))
        # The user can also cancel training manually:
        except KeyboardInterrupt: pass

        # Create and return a tagger from the rules we found.
        return Brill(self._initial_tagger, rules)

    #////////////////////////////////////////////////////////////
    # Finding the best rule
    #////////////////////////////////////////////////////////////

    # Finds the rule that makes the biggest net improvement in the corpus.
    # Returns a (rule, score) pair.
    def _best_rule(self, test_tokens, train_tokens):
        """
        Find the candidate rule with the greatest net error reduction.

        @return: a C{(rule, score, fixscore)} triple, where C{score} is
            the net improvement (fixes minus breakages) and C{fixscore}
            counts fixes only; C{rule} may be C{None} if no rule helps.
        """
        # Create a dictionary mapping from each tag to a list of the
        # indices that have that tag in both test_tokens and
        # train_tokens (i.e., where it is correctly tagged).
        correct_indices = {}
        for i in range(len(test_tokens)):
            if test_tokens[i][1] == train_tokens[i][1]:
                tag = test_tokens[i][1]
                correct_indices.setdefault(tag, []).append(i)

        # Find all the rules that correct at least one token's tag,
        # and the number of tags that each rule corrects (in
        # descending order of number of tags corrected).
        rules = self._find_rules(test_tokens, train_tokens)

        # Keep track of the current best rule, and its score.
        best_rule, best_score, best_fixscore = None, 0, 0

        # Consider each rule, in descending order of fixscore (the
        # number of tags that the rule corrects, not including the
        # number that it breaks).
        for (rule, fixscore) in rules:
            # The actual score must be <= fixscore; so if best_score
            # is bigger than fixscore, then we already have the best
            # rule.
            if best_score >= fixscore:
                return best_rule, best_score, best_fixscore

            # Calculate the actual score, by decrementing fixscore
            # once for each tag that the rule changes to an incorrect
            # value.
            score = fixscore
            if correct_indices.has_key(rule.original_tag()):
                for i in correct_indices[rule.original_tag()]:
                    if rule.applies(test_tokens, i):
                        score -= 1
                        # If the score goes below best_score, then we know
                        # that this isn't the best rule; so move on:
                        if score <= best_score: break

            #print '%5d %5d %s' % (fixscore, score, rule)

            # If the actual score is better than the best score, then
            # update best_score and best_rule.
            if score > best_score:
                best_rule, best_score, best_fixscore = rule, score, fixscore

        # Return the best rule, and its score.
        return best_rule, best_score, best_fixscore

    def _find_rules(self, test_tokens, train_tokens):
        """
        Find all rules that correct at least one token's tag in
        C{test_tokens}.

        @return: A list of tuples C{(rule, fixscore)}, where C{rule}
            is a brill rule and C{fixscore} is the number of tokens
            whose tag the rule corrects.  Note that C{fixscore} does
            I{not} include the number of tokens whose tags are changed
            to incorrect values.
        """
        # Create a list of all indices that are incorrectly tagged.
        # NOTE(review): error_indices appears to be unused; candidate
        # rules are gathered over every position in the loop below.
        error_indices = [i for i in range(len(test_tokens))
                         if (test_tokens[i][1] !=
                             train_tokens[i][1])]

        # Create a dictionary mapping from rules to their positive-only
        # scores.
        rule_score_dict = {}
        for i in range(len(test_tokens)):
            rules = self._find_rules_at(test_tokens, train_tokens, i)
            for rule in rules:
                rule_score_dict[rule] = rule_score_dict.get(rule,0) + 1

        # Convert the dictionary into a list of (rule, score) tuples,
        # sorted in descending order of score.
        rule_score_items = rule_score_dict.items()
        temp = [(-score, rule) for (rule, score) in rule_score_items]
        temp.sort()
        return [(rule, -negscore) for (negscore, rule) in temp]

    def _find_rules_at(self, test_tokens, train_tokens, i):
        """
        @rtype: C{Set}
        @return: the set of all rules (based on the templates) that
            correct token C{i}'s tag in C{test_tokens}.
        """
        applicable_rules = set()
        if test_tokens[i][1] != train_tokens[i][1]:
            correct_tag = train_tokens[i][1]
            for template in self._templates:
                new_rules = template.applicable_rules(test_tokens, i,
                                                      correct_tag)
                applicable_rules.update(new_rules)
        return applicable_rules

    #////////////////////////////////////////////////////////////
    # Tracing
    #////////////////////////////////////////////////////////////

    def _trace_header(self):
        # Print the column legend for the rule-by-rule trace table.
        print """
           B      |
   S   F   r   O  |        Score = Fixed - Broken
   c   i   o   t  |  R     Fixed = num tags changed incorrect -> correct
   o   x   k   h  |  u     Broken = num tags changed correct -> incorrect
   r   e   e   e  |  l     Other = num tags changed incorrect -> incorrect
   e   d   n   r  |  e
------------------+-------------------------------------------------------
        """.rstrip()

    def _trace_rule(self, rule, score, fixscore, numchanges):
        # Print one row of the trace table (scores only when trace > 2),
        # followed by the rule itself.
        if self._trace > 2:
            print ('%4d%4d%4d%4d ' % (score, fixscore, fixscore-score,
                                      numchanges-fixscore*2+score)), '|',
        print rule
######################################################################
## Fast Brill Tagger Trainer
######################################################################
class FastBrillTrainer(object):
    """
    A faster trainer for brill taggers.

    Instead of rescoring every candidate rule on each iteration (as
    L{BrillTrainer} does), this trainer incrementally maintains, for
    every somewhere-useful rule, the set of positions where it applies
    and the score change (+1, 0, or -1) it produces at each position.
    """
    def __init__(self, initial_tagger, templates, trace=0):
        # initial_tagger: tagger used to produce the starting tag sequence
        # templates: BrillTemplateI objects that propose candidate rules
        # trace: verbosity level
        self._initial_tagger = initial_tagger
        self._templates = templates
        self._trace = trace

    #////////////////////////////////////////////////////////////
    # Training
    #////////////////////////////////////////////////////////////

    def train(self, train_tokens, max_rules=200, min_score=2):
        """
        Train on C{train_tokens}, returning a L{Brill} tagger with at
        most C{max_rules} rules, each scoring at least C{min_score}.
        """
        # If TESTING is true, extra computation is done to determine whether
        # each "best" rule actually reduces net error by the score it received.
        TESTING = False

        # Basic idea: Keep track of the rules that apply at each position.
        # And keep track of the positions to which each rule applies.

        # The set of somewhere-useful rules that apply at each position
        rulesByPosition = []
        for i in range(len(train_tokens)):
            rulesByPosition.append(set())

        # Mapping somewhere-useful rules to the positions where they apply.
        # Then maps each position to the score change the rule generates there.
        # (always -1, 0, or 1)
        positionsByRule = {}

        # Map scores to sets of rules known to achieve *at most* that score.
        # (Inner dicts are used as sets; values are always None.)
        rulesByScore = {0:{}}
        # Conversely, map somewhere-useful rules to their minimal scores.
        ruleScores = {}

        tagIndices = {}   # Lists of indices, mapped to by their tags

        # Maps rules to the first index in the corpus where it may not be known
        # whether the rule applies.  (Rules can't be chosen for inclusion
        # unless this value = len(corpus).  But most rules are bad, and
        # we won't need to check the whole corpus to know that.)
        # Some indices past this may actually have been checked; it just isn't
        # guaranteed.
        firstUnknownIndex = {}

        # Make entries in the rule-mapping dictionaries.
        # Should be called before _updateRuleApplies.
        def _initRule (rule):
            positionsByRule[rule] = {}
            rulesByScore[0][rule] = None
            ruleScores[rule] = 0
            firstUnknownIndex[rule] = 0

        # Takes a somewhere-useful rule which applies at index i;
        # Updates all rule data to reflect that the rule so applies.
        def _updateRuleApplies (rule, i):

            # If the rule is already known to apply here, ignore.
            # (This only happens if the position's tag hasn't changed.)
            if positionsByRule[rule].has_key(i):
                return

            # Record the score change this application produces:
            # +1 fixes the tag, -1 breaks a correct tag, 0 is neutral.
            if rule.replacement_tag() == train_tokens[i][1]:
                positionsByRule[rule][i] = 1
            elif rule.original_tag() == train_tokens[i][1]:
                positionsByRule[rule][i] = -1
            else: # was wrong, remains wrong
                positionsByRule[rule][i] = 0

            # Update rules in the other dictionaries
            del rulesByScore[ruleScores[rule]][rule]
            ruleScores[rule] += positionsByRule[rule][i]
            if not rulesByScore.has_key(ruleScores[rule]):
                rulesByScore[ruleScores[rule]] = {}
            rulesByScore[ruleScores[rule]][rule] = None
            rulesByPosition[i].add(rule)

        # Takes a rule which no longer applies at index i;
        # Updates all rule data to reflect that the rule doesn't apply.
        def _updateRuleNotApplies (rule, i):
            del rulesByScore[ruleScores[rule]][rule]
            ruleScores[rule] -= positionsByRule[rule][i]
            if not rulesByScore.has_key(ruleScores[rule]):
                rulesByScore[ruleScores[rule]] = {}
            rulesByScore[ruleScores[rule]][rule] = None

            del positionsByRule[rule][i]
            rulesByPosition[i].remove(rule)

            # Optional addition: if the rule now applies nowhere, delete
            # all its dictionary entries.

        # Run the initial tagger over the training words.
        tagged_tokens = list(self._initial_tagger.tag(t[0] for t in train_tokens))

        # First sort the corpus by tag, and also note where the errors are.
        errorIndices = []  # only used in initialization
        for i in range(len(tagged_tokens)):
            tag = tagged_tokens[i][1]
            if tag != train_tokens[i][1]:
                errorIndices.append(i)
            if not tagIndices.has_key(tag):
                tagIndices[tag] = []
            tagIndices[tag].append(i)

        print "Finding useful rules..."
        # Collect all rules that fix any errors, with their positive scores.
        for i in errorIndices:
            for template in self._templates:
                # Find the templated rules that could fix the error.
                for rule in template.applicable_rules(tagged_tokens, i,
                                                      train_tokens[i][1]):
                    if not positionsByRule.has_key(rule):
                        _initRule(rule)
                    _updateRuleApplies(rule, i)

        print "Done initializing %i useful rules." %len(positionsByRule)

        if TESTING:
            after = -1 # bug-check only

        # Each iteration through the loop tries a new maxScore.
        maxScore = max(rulesByScore.keys())
        rules = []
        while len(rules) < max_rules and maxScore >= min_score:

            # Find the next best rule.  This is done by repeatedly taking a
            # rule with the highest score and stepping through the corpus to
            # see where it applies.  When it makes an error (decreasing its
            # score) it's bumped down, and we try a new rule with the highest
            # score.  When we find a rule which has the highest score AND
            # which has been tested against the entire corpus, we can
            # conclude that it's the next best rule.

            bestRule = None
            bestRules = rulesByScore[maxScore].keys()

            for rule in bestRules:
                # Find the first relevant index at or following the first
                # unknown index.  (Only check indices with the right tag.)
                ti = bisect.bisect_left(tagIndices[rule.original_tag()],
                                        firstUnknownIndex[rule])
                for nextIndex in tagIndices[rule.original_tag()][ti:]:
                    if rule.applies(tagged_tokens, nextIndex):
                        _updateRuleApplies(rule, nextIndex)
                        if ruleScores[rule] < maxScore:
                            firstUnknownIndex[rule] = nextIndex+1
                            break # the _update demoted the rule

                # If we checked all remaining indices and found no more errors:
                if ruleScores[rule] == maxScore:
                    firstUnknownIndex[rule] = len(tagged_tokens) # i.e., we checked them all
                    print "%i) %s (score: %i)" %(len(rules)+1, rule, maxScore)
                    bestRule = rule
                    break

            if bestRule == None: # all rules dropped below maxScore
                del rulesByScore[maxScore]
                maxScore = max(rulesByScore.keys())
                continue # with next-best rules

            # bug-check only
            if TESTING:
                before = len(_errorPositions(tagged_tokens, train_tokens))
                print "There are %i errors before applying this rule." %before
                assert after == -1 or before == after, \
                        "after=%i but before=%i" %(after,before)

            print "Applying best rule at %i locations..." \
                                %len(positionsByRule[bestRule].keys())

            # If we reach this point, we've found a new best rule.
            # Apply the rule at the relevant sites.
            # (apply_at is a little inefficient here, since we know the rule
            #  applies and don't actually need to test it again.)
            rules.append(bestRule)
            bestRule.apply_at(tagged_tokens, positionsByRule[bestRule].keys())

            # Update the tag index accordingly.
            for i in positionsByRule[bestRule].keys(): # where it applied
                # Update positions of tags
                # First, find and delete the index for i from the old tag.
                oldIndex = bisect.bisect_left(tagIndices[bestRule.original_tag()], i)
                del tagIndices[bestRule.original_tag()][oldIndex]

                # Then, insert i into the index list of the new tag.
                if not tagIndices.has_key(bestRule.replacement_tag()):
                    tagIndices[bestRule.replacement_tag()] = []
                newIndex = bisect.bisect_left(tagIndices[bestRule.replacement_tag()], i)
                tagIndices[bestRule.replacement_tag()].insert(newIndex, i)

            # This part is tricky.
            # We need to know which sites might now require new rules -- that
            # is, which sites are close enough to the changed site so that
            # a template might now generate different rules for it.
            # Only the templates can know this.
            #
            # If a template now generates a different set of rules, we have
            # to update our indices to reflect that.
            print "Updating neighborhoods of changed sites.\n"

            # First, collect all the indices that might get new rules.
            neighbors = set()
            for i in positionsByRule[bestRule].keys(): # sites changed
                for template in self._templates:
                    neighbors.update(template.get_neighborhood(tagged_tokens, i))

            # Then collect the new set of rules for each such index.
            # c/d/e count obsolete applications, new applications, and
            # previously-unseen rules, for the progress message below.
            c = d = e = 0
            for i in neighbors:
                siteRules = set()
                for template in self._templates:
                    # Get a set of the rules that the template now generates
                    siteRules.update(set(template.applicable_rules(
                                        tagged_tokens, i, train_tokens[i][1])))

                # Update rules no longer generated here by any template
                for obsolete in rulesByPosition[i] - siteRules:
                    c += 1
                    _updateRuleNotApplies(obsolete, i)

                # Update rules only now generated by this template
                for newRule in siteRules - rulesByPosition[i]:
                    d += 1
                    if not positionsByRule.has_key(newRule):
                        e += 1
                        _initRule(newRule) # make a new rule w/score=0
                    _updateRuleApplies(newRule, i) # increment score, etc.

            if TESTING:
                after = before - maxScore
            print "%i obsolete rule applications, %i new ones, " %(c,d)+ \
                    "using %i previously-unseen rules." %e

            maxScore = max(rulesByScore.keys()) # may have gone up

        # NOTE(review): the trace message below and rules_by_position look
        # like leftovers -- rules_by_position is never used afterwards.
        if self._trace > 0: print ("Training Brill tagger on %d tokens..." %
                                   len(train_tokens))

        # Maintain a list of the rules that apply at each position.
        rules_by_position = [{} for tok in train_tokens]

        # Create and return a tagger from the rules we found.
        return Brill(self._initial_tagger, rules)
######################################################################
## Testing
######################################################################
def _errorPositions (train_tokens, tokens):
return [i for i in range(len(tokens))
if tokens[i][1] !=
train_tokens[i][1] ]
# returns a list of errors in string format
def errorList(train_tokens, tokens, radius=2):
    """
    Return a list of human-readable strings, one per tagging error.
    @param train_tokens: The correct tagging of the corpus
    @type train_tokens: C{list} of C{tuple}
    @param tokens: The tagged corpus
    @type tokens: C{list} of C{tuple}
    @param radius: How many tokens on either side of a wrongly-tagged token
        to include in the error string. For example, if C{radius}=2, each
        error string will show the incorrect token plus two tokens on either
        side.
    @type radius: int
    """
    errors = []
    token_count = len(tokens)
    for i in range(token_count):
        if tokens[i][1] == train_tokens[i][1]:
            continue  # correctly tagged; nothing to report
        # Header: the wrong tag, an arrow, then the correct tag.
        line = tokens[i][1].rjust(3) + " -> " \
               + train_tokens[i][1].rjust(3) + ": "
        # Context window around the error, clipped to the corpus bounds.
        for j in range(max(i - radius, 0), min(i + radius + 1, token_count)):
            if tokens[j][0] == tokens[j][1]:
                shown = tokens[j][0]  # don't print punctuation tags
            else:
                shown = tokens[j][0] + "/" + tokens[j][1]
            if j == i:
                line += "**" + shown + "** "
            else:
                line += shown + " "
        errors.append(line)
    return errors
#####################################################################################
# Demonstration
#####################################################################################
def demo(num_sents=100, max_rules=200, min_score=2, error_output = "errors.out",
rule_output="rules.out", randomize=False, train=.8, trace=3):
"""
Brill Tagger Demonstration
@param num_sents: how many sentences of training and testing data to use
@type num_sents: L{int}
@param max_rules: maximum number of rule instances to create
@type max_rules: L{int}
@param min_score: the minimum score for a rule in order for it to be considered
@type min_score: L{int}
@param error_output: the file where errors will be saved
@type error_output: L{string}
@param rule_output: the file where rules will be saved
@type rule_output: L{string}
@param randomize: whether the training data should be a random subset of the corpus
@type randomize: L{boolean}
@param train: the fraction of the the corpus to be used for training (1=all)
@type train: L{float}
@param trace: the level of diagnostic tracing output to produce (0-3)
@type train: L{int}
"""
from en.parser.nltk_lite.corpora import treebank
from en.parser.nltk_lite import tag
from en.parser.nltk_lite.tag import brill
NN_CD_tagger = tag.Regexp([(r'^-?[0-9]+(.[0-9]+)?$', 'CD'), (r'.*', 'NN')])
# train is the proportion of data used in training; the rest is reserved
# for testing.
print "Loading tagged data..."
sents = list(treebank.tagged())
if randomize:
random.seed(len(sents))
random.shuffle(sents)
tagged_data = [t for s in sents[:num_sents] for t in s]
cutoff = int(len(tagged_data)*train)
training_data = tagged_data[:cutoff]
gold_data = tagged_data[cutoff:]
testing_data = [t[0] for t in gold_data]
# Unigram tagger
print "Training unigram tagger:",
u = tag.Unigram(backoff=NN_CD_tagger)
# NB training and testing are required to use a list-of-lists structure,
# so we wrap the flattened corpus data with the extra list structure.
u.train([training_data])
print("[accuracy: %f]" % tag.accuracy(u, [gold_data]))
# Brill tagger
templates = [
brill.SymmetricProximateTokensTemplate(brill.ProximateTagsRule, (1,1)),
brill.SymmetricProximateTokensTemplate(brill.ProximateTagsRule, (2,2)),
brill.SymmetricProximateTokensTemplate(brill.ProximateTagsRule, (1,2)),
brill.SymmetricProximateTokensTemplate(brill.ProximateTagsRule, (1,3)),
brill.SymmetricProximateTokensTemplate(brill.ProximateWordsRule, (1,1)),
brill.SymmetricProximateTokensTemplate(brill.ProximateWordsRule, (2,2)),
brill.SymmetricProximateTokensTemplate(brill.ProximateWordsRule, (1,2)),
brill.SymmetricProximateTokensTemplate(brill.ProximateWordsRule, (1,3)),
brill.ProximateTokensTemplate(brill.ProximateTagsRule, (-1, -1), (1,1)),
brill.ProximateTokensTemplate(brill.ProximateWordsRule, (-1, -1), (1,1)),
]
#trainer = brill.FastBrillTrainer(u, templates, trace)
trainer = brill.BrillTrainer(u, templates, trace)
b = trainer.train(training_data, max_rules, min_score)
print
print("Brill accuracy: %f" % tag.accuracy(b, [gold_data]))
print("\nRules: ")
printRules = file(rule_output, 'w')
for rule in b.rules():
print(str(rule))
printRules.write(str(rule)+"\n\n")
testing_data = list(b.tag(testing_data))
el = errorList(gold_data, testing_data)
errorFile = file(error_output, 'w')
for e in el:
errorFile.write(e+"\n\n")
errorFile.close()
print "Done; rules and errors saved to %s and %s." % (rule_output, error_output)
# Run the demonstration when this module is executed as a script.
if __name__ == '__main__':
    demo()
| Python |
"""
This module provides utilities for treating Python dictionaries as X{feature
structures}. Specifically, it contains the C{unify} function, which can be used
to merge the properties of two dictionaries, and the C{Variable} class, which
holds an unknown value to be used in unification.
A X{feature structure} is a mapping from feature names to feature values,
where:
- Each X{feature name} is a case sensitive string.
- Each X{feature value} can be a base value (such as a string), a
variable, or a nested feature structure.
However, feature structures are not a specialized class; they are represented
by dictionaries, or more generally by anything that responds to the C{has_key}
method. The YAML representation can be used to create and display feature
structures intuitively:
>>> f1 = yaml.load('''
... A:
... B: b
... D: d
... ''')
>>> f2 = yaml.load('''
... A:
... C: c
... D: d
... ''')
>>> print yaml.show(unify(f1, f2))
A:
B: b
C: c
D: d
Feature structures are typically used to represent partial information
about objects. A feature name that is not mapped to a value stands
for a feature whose value is unknown (I{not} a feature without a
value). Two feature structures that represent (potentially
overlapping) information about the same object can be combined by
X{unification}. When two inconsistent feature structures are unified,
the unification fails and raises an error.
Features can be specified using X{feature paths}, or tuples of feature names
that specify paths through the nested feature structures to a value.
Feature structures may contain reentrant feature values. A
X{reentrant feature value} is a single feature value that can be
accessed via multiple feature paths. Unification preserves the
reentrance relations imposed by both of the unified feature
structures. After unification, any extensions to a reentrant feature
value will be visible using any of its feature paths.
Feature structure variables are encoded using the L{Variable} class. The scope
of a variable is determined by the X{bindings} used when the structure
including that variable is unified. Bindings can be reused between unifications
to ensure that variables with the same name get the same value.
"""
from copy import copy, deepcopy
import re
import yaml
import unittest
import sys
class UnificationFailure(Exception):
    """Raised when two values cannot be merged by unification."""
def isMapping(obj):
    """
    Decide whether C{obj} should be handled as a feature structure.
    An object qualifies when it exposes C{has_key} and has not opted out
    by defining an attribute or method called C{_no_feature}.
    @param obj: The object to be tested
    @type obj: C{object}
    @return: True iff the object can be treated as a feature structure
    @rtype: C{bool}
    """
    attributes = dir(obj)
    return ('has_key' in attributes) and ('_no_feature' not in attributes)
class _FORWARD(object):
    """
    _FORWARD is a singleton value, used in unification as a flag that a value
    has been forwarded to another object.
    This class itself is used as the singleton value. It cannot be
    instantiated.
    """
    # The class object itself serves as a dictionary key (see _do_unify);
    # instantiation is deliberately forbidden so only one such value exists.
    def __init__(self):
        raise TypeError, "The _FORWARD class is not meant to be instantiated"
class Variable(object):
    """
    A Variable is an object that can be used in unification to hold an
    initially unknown value. Two equivalent Variables, for example, can be used
    to require that two features have the same value.

    When a Variable is assigned a value, it will eventually be replaced by
    that value. However, in order to make that value show up everywhere the
    variable appears, the Variable temporarily stores its assigned value and
    becomes a I{bound variable}. Bound variables do not appear in the results
    of unification.

    Variables are distinguished by their name, and by the dictionary of
    I{bindings} that is being used to determine their values. Two variables can
    have the same name but be associated with two different binding
    dictionaries: those variables are not equal.
    """
    # Class-level counter handing out a unique numeric id per Variable;
    # the id is the identity used by __hash__ and __cmp__.
    _next_numbered_id = 1

    def __init__(self, name=None, value=None):
        """
        Construct a new feature structure variable.
        The value should be left at its default of None; it is only used
        internally to copy bound variables.
        @type name: C{string}
        @param name: An identifier for this variable. Two C{Variable} objects
        with the same name will be given the same value in a given dictionary
        of bindings.
        """
        self._uid = Variable._next_numbered_id
        Variable._next_numbered_id += 1
        # Anonymous variables fall back to their unique numeric id as a name.
        if name is None: name = self._uid
        self._name = str(name)
        self._value = value
    def name(self):
        """
        @return: This variable's name.
        @rtype: C{string}
        """
        return self._name
    def value(self):
        """
        If this variable is bound, find its value. If it is unbound or aliased
        to an unbound variable, returns None.
        @return: The value of this variable, if any.
        @rtype: C{object}
        """
        # A Variable stored as the value means we are aliased; delegate.
        if isinstance(self._value, Variable): return self._value.value()
        else: return self._value
    def copy(self):
        """
        @return: A copy of this variable.
        @rtype: C{Variable}
        """
        return Variable(self.name(), self.value())
    def forwarded_self(self):
        """
        Variables are aliased to other variables by one variable _forwarding_
        to the other. The first variable simply has the second as its value,
        but it acts like the second variable's _value_ is its value.
        forwarded_self returns the final Variable object that actually stores
        the value.
        @return: The C{Variable} responsible for storing this variable's value.
        @rtype: C{Variable}
        """
        # Follow the alias chain to its end.
        if isinstance(self._value, Variable):
            return self._value.forwarded_self()
        else: return self
    def bindValue(self, value, ourbindings, otherbindings):
        """
        Bind this variable to a value. C{ourbindings} are the bindings that
        accompany the feature structure this variable came from;
        C{otherbindings} are the bindings from the structure it's being unified
        with.
        @type value: C{object}
        @param value: The value to be assigned.
        @type ourbindings: C{dict}
        @param ourbindings: The bindings associated with this variable.
        @type otherbindings: C{dict}
        @param otherbindings: The bindings associated with the value being
        assigned. (May be identical to C{ourbindings}.)
        """
        if isinstance(self._value, Variable):
            # Forward the job of binding to the variable we're aliased to.
            return self._value.bindValue(value, ourbindings, otherbindings)
        if self._value is None:
            # This variable is unbound, so bind it.
            self._value = value
        else:
            # This variable is already bound; try to unify the existing value
            # with the new one.
            self._value = unify(self._value, value, ourbindings, otherbindings)
    def forwardTo(self, other, ourbindings, otherbindings):
        """
        A unification wants this variable to be aliased to another variable.
        Forward this variable to the other one, and return the other.
        @type other: C{Variable}
        @param other: The variable to replace this one.
        @type ourbindings: C{dict}
        @param ourbindings: The bindings associated with this variable.
        @type otherbindings: C{dict}
        @param otherbindings: The bindings associated with the other variable.
        (May be identical to C{ourbindings}.)
        @return: C{other}
        @rtype: C{Variable}
        """
        # Transfer any value we already hold to the target, then alias.
        other.bindValue(self.value(), ourbindings, otherbindings)
        self._value = other
        return other
    def __hash__(self): return hash(self._uid)
    def __cmp__(self, other):
        """
        Variables are equal if they are the same object or forward to the
        same object. Variables with the same name may still be unequal.
        """
        if not isinstance(other, Variable): return -1
        # Aliased variables compare through their forwarding target.
        if isinstance(self._value, Variable): return cmp(self._value, other)
        else: return cmp(self._uid, other._uid)
    def __repr__(self):
        if self._value is None: return '?%s' % self._name
        else: return '?%s: %r' % (self._name, self._value)
def variable_representer(dumper, var):
    """Serialize a Variable to YAML as a ?name scalar tagged !var."""
    scalar = u'?%s' % var.name()
    return dumper.represent_scalar(u'!var', scalar)
# Register the representer so yaml.dump renders Variable objects as ?name.
yaml.add_representer(Variable, variable_representer)
def variable_constructor(loader, node):
    """Deserialize a YAML ?name scalar into a Variable named `name'."""
    text = loader.construct_scalar(node)
    # Strip the leading '?' to recover the variable name.
    return Variable(text[1:])
# Register the constructor for the !var tag, and make bare ?name scalars
# resolve to that tag implicitly.
yaml.add_constructor(u'!var', variable_constructor)
yaml.add_implicit_resolver(u'!var', re.compile(r'^\?\w+$'))
def _copy_and_bind(feature, bindings, memo=None):
    """
    Make a deep copy of a feature structure, preserving reentrance using the
    C{memo} dictionary. Meanwhile, variables are replaced by their bound
    values, if these values are already known, and variables with unknown
    values are given placeholder bindings.
    """
    if memo is None: memo = {}
    # Reentrance: if this object was already copied, reuse the same copy.
    if id(feature) in memo: return memo[id(feature)]
    if isinstance(feature, Variable) and bindings is not None:
        # An unseen variable gets a placeholder binding (a copy of itself);
        # we then copy whatever the bindings hold, passing bindings=None so
        # the recursion does not re-enter this branch.
        if not bindings.has_key(feature.name()):
            bindings[feature.name()] = feature.copy()
        result = _copy_and_bind(bindings[feature.name()], None, memo)
    else:
        if isMapping(feature):
            # Construct a new object of the same class
            result = feature.__class__()
            for (key, value) in feature.items():
                result[key] = _copy_and_bind(value, bindings, memo)
        else: result = feature
    # Record both directions so copies of copies collapse to one object.
    memo[id(feature)] = result
    memo[id(result)] = result
    return result
def unify(feature1, feature2, bindings1=None, bindings2=None):
    """
    In general, the 'unify' procedure takes two values, and either returns a
    value that provides the information provided by both values, or fails if
    that is impossible.

    These values can have any type, but fall into a few general cases:
      - Values that respond to C{has_key} represent feature structures. The
        C{unify} procedure will recurse into them and unify their inner values.
      - L{Variable}s represent an unknown value, and are handled specially.
        The values assigned to variables are tracked using X{bindings}.
      - C{None} represents the absence of information.
      - Any other value is considered a X{base value}. Base values are
        compared to each other with the == operation.

    The value 'None' represents the absence of any information. It specifies no
    properties and acts as the identity in unification.
    >>> unify(3, None)
    3

    >>> unify(None, 'fish')
    'fish'

    A base value unifies with itself, but not much else.
    >>> unify(True, True)
    True

    >>> unify([1], [1])
    [1]

    >>> unify('a', 'b')
    Traceback (most recent call last):
        ...
    UnificationFailure

    When two mappings (representing feature structures, and usually implemented
    as dictionaries) are unified, any chain of keys that accesses a value in
    either mapping will access an equivalent or more specific value in the
    unified mapping. If this is not possible, UnificationFailure is raised.
    >>> f1 = dict(A=dict(B='b'))
    >>> f2 = dict(A=dict(C='c'))
    >>> unify(f1, f2) == dict(A=dict(B='b', C='c'))
    True

    The empty dictionary specifies no features. It unifies with any mapping.
    >>> unify({}, dict(foo='bar'))
    {'foo': 'bar'}

    >>> unify({}, True)
    Traceback (most recent call last):
        ...
    UnificationFailure

    Representing dictionaries in YAML form is useful for making feature
    structures readable:
    >>> f1 = yaml.load("number: singular")
    >>> f2 = yaml.load("person: 3")
    >>> print yaml.show(unify(f1, f2))
    number: singular
    person: 3

    >>> f1 = yaml.load('''
    ... A:
    ...   B: b
    ...   D: d
    ... ''')
    >>> f2 = yaml.load('''
    ... A:
    ...   C: c
    ...   D: d
    ... ''')
    >>> print yaml.show(unify(f1, f2))
    A:
      B: b
      C: c
      D: d

    Variables are names for unknown values. Variables are assigned values
    that will make unification succeed. The values of variables can be reused
    in later unifications if you provide a dictionary of _bindings_ from
    variables to their values.
    >>> bindings = {}
    >>> print unify(Variable('x'), 5, bindings)
    5
    >>> print bindings
    {'x': 5}
    >>> print unify({'a': Variable('x')}, {}, bindings)
    {'a': 5}

    The same variable name can be reused in different binding dictionaries
    without collision. In some cases, you may want to provide two separate
    binding dictionaries to C{unify} -- one for each feature structure, so
    their variables do not collide.

    In the following examples, two different feature structures use the
    variable ?x to require that two values are equal. The values assigned to
    ?x are consistent within each structure, but would be inconsistent if every
    ?x had to have the same value.
    >>> f1 = yaml.load('''
    ... a: 1
    ... b: 1
    ... c: ?x
    ... d: ?x
    ... ''')
    >>> f2 = yaml.load('''
    ... a: ?x
    ... b: ?x
    ... c: 2
    ... d: 2
    ... ''')
    >>> bindings1 = {}
    >>> bindings2 = {}
    >>> print yaml.show(unify(f1, f2, bindings1, bindings2))
    a: 1
    b: 1
    c: 2
    d: 2
    >>> print bindings1
    {'x': 2}
    >>> print bindings2
    {'x': 1}

    Feature structures can involve _reentrant_ values, where multiple feature
    paths lead to the same value. This is represented by the features having
    the same Python object as a value. (This kind of identity can be tested
    using the C{is} operator.)

    Unification preserves the properties of reentrance. So if a reentrant value
    is changed by unification, it is changed everywhere it occurs, and it is
    still reentrant. Reentrant features can even form cycles, although these
    cycles currently cannot be printed through the current YAML library.
    >>> f1 = yaml.load('''
    ... A: &1                 # &1 defines a reference in YAML...
    ...   B: b
    ... E:
    ...   F: *1               # and *1 uses the previously defined reference.
    ... ''')
    >>> f1['E']['F']['B']
    'b'
    >>> f1['A'] is f1['E']['F']
    True
    >>> f2 = yaml.load('''
    ... A:
    ...   C: c
    ... E:
    ...   F:
    ...     D: d
    ... ''')
    >>> f3 = unify(f1, f2)
    >>> print yaml.show(f3)
    A: &1
      B: b
      C: c
      D: d
    E:
      F: *1
    >>> f3['A'] is f3['E']['F']    # Showing that the reentrance still holds.
    True

    This unification creates a cycle:
    >>> f1 = yaml.load('''
    ... F: &1 {}
    ... G: *1
    ... ''')
    >>> f2 = yaml.load('''
    ... F:
    ...   H: &2 {}
    ... G: *2
    ... ''')
    >>> f3 = unify(f1, f2)
    >>> print f3
    {'G': {'H': {...}}, 'F': {'H': {...}}}
    >>> print f3['F'] is f3['G']
    True
    >>> print f3['F'] is f3['G']['H']
    True
    >>> print f3['F'] is f3['G']['H']['H']
    True

    A cycle can also be created using variables instead of reentrance.
    Here we supply a single set of bindings, so that it is used on both sides
    of the unification, making ?x mean the same thing in both feature
    structures.
    >>> f1 = yaml.load('''
    ... F:
    ...   H: ?x
    ... ''')
    >>> f2 = yaml.load('''
    ... F: ?x
    ... ''')
    >>> f3 = unify(f1, f2, {})
    >>> print f3
    {'F': {'H': {...}}}
    >>> print f3['F'] is f3['F']['H']
    True
    >>> print f3['F'] is f3['F']['H']['H']
    True

    Two sets of bindings can be provided because the variable names on each
    side of the unification may be unrelated. An example involves unifying the
    following two structures, which each require that two values are
    equivalent, and happen to both use ?x to express that requirement.
    >>> f1 = yaml.load('''
    ... a: 1
    ... b: 1
    ... c: ?x
    ... d: ?x
    ... ''')
    >>> f2 = yaml.load('''
    ... a: ?x
    ... b: ?x
    ... c: 2
    ... d: 2
    ... ''')
    >>> bindings1 = {}
    >>> bindings2 = {}
    >>> # We could avoid defining two empty dictionaries by simply using the
    >>> # defaults, with unify(f1, f2) -- but we want to be able to examine
    >>> # the bindings afterward.
    >>> print yaml.show(unify(f1, f2, bindings1, bindings2))
    a: 1
    b: 1
    c: 2
    d: 2
    >>> print bindings1
    {'x': 2}
    >>> print bindings2
    {'x': 1}

    If a variable is unified with another variable, the two variables are
    _aliased_ to each other; they share the same value, similarly to reentrant
    feature structures. This is represented in a set of bindings as one
    variable having the other as its value.
    >>> f1 = yaml.load('''
    ... a: ?x
    ... b: ?x
    ... ''')
    >>> f2 = yaml.load('''
    ... b: ?y
    ... c: ?y
    ... ''')
    >>> bindings = {}
    >>> print yaml.show(unify(f1, f2, bindings))
    a: &1 ?y
    b: *1
    c: *1
    >>> print bindings
    {'x': ?y}

    Reusing the same variable bindings ensures that appropriate bindings are
    made after the fact:
    >>> bindings = {}
    >>> f1 = {'a': Variable('x')}
    >>> f2 = unify(f1, {'a': {}}, bindings)
    >>> f3 = unify(f2, {'b': Variable('x')}, bindings)
    >>> print yaml.show(f3)
    a: &1 {}
    b: *1
    >>> print bindings
    {'x': {}}

    @param feature1: The first object to be unified.
    @type feature1: C{object} (probably a mapping)
    @param feature2: The second object to be unified.
    @type feature2: C{object} (probably a mapping)
    @param bindings1: The variable bindings associated with the first object.
    @type bindings1: C{dict} or None
    @param bindings2: The variable bindings associated with the second object,
    if these are distinct from C{bindings1}.
    @type bindings2: C{dict} or None
    @return: The result of unifying the two objects.
    @rtype: C{object} (probably a mapping)
    """
    # With no bindings supplied, each side gets its own private dictionary;
    # if only one is supplied it is shared by both sides.
    if bindings1 is None and bindings2 is None:
        bindings1 = {}
        bindings2 = {}
    else:
        if bindings1 is None: bindings1 = {}
        if bindings2 is None: bindings2 = bindings1
    # Make copies of the two structures (since the unification algorithm is
    # destructive). Use the same memo, to preserve reentrance links between
    # them.
    copymemo = {}
    copy1 = _copy_and_bind(feature1, bindings1, copymemo)
    copy2 = _copy_and_bind(feature2, bindings2, copymemo)
    # Preserve links between bound variables and the two feature structures.
    for b in (bindings1, bindings2):
        for (vname, value) in b.items():
            value_id = id(value)
            if value_id in copymemo:
                b[vname] = copymemo[value_id]
    # Go on to doing the unification.
    unified = _destructively_unify(copy1, copy2, bindings1, bindings2, {})
    # Clean up: collapse _FORWARD chains in the bindings and the result,
    # then replace bound variables by their values.
    _apply_forwards_to_bindings(bindings1)
    _apply_forwards_to_bindings(bindings2)
    _apply_forwards(unified, {})
    unified = _lookup_values(unified, {}, remove=False)
    _lookup_values(bindings1, {}, remove=True)
    _lookup_values(bindings2, {}, remove=True)
    return unified
def _destructively_unify(feature1, feature2, bindings1, bindings2, memo):
    """
    Attempt to unify C{feature1} and C{feature2} by modifying them
    in-place. If the unification succeeds, then C{feature1} will
    contain the unified value, and the value of C{feature2} is
    undefined. If the unification fails, then a
    UnificationFailure is raised, and the values of C{feature1}
    and C{feature2} are undefined.

    C{memo} is a dictionary keyed on the pair of object ids; it caches
    results so that each pair of (possibly reentrant) structures is
    unified only once.
    """
    if memo.has_key((id(feature1), id(feature2))):
        return memo[id(feature1), id(feature2)]
    unified = _do_unify(feature1, feature2, bindings1, bindings2, memo)
    memo[id(feature1), id(feature2)] = unified
    return unified
def _do_unify(feature1, feature2, bindings1, bindings2, memo):
    """
    Do the actual work of _destructively_unify when the result isn't memoized.
    Returns the unified value; raises UnificationFailure for unequal base
    values or when only one argument is a mapping.
    """
    # Trivial cases.
    if feature1 is None: return feature2
    if feature2 is None: return feature1
    if feature1 is feature2: return feature1
    # Deal with variables by binding them to the other value.
    if isinstance(feature1, Variable):
        if isinstance(feature2, Variable):
            # If both objects are variables, forward one to the other. This
            # has the effect of unifying the variables.
            return feature1.forwardTo(feature2, bindings1, bindings2)
        else:
            feature1.bindValue(feature2, bindings1, bindings2)
            return feature1
    if isinstance(feature2, Variable):
        # Note the swapped binding arguments: the variable's own bindings
        # come first.
        feature2.bindValue(feature1, bindings2, bindings1)
        return feature2
    # If it's not a mapping or variable, it's a base object, so we just
    # compare for equality.
    if not isMapping(feature1):
        if feature1 == feature2: return feature1
        else:
            raise UnificationFailure
    if not isMapping(feature2): raise UnificationFailure
    # At this point, we know they're both mappings.
    # Do the destructive part of unification.
    # Chase feature2's forward chain to its live identity, then forward it
    # to feature1 so every reference to feature2 collapses onto feature1.
    while feature2.has_key(_FORWARD): feature2 = feature2[_FORWARD]
    feature2[_FORWARD] = feature1
    for (fname, val2) in feature2.items():
        if fname == _FORWARD: continue
        val1 = feature1.get(fname)
        feature1[fname] = _destructively_unify(val1, val2, bindings1, bindings2, memo)
    return feature1
def _apply_forwards(feature, visited):
    """
    Replace any feature structure that has a forward pointer with
    the target of its forward pointer (to preserve reentrance).
    C{visited} maps id() -> True and guards against cycles.
    """
    if not isMapping(feature): return
    if visited.has_key(id(feature)): return
    visited[id(feature)] = True
    for fname, fval in feature.items():
        if isMapping(fval):
            # Chase the whole forward chain before storing the value back.
            while fval.has_key(_FORWARD):
                fval = fval[_FORWARD]
            feature[fname] = fval
            _apply_forwards(fval, visited)
def _lookup_values(mapping, visited, remove=False):
    """
    The unification procedure creates _bound variables_, which are Variable
    objects that have been assigned a value. Bound variables are not useful
    in the end result, however, so they should be replaced by their values.

    This procedure takes a mapping, which may be a feature structure or a
    binding dictionary, and replaces bound variables with their values.

    If the dictionary is a binding dictionary, then 'remove' should be set to
    True. This ensures that unbound, unaliased variables are removed from the
    dictionary. If the variable name 'x' is mapped to the unbound variable ?x,
    then, it should be removed. This is not done with features, because a
    feature named 'x' can of course have a variable ?x as its value.
    """
    if isinstance(mapping, Variable):
        # Because it's possible to unify bare variables, we need to gracefully
        # accept a variable in place of a dictionary, and return a result that
        # is consistent with that variable being inside a dictionary.
        #
        # We can't remove a variable from itself, so we ignore 'remove'.
        var = mapping
        if var.value() is not None:
            return var.value()
        else:
            return var.forwarded_self()
    if not isMapping(mapping): return mapping
    # 'visited' (id() -> True) prevents infinite recursion on cyclic
    # structures.
    if visited.has_key(id(mapping)): return mapping
    visited[id(mapping)] = True
    for fname, fval in mapping.items():
        if isMapping(fval):
            _lookup_values(fval, visited)
        elif isinstance(fval, Variable):
            if fval.value() is not None:
                # Bound variable: substitute its value, and recurse into it
                # if the value is itself a feature structure.
                mapping[fname] = fval.value()
                if isMapping(mapping[fname]):
                    _lookup_values(mapping[fname], visited)
            else:
                # Unbound variable: keep (or, for binding dictionaries,
                # drop) the variable that actually stores the value.
                newval = fval.forwarded_self()
                if remove and newval.name() == fname:
                    del mapping[fname]
                else:
                    mapping[fname] = newval
    return mapping
def _apply_forwards_to_bindings(bindings):
    """
    Replace any feature structures that have been forwarded by their new
    identities, so each binding points at the surviving object.
    """
    for name in bindings.keys():
        target = bindings[name]
        if not isMapping(target):
            continue
        # Chase the forward chain to the structure's final identity.
        while target.has_key(_FORWARD):
            target = target[_FORWARD]
        bindings[name] = target
def test():
    "Run unit tests on unification."
    import doctest
    # The doctest examples embedded in this module's docstrings (notably
    # unify's) double as the test suite.
    doctest.testmod()
if __name__ == "__main__":
test()
| Python |
# Contributed by Peter Wang
from en.parser.nltk_lite import tokenize
from en.parser.nltk_lite.parse.featurechart import *
from en.parser.nltk_lite.parse.grammarfile import GrammarFile
from en.parser.nltk_lite.draw.tree import draw_trees
from en.parser.nltk_lite.parse import bracket_parse
from en.parser.nltk_lite.parse import tree
"""
An implementation of the Hole Semantics model, following Blackburn and Bos,
Representation and Inference for Natural Language (CSLI, 2005).
The semantic representations are built by the grammar hole.cfg.
This module contains driver code to read in sentences and parse them
according to a hole semantics grammar.
After parsing, the semantic representation is in the form of an underspecified
representation that is not easy to read. We use a "plugging" algorithm to
convert that representation into first-order logic formulas. These can be
displayed textually or graphically.
"""
# Note that in this code there may be multiple types of trees being referred to:
#
# 1. parse trees
# 2. the underspecified representation
# 3. first-order logic formula trees
# 4. the search space when plugging (search tree)
#
class HoleSemantics:
"""
This class holds the broken-down components of a hole semantics, i.e. it
extracts the holes, labels, logic formula fragments and constraints out of
a big conjunction of such as produced by the hole semantics grammar. It
then provides some operations on the semantics dealing with holes, labels
and finding legal ways to plug holes with labels.
"""
    def __init__(self, usr):
        """
        Constructor. `usr' is a tree of nodes that can take the forms:
            (and t t')
            (hole v)
            (label v)
            (: v phi)
            (leq v v)
        where
            t, t' are subtrees
            v is a variable
            phi is a formula fragment
        """
        self.holes = set()       # set of variables which were asserted hole(x)
        self.labels = set()      # set of variables which were asserted label(x)
        self.fragments = {}      # mapping of label -> formula fragment
        self.constraints = set() # set of Constraints
        # Populate the four collections above from the USR tree, then
        # pre-compute the entry points of the formula tree.
        self._break_down(usr)
        self.top_most_labels = self._find_top_most_labels()
        self.top_hole = self._find_top_hole()
def is_label(self, x):
"""Return true if x is a label in this semantic representation."""
return x in self.labels
def is_hole(self, x):
"""Return true if x is a hole in this semantic representation."""
return x in self.holes
def is_node(self, x):
"""
Return true if x is a node (label or hole) in this semantic
representation.
"""
return self.is_label(x) or self.is_hole(x)
    def _break_down(self, usr):
        """
        Extract holes, labels, formula fragments and constraints from the hole
        semantics underspecified representation (USR).
        Raises ValueError on an unrecognized node type, and asserts that no
        variable is declared as both a hole and a label.
        """
        assert isinstance(usr, Tree)
        # (and X Y) -- recurse into both conjuncts
        if usr.node == 'and':
            self._break_down(usr[0])
            self._break_down(usr[1])
        # (hole H) -- H is a hole
        elif usr.node == 'hole':
            hole = usr[0]
            self.holes.add(hole)
            assert not self.is_label(hole)
        # (label L) -- L is a label
        elif usr.node == 'label':
            label = usr[0]
            self.labels.add(label)
            assert not self.is_hole(label)
        # (: L F) -- a formula fragment F with label L
        elif usr.node == ':':
            label = usr[0]
            phi = usr[1]
            # Each label may name at most one fragment.
            assert not self.fragments.has_key(label)
            self.fragments[label] = phi
        # (leq L N) -- a constraint between the label L and node N
        elif usr.node == 'leq':
            lhs = usr[0]
            rhs = usr[1]
            self.constraints.add(Constraint(lhs, rhs))
        else:
            raise ValueError(usr.node)
def _find_top_most_labels(self):
"""
Return the set of labels which are not referenced directly as part of
another formula fragment. These will be the top-most labels for the
subtree that they are part of.
"""
top_most_labels = self.labels.copy()
for f in self.fragments.itervalues():
for arg in f:
if self.is_label(arg):
top_most_labels.discard(arg)
return top_most_labels
def _find_top_hole(self):
"""
Return the hole that will be the top of the formula tree.
"""
top_hole = self.holes.copy()
for f in self.fragments.itervalues():
for arg in f:
if self.is_hole(arg):
top_hole.discard(arg)
assert len(top_hole) == 1 # it must be unique
return top_hole.pop()
def pluggings(self):
"""
Calculate and return all the legal pluggings (mappings of labels to
holes) of this semantics given the constraints.
"""
record = []
self._plug_nodes([(self.top_hole, [])], self.top_most_labels, {},
record)
return record
    def _plug_nodes(self, queue, potential_labels, plug_acc, record):
        """
        Plug the nodes in `queue' with the labels in `potential_labels'.

        Each element of `queue' is a tuple of the node to plug and the list of
        ancestor holes from the root of the graph to that node.

        `potential_labels' is a set of the labels which are still available for
        plugging.

        `plug_acc' is the incomplete mapping of holes to labels made on the
        current branch of the search tree so far.

        `record' is a list of all the complete pluggings that we have found in
        total so far. It is the only parameter that is destructively updated.
        """
        assert queue != []
        # Process the head of the queue; the rest is carried along.
        (node, ancestors) = queue[0]
        if self.is_hole(node):
            # The node is a hole, try to plug it.
            self._plug_hole(node, ancestors, queue[1:], potential_labels,
                            plug_acc, record)
        else:
            assert self.is_label(node)
            # The node is a label. Replace it in the queue by the holes and
            # labels in the formula fragment named by that label.
            phi = self.fragments[node]
            head = [(a, ancestors) for a in phi if self.is_node(a)]
            self._plug_nodes(head + queue[1:], potential_labels,
                             plug_acc, record)
    def _plug_hole(self, hole, ancestors0, queue, potential_labels0,
                   plug_acc0, record):
        """
        Try all possible ways of plugging a single hole.
        See _plug_nodes for the meanings of the parameters.
        """
        # Add the current hole we're trying to plug into the list of ancestors.
        assert hole not in ancestors0
        ancestors = [hole] + ancestors0

        # Try each potential label in this hole in turn.
        for l in potential_labels0:
            # Is the label valid in this hole?
            if self._violates_constraints(l, ancestors):
                continue
            # Copy both accumulators so sibling branches of the search are
            # unaffected by the choice made on this branch.
            plug_acc = plug_acc0.copy()
            plug_acc[hole] = l
            potential_labels = potential_labels0.copy()
            potential_labels.remove(l)

            if len(potential_labels) == 0:
                # No more potential labels. That must mean all the holes have
                # been filled so we have found a legal plugging so remember it.
                #
                # Note that the queue might not be empty because there might
                # be labels on there that point to formula fragments with
                # no holes in them. _sanity_check_plugging will make sure
                # all holes are filled.
                self._sanity_check_plugging(plug_acc, self.top_hole, [])
                record.append(plug_acc)
            else:
                # Recursively try to fill in the rest of the holes in the
                # queue. The label we just plugged into the hole could have
                # holes of its own so at the end of the queue. Putting it on
                # the end of the queue gives us a breadth-first search, so that
                # all the holes at level i of the formula tree are filled
                # before filling level i+1.
                # A depth-first search would work as well since the trees must
                # be finite but the bookkeeping would be harder.
                self._plug_nodes(queue + [(l, ancestors)], potential_labels,
                                 plug_acc, record)
def _violates_constraints(self, label, ancestors):
"""
Return True if the `label' cannot be placed underneath the holes given
by the set `ancestors' because it would violate the constraints imposed
on it.
"""
for c in self.constraints:
if c.lhs == label:
if c.rhs not in ancestors:
return True
return False
    def _sanity_check_plugging(self, plugging, node, ancestors):
        """
        Make sure that a given plugging is legal. We recursively go through
        each node and make sure that no constraints are violated.
        We also check that all holes have been filled.

        An unplugged hole surfaces as a KeyError on `plugging[node]'; a
        violated constraint surfaces as an AssertionError.
        """
        if self.is_hole(node):
            # Follow the plugging through the hole, extending the ancestor
            # chain used below for constraint checking.
            ancestors = [node] + ancestors
            label = plugging[node]
        else:
            label = node
        assert self.is_label(label)
        # Every constraint on this label must name one of its ancestors.
        for c in self.constraints:
            if c.lhs == label:
                assert c.rhs in ancestors
        # Recurse into the fragment named by this label.
        phi = self.fragments[label]
        for arg in phi:
            if self.is_node(arg):
                self._sanity_check_plugging(plugging, arg, [label] + ancestors)
    def formula_tree(self, plugging):
        """
        Return the first-order logic formula tree for this underspecified
        representation using the plugging given.

        @param plugging: a complete mapping of holes to labels, as produced
            by pluggings()
        @return: an FOLTree rooted at the label plugged into the top hole
        """
        return self._formula_tree(plugging, self.top_hole)
def _formula_tree(self, plugging, node):
if node in plugging:
return self._formula_tree(plugging, plugging[node])
elif self.fragments.has_key(node):
frag = self.fragments[node]
children = [self._formula_tree(plugging, arg) for arg in frag]
return FOLTree(frag.node, children)
else:
return node
class Constraint:
    """
    A constraint of the form (L =< N), where L is a label and N is a node
    (a label or a hole).
    """
    def __init__(self, lhs, rhs):
        self.lhs = lhs
        self.rhs = rhs

    def __eq__(self, other):
        # Only compare against other Constraint instances.
        if self.__class__ != other.__class__:
            return False
        return (self.lhs, self.rhs) == (other.lhs, other.rhs)

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        # Hash the canonical textual form, consistent with __eq__.
        return hash(repr(self))

    def __repr__(self):
        return '(%s =< %s)' % (self.lhs, self.rhs)
class FOLTree(Tree):
    """
    A Tree for first-order logic formulas that prints differently.  Nodes
    named after a recognised logical operator are printed in infix form;
    nodes with unrecognised names are treated as predicates applied to
    their children.
    """
    def __str__(self):
        op = self.node
        if op == 'ALL':
            return '(ALL %s %s)' % (self[0], self[1])
        if op == 'SOME':
            return '(SOME %s %s)' % (self[0], self[1])
        if op == 'AND':
            return '(%s /\ %s)' % (self[0], self[1])
        if op == 'IMP':
            return '(%s -> %s)' % (self[0], self[1])
        # add more operators here
        # Fall through: treat the node as a predicate with arguments.
        arg_text = ', '.join([str(arg) for arg in self])
        return '%s(%s)' % (op, arg_text)
def main():
    """
    Command-line driver: parse a sentence with the grammar in the file named
    on the command line (default 'hole.cfg'), build the hole semantics of
    each parse, enumerate all legal pluggings and print (or optionally draw)
    the resulting first-order formula trees.

    NOTE(review): Python 2 code (print statements); depends on GrammarFile,
    tokenize, bracket_parse, HoleSemantics and draw_trees from the
    surrounding package.
    """
    import sys
    from optparse import OptionParser, OptionGroup
    usage = """%%prog [options] [grammar_file]""" % globals()
    opts = OptionParser(usage=usage)
    opts.add_option("-c", "--components",
        action="store_true", dest="show_components", default=0,
        help="show hole semantics components")
    opts.add_option("-r", "--raw",
        action="store_true", dest="show_raw", default=0,
        help="show the raw hole semantics expression")
    opts.add_option("-d", "--drawtrees",
        action="store_true", dest="draw_trees", default=0,
        help="show formula trees in a GUI window")
    opts.add_option("-v", "--verbose",
        action="count", dest="verbosity", default=0,
        help="show more information during parse")

    (options, args) = opts.parse_args()
    if len(args) > 0:
        filename = args[0]
    else:
        filename = 'hole.cfg'

    print 'Reading grammar file', filename
    grammar = GrammarFile.read_file(filename)
    parser = grammar.earley_parser(trace=options.verbosity)

    # Prompt the user for a sentence.
    print 'Sentence: ',
    line = sys.stdin.readline()[:-1]

    # Parse the sentence.
    tokens = list(tokenize.whitespace(line))
    trees = parser.get_parse_list(tokens)
    print 'Got %d different parses' % len(trees)

    for tree in trees:
        # Get the semantic feature from the top of the parse tree.
        sem = tree[0].node['sem'].simplify()

        # Skolemise away all quantifiers. All variables become unique.
        sem = sem.skolemise()

        # Reparse the semantic representation from its bracketed string format.
        # I find this uniform structure easier to handle. It also makes the
        # code mostly independent of the lambda calculus classes.
        usr = bracket_parse(str(sem))

        # Break the hole semantics representation down into its components
        # i.e. holes, labels, formula fragments and constraints.
        hole_sem = HoleSemantics(usr)

        # Maybe print the raw semantic representation.
        if options.show_raw:
            print
            print 'Raw expression'
            print usr

        # Maybe show the details of the semantic representation.
        if options.show_components:
            print
            print 'Holes: ', hole_sem.holes
            print 'Labels: ', hole_sem.labels
            print 'Constraints: ', hole_sem.constraints
            print 'Top hole: ', hole_sem.top_hole
            print 'Top labels: ', hole_sem.top_most_labels
            print 'Fragments:'
            for (l,f) in hole_sem.fragments.items():
                print '\t%s: %s' % (l, f)

        # Find all the possible ways to plug the formulas together.
        pluggings = hole_sem.pluggings()

        # Build FOL formula trees using the pluggings.
        # NOTE: rebinds `trees' (previously the parse list) to the formula
        # trees of the current parse.
        trees = map(hole_sem.formula_tree, pluggings)

        # Print out the formulas in a textual format.
        n = 1
        for tree in trees:
            print
            print '%d. %s' % (n, tree)
            n += 1

        # Maybe draw the formulas as trees.
        if options.draw_trees:
            draw_trees(*trees)

    print
    print 'Done.'
# Run the command-line driver only when executed as a script.
if __name__ == '__main__':
    main()
| Python |
# New lambda system (Steven Bird)
class Lambda:
    """
    An untyped lambda-calculus term (new lambda system, Steven Bird).

    A term is one of three kinds, recorded in the `_type' tag:
      - 'v' variable:     Lambda('x')
      - 'l' abstraction:  Lambda('x', term), i.e. \\x.term
      - 'f' application:  Lambda(f_term, arg_term)
    """
    def __init__(self, *args):
        if isinstance(args[0], str):
            if len(args) == 1:
                # Variable.
                self._var = args[0]
                self._type = 'v'
            else:
                # Abstraction: bound variable name plus body term.
                self._var = args[0]
                self._term = args[1]
                self._type = 'l'
        else:
            # Application: function term plus argument term.
            self._f = args[0]
            self._arg = args[1]
            self._type = 'f'

    def equals(self, other):
        """Return True if this term is structurally equal to `other'
        (no alpha-equivalence)."""
        if self.__class__ is not other.__class__:
            return False
        # BUG FIX: the original compared the type tags with `is not', an
        # identity test that only works because CPython happens to intern
        # short string literals; `!=' is the portable value comparison.
        elif self._type != other._type:
            return False
        elif self._type == 'v':
            return self._var == other._var
        elif self._type == 'l':
            return self._var == other._var and self._term.equals(other._term)
        elif self._type == 'f':
            return self._f.equals(other._f) and self._arg.equals(other._arg)

    def variables(self):
        """Return the set of all variable names occurring in the term."""
        if self._type == 'v':
            return set([self._var])
        elif self._type == 'l':
            return set([self._var]).union(self._term.variables())
        elif self._type == 'f':
            return self._f.variables().union(self._arg.variables())

    def free(self):
        """Return the set of free (unbound) variable names."""
        if self._type == 'v':
            return set([self._var])
        elif self._type == 'l':
            return self._term.free().difference(set([self._var]))
        elif self._type == 'f':
            return self._f.free().union(self._arg.free())

    def subterms(self):
        """Return the set of all subterms, including the term itself."""
        if self._type == 'v':
            return set([self])
        elif self._type == 'l':
            return self._term.subterms().union([self])
        elif self._type == 'f':
            return self._f.subterms().union(self._arg.subterms()).union(set([self]))

    def replace(self, variable, expression):
        """
        Substitute `expression' for every free occurrence of `variable'.

        NOTE(review): the substitution is naive -- bound variables are not
        renamed, so free variables of `expression' can be captured.
        """
        if self._type == 'v':
            if self._var == variable:
                return expression
            else:
                return self
        elif self._type == 'l':
            return Lambda(self._var,
                          self._term.replace(variable, expression))
        elif self._type == 'f':
            return Lambda(self._f.replace(variable, expression),
                          self._arg.replace(variable, expression))

    def simplify(self):
        """Beta-reduce the term and return the simplified term."""
        if self._type == 'v':
            return self
        elif self._type == 'l':
            return Lambda(self._var, self._term.simplify())
        elif self._type == 'f':
            f = self._f.simplify()
            arg = self._arg.simplify()
            if f._type == 'l':
                # (\x.body) arg  ->  body[x := arg], then keep reducing.
                return f._term.replace(f._var, arg).simplify()
            else:
                # BUG FIX: the original returned `self' here, throwing away
                # the simplified function and argument; rebuild the
                # application from the simplified parts instead.
                return Lambda(f, arg)

    def __str__(self, continuation=0):
        if self._type == 'v':
            return '%s' % self._var
        elif self._type == 'l':
            # Nested abstractions print as \xy.body rather than \x.\y.body.
            if continuation:
                prefix = ' '
            else:
                prefix = '\\'
            if self._term._type == 'l':
                return '%s%s%s' % (prefix, self._var, self._term.__str__(1))
            else:
                return '%s%s.%s' % (prefix, self._var, self._term)
        elif self._type == 'f':
            str_f = str(self._f)
            if self._f._type == 'f':
                # Drop redundant parentheses on the function position.
                str_f = str_f[1:-1]
            return '(%s %s)' % (str_f, self._arg)

    def __repr__(self):
        # NOTE(review): the 'l' and 'f' cases print the same constructor
        # form and do not round-trip; kept as-is to preserve behaviour.
        if self._type == 'v':
            return "Lambda('%s')" % self._var
        elif self._type == 'l':
            return "Lambda('%s', '%s')" % (self._var, self._term)
        elif self._type == 'f':
            return "Lambda('%s', '%s')" % (self._f, self._arg)
def expressions():
    """
    Return a sequence of test expressions [N, P, S] for the Lambda class.

    The original version also built B, C, O, X..Z combinations and other
    intermediate terms that never contributed to the returned expressions;
    those unused locals have been removed.  The returned list is unchanged.
    """
    A = Lambda('a')
    X = Lambda('x')
    XA = Lambda(X, A)
    xX = Lambda('x', X)
    xyX = Lambda('x', Lambda('y', X))
    S = Lambda(xyX, A)
    N = Lambda(xX, A)
    P = Lambda(Lambda('x', XA), xX)
    return [N, P, S]
# Demo: show each test expression alongside its beta-reduced form
# (Python 2 print statement).
for expr in expressions():
    print expr, "->", expr.simplify()
| Python |
import math
import os
# tagger importing
from en.parser.nltk_lite import tag
from en.parser.nltk_lite.tag import SequentialBackoff
# work-around while marshal is not moved into standard tree
from en.parser.nltk_lite.contrib.marshal import MarshalDefault ; Default = MarshalDefault
from en.parser.nltk_lite.contrib.marshal import MarshalUnigram ; Unigram = MarshalUnigram
from en.parser.nltk_lite.contrib.marshal import MarshalAffix ; Affix = MarshalAffix
from en.parser.nltk_lite.contrib.marshal import MarshalNgram ; Ngram = MarshalNgram
from en.parser.nltk_lite.contrib.marshalbrill import *
class CombinedTagger (SequentialBackoff):
    """
    A chain of backoff taggers (default -> affix -> unigram -> ngram),
    optionally topped by a Brill transformation-based tagger, with
    marshal/unmarshal support for writing the trained model to disk and
    reading it back.

    NOTE(review): Python 2 code (`file()', print statements); relies on the
    Marshal* tagger classes and marshalbrill imported at module level.
    """
    def __init__ (self):
        # Ordered chain of taggers; each new tagger backs off to the
        # previous one, so the last element is the entry point for tagging.
        self._tagger = []
        # Optional Brill tagger trained on top of the chain.
        self._brill = None

    def _append_default (self, default_tag, verbose=False):
        # Seed the chain with a tagger that always answers `default_tag'.
        self._tagger.append( Default(default_tag) )

    def _append_affix (self, a_len, w_len, train_sents, verbose=False):
        # Affix tagger using suffixes/prefixes of length a_len on words of
        # at least w_len characters, backing off to the current chain end.
        self._tagger.append( Affix(a_len, w_len, backoff=self._tagger[-1]) )
        self._tagger[-1].train([train_sents], verbose)

    def _append_unigram (self, train_sents, verbose=False):
        self._tagger.append( Unigram(backoff=self._tagger[-1]) )
        self._tagger[-1].train(train_sents, verbose)

    def _append_ngram (self, size, train_sents, verbose=False, cutoff_value=0.001):
        # Contexts seen fewer than `cutoff' times (a fraction of the
        # training size) are discarded by the ngram tagger.
        cutoff = math.floor(len(train_sents)*cutoff_value)
        self._tagger.append( Ngram(size, cutoff=cutoff, backoff=self._tagger[-1]) )
        self._tagger[-1].train([train_sents], verbose)

    def _append_brill (self, train_sents, max_rules, min_score=2, trace=0):
        # Standard tag/word proximity templates for Brill rule induction.
        templates = [
            SymmetricProximateTokensTemplate(ProximateTagsRule, ( 1, 1) ),
            SymmetricProximateTokensTemplate(ProximateTagsRule, ( 2, 2) ),
            SymmetricProximateTokensTemplate(ProximateTagsRule, ( 1, 2) ),
            SymmetricProximateTokensTemplate(ProximateTagsRule, ( 1, 3) ),
            SymmetricProximateTokensTemplate(ProximateWordsRule, ( 1, 1) ),
            SymmetricProximateTokensTemplate(ProximateWordsRule, ( 2, 2) ),
            SymmetricProximateTokensTemplate(ProximateWordsRule, ( 1, 2) ),
            SymmetricProximateTokensTemplate(ProximateWordsRule, ( 1, 3) ),
            ProximateTokensTemplate (ProximateTagsRule, (-1, -1), (1,1) ),
            ProximateTokensTemplate (ProximateWordsRule, (-1, -1), (1,1) ),
            ]
        trainer = BrillTrainer(self._tagger[-1], templates, trace)
        self._brill = trainer.train(train_sents, max_rules, min_score)

    def marshal (self, basepath):
        # create the model files, one for each tagger (*.mod) plus a general one
        handler = file(os.path.join(basepath, "model.mrs"), "w")
        for index in range(len(self._tagger)):
            filename = os.path.join(basepath, "tagger%02d.mod" % index)
            handler.write("%s %s\n" % (self._tagger[index]._classname, filename) )
            self._tagger[index].marshal(filename)
        # The Brill tagger is written after the chain, reusing the loop
        # variable `index' for the next free slot.
        # NOTE(review): fails with NameError when self._tagger is empty and
        # with AttributeError when self._brill is None -- confirm callers
        # always train both before marshalling.
        filename = os.path.join(basepath, "tagger%02d.mod" % (index+1))
        handler.write("%s %s\n" % (self._brill._classname, filename) )
        self._brill.marshal(filename)
        handler.close()

    def unmarshal (self, basepath):
        # clear taggers
        self._tagger = []
        self._brill = None
        # read model's configuration
        filename = os.path.join(basepath, "model.mrs")
        handler = file(filename, "r")
        model = handler.readlines()
        handler.close()
        model = [line[:-1] for line in model] # remove "\n"s
        model = [line for line in model if len(line) > 0] # remove empty lines
        # tagger by tagger: each line is "<ClassName> <model file>"
        for tagger in model:
            tagger_type, tagger_file = tagger.split(" ")
            if tagger_type == "DefaultTagger":
                self._tagger.append( Default("") )
                self._tagger[-1].unmarshal(tagger_file)
            elif tagger_type == "AffixTagger":
                # placeholder parameters; the real ones come from the file
                self._tagger.append( Affix(1, 2, backoff=self._tagger[-1]) )
                self._tagger[-1].unmarshal(tagger_file)
            elif tagger_type == "UnigramTagger":
                self._tagger.append( Unigram(backoff=self._tagger[-1]) )
                self._tagger[-1].unmarshal(tagger_file)
            elif tagger_type == "NgramTagger":
                self._tagger.append( Ngram(2, backoff=self._tagger[-1]) )
                self._tagger[-1].unmarshal(tagger_file)
            elif tagger_type == "BrillTagger":
                self._brill = Brill(self._tagger[-1], [])
                self._brill.unmarshal(tagger_file)
            else:
                print "error, tagger type not recognized."

    def exemple_train (self, train_sents, verbose=False):
        # Example training recipe: default tag "N", four affix taggers,
        # a unigram tagger, a bigram tagger and a Brill pass.
        # NOTE(review): "exemple" looks like a typo for "example" (see the
        # commented-out ct.example_train call in create_tagger), but the
        # name is part of the public interface and is kept.
        self._append_default("N")
        self._append_affix(-2, 6, train_sents, verbose)
        self._append_affix(-3, 7, train_sents, verbose)
        self._append_affix(-4, 8, train_sents, verbose)
        self._append_affix(-5, 9, train_sents, verbose)
        self._append_unigram(train_sents, verbose)
        self._append_ngram(2, train_sents, verbose)
        self._append_brill(train_sents, 1, 2, trace=3)

    def tag_one (self, token):
        # Delegate to the most specific tagger in the chain.
        return self._tagger[-1].tag_one(token)

    def tag (self, tokens, verbose=False):
        # Delegate to the most specific tagger in the chain.
        return self._tagger[-1].tag(tokens, verbose)
def create_tagger (train_sents):
    """
    Demo driver: load a previously marshalled CombinedTagger from the
    "tresoldi" directory, tag a sample Portuguese sentence, and print the
    tagger's accuracy on `train_sents'.  (Python 2 print statements.)
    """
    ct = CombinedTagger()
    # ct.example_train(train_sents, True)
    ct.unmarshal("tresoldi")

    tokens = "Mauro viu o livro sobre a mesa".split()
    print list(ct.tag(tokens))

    # tests
    acc = tag.accuracy(ct, [train_sents])
    print 'Accuracy = %4.2f%%' % (100 * acc)
| Python |
# Natural Language Toolkit: Kimmo Morphological Analyzer
#
# Copyright (C) 2001-2006 MIT
# Author: Carl de Marcken <carl@demarcken.org>
# Beracah Yankama <beracah@mit.edu>
# Robert Berwick <berwick@ai.mit.edu>
#
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Kimmo Morphological Analyzer. Supports proper recognizer completion,
generator ordering, kimmo control class, loader for own file format,
also .rul compatible with old pckimmo.
"""
# TODO: remove Unix dependencies
import Tkinter
import os, re, sys, types, string, glob, time, md5
from en.parser.nltk_lite.contrib.fsa import *
from en.parser.nltk_lite.corpora import get_basedir
from en.parser.nltk_lite import tokenize
############################# KIMMO GUI ##################################
"""
A gui for input of generative & recognition models
need 3 input boxes, one for text input, lexicon box, rules box
one output box?
need alternations rules and lexicon
plus 1 input test & recognition box.
we want to "step" through alternations
we want to "show" the rules that fire.
and we want batch mode, big file, or big input test with output.
"""
###########################################################################
from ScrolledText import ScrolledText
class KimmoGUI:
    def __init__(self, grammar, text, title='Kimmo Interface v1.78'):
        """
        Build the main Kimmo window: lexicon, rules and results panes with
        horizontal scrollbars, control buttons, Load/Save/Configs/Graph
        menus, the generate/recognize entry and the batch-file row.  Loads
        the 'kimmo.cfg' preset on startup and redirects print output into
        the GUI via PrintHook, then enters the Tkinter main loop.
        """
        # NOTE(review): `self.root' is set here but the rest of the class
        # uses `self._root' -- confirm whether `root' is ever read.
        self.root = None
        try:
            # tracing/highlighting state and change-detection checksums
            self.dbgTracing = None
            self.highlightIds = []
            self.tagId = 0
            self.lexmd5 = None
            self.rulemd5 = None
            self.lexicalGraphWindow = None

            # file names backing each pane
            self.rulfilename = ''
            self.lexfilename = ''
            self.altfilename = ''
            self.kimmoResultFile = ''
            self.helpFilename = 'kimmo.help'

            self._root = Tkinter.Tk()
            self._root.title(title)

            # top-to-bottom layout: control buttons, main panes, word
            # entry row, button row, batch row
            ctlbuttons = Tkinter.Frame(self._root)
            ctlbuttons.pack(side='top', fill='x')
            level1 = Tkinter.Frame(self._root)
            level1.pack(side='top', fill='none')
            Tkinter.Frame(self._root).pack(side='top', fill='none')
            level2 = Tkinter.Frame(self._root)
            level2.pack(side='top', fill='x')
            buttons = Tkinter.Frame(self._root)
            buttons.pack(side='top', fill='none')
            batchFrame = Tkinter.Frame(self._root)
            batchFrame.pack(side='top', fill='x')

            self.batchpath = Tkinter.StringVar()
            Tkinter.Label(batchFrame, text="Batch File:").pack(side='left')
            Tkinter.Entry(batchFrame, background='white', foreground='black',
                          width=30, textvariable=self.batchpath).pack(side='left')
            Tkinter.Button(batchFrame, text='Go!',
                           background='#a0c0c0', foreground='black',
                           command=self.batch).pack(side='left')

            self.debugWin = Tkinter.StringVar() # change to a window and field eventually.
            Tkinter.Entry(batchFrame, background='grey', foreground='red',
                          width=30, textvariable=self.debugWin).pack(side='right')

            self.wordIn = Tkinter.StringVar()
            Tkinter.Label(level2, text="Generate or Recognize:").pack(side='left')
            Tkinter.Entry(level2, background='white', foreground='black',
                          width=30, textvariable=self.wordIn).pack(side='left')

            # lexicon pane (left)
            lexiconFrame = Tkinter.Frame(level1)
            Tkinter.Label(lexiconFrame, text="Lexicon & Alternations").pack(side='top',
                                                                            fill='x')
            self.lexicon = ScrolledText(lexiconFrame, background='white',
                                        foreground='black', width=50, height=36, wrap='none')
            # setup the scrollbar
            scroll = Tkinter.Scrollbar(lexiconFrame, orient='horizontal',command=self.lexicon.xview)
            scroll.pack(side='bottom', fill='x')
            self.lexicon.configure(xscrollcommand = scroll.set)
            self.lexicon.pack(side='top')

            # rules pane (top right)
            midFrame = Tkinter.Frame(level1)
            rulesFrame = Tkinter.Frame(midFrame)
            rulesFrame.pack(side='top', fill='x')
            Tkinter.Label(rulesFrame, text="Rules/Subsets").pack(side='top',
                                                                 fill='x')
            self.rules = ScrolledText(rulesFrame, background='white',
                                      foreground='black', width=60, height=19, wrap='none')
            # setup the scrollbar
            scroll = Tkinter.Scrollbar(rulesFrame, orient='horizontal',command=self.rules.xview)
            scroll.pack(side='bottom', fill='x')
            self.rules.configure(xscrollcommand = scroll.set)
            self.rules.pack(side='top')

            # results pane (bottom right) with its own clear button
            midbetweenFrame = Tkinter.Frame(midFrame)
            midbetweenFrame.pack(side='top', fill='x')
            Tkinter.Button(midbetweenFrame, text='clear',
                           background='#f0f0f0', foreground='black',
                           command= lambda start=1.0, end=Tkinter.END : self.results.delete(start,end)
                           ).pack(side='right')
            Tkinter.Label(midbetweenFrame,
                          text="Results ").pack(side='right')

            self.results = ScrolledText(midFrame, background='white',
                                        foreground='black', width=60, height=13, wrap='none')
            # setup the scrollbar
            scroll = Tkinter.Scrollbar(midFrame, orient='horizontal',command=self.results.xview)
            scroll.pack(side='bottom', fill='x')
            self.results.configure(xscrollcommand = scroll.set)
            self.results.pack(side='bottom')

            # disabled alternation pane, kept for reference
            """
            alternationFrame = Tkinter.Frame(level1)
            Tkinter.Label(alternationFrame, text="Alternations").pack(side='top',
            fill='x')
            self.alternation = ScrolledText(alternationFrame, background='white',
            foreground='black', width=1, wrap='none')
            self.alternation.pack(side='top')
            """

            Tkinter.Button(ctlbuttons, text='Quit',
                           background='#a0c0c0', foreground='black',
                           command=self.destroy).pack(side='left')

            # Load menu: per-pane file loading
            self.loadMenuButton = Tkinter.Menubutton(ctlbuttons, text='Load', background='#a0c0c0', foreground='black', relief='raised')
            self.loadMenuButton.pack(side='left')
            self.loadMenu=Tkinter.Menu(self.loadMenuButton,tearoff=0)
            self.loadMenu.add_command(label='Load Lexicon', underline=0,command = lambda filetype='.lex', targetWindow = self.lexicon, tf = 'l' : self.loadTypetoTarget(filetype, targetWindow, tf))
            self.loadMenu.add_command(label='Load Rules', underline=0,command = lambda filetype='.rul', targetWindow = self.rules, tf = 'r' : self.loadTypetoTarget(filetype, targetWindow, tf))
            # self.loadMenu.add_command(label='Load Lexicon', underline=0,command = lambda filetype='.lex', targetWindow = self.lexicon : loadTypetoTarget(self, filetype, targetWindow))
            self.loadMenuButton["menu"]=self.loadMenu
            #
            # Save menu: per-pane file saving plus save-all
            self.saveMenuButton = Tkinter.Menubutton(ctlbuttons, text='Save',background='#a0c0c0', foreground='black', relief='raised')
            self.saveMenuButton.pack(side='left')
            self.saveMenu=Tkinter.Menu(self.saveMenuButton,tearoff=0)
            self.saveMenu.add_command(label='Save Lexicon', underline=0,command = lambda filename=self.lexfilename, sourceWindow = self.lexicon : self.writeToFilefromWindow(filename, sourceWindow,'w',0,'l'))
            self.saveMenu.add_command(label='Save Rules', underline=0,command = lambda filename=self.rulfilename, sourceWindow = self.rules : self.writeToFilefromWindow(filename, sourceWindow,'w',0,'r'))
            self.saveMenu.add_command(label='Save Results', underline=0,command = lambda filename='.results', sourceWindow = self.results : self.writeToFilefromWindow(filename, sourceWindow,'w',0))
            self.saveMenu.add_command(label='Save All', underline=0,command = self.saveAll)
            self.saveMenuButton["menu"]=self.saveMenu

            Tkinter.Label(ctlbuttons, text=" Preset:").pack(side='left')
            self.configValue = Tkinter.StringVar()
            self.configsMenuButton = Tkinter.Menubutton(ctlbuttons, text='Configs', background='#a0c0c0', foreground='black', relief='raised')
            self.configsMenuButton.pack(side='left')
            self.configsMenu=Tkinter.Menu(self.configsMenuButton,tearoff=0)
            # read the directory for cfgs, add them to the menu
            # add path expander, to expand ~ & given home dirs.
            # !!! this does not handle student student directories, if not the current dir!
            currentconfigfiles = glob.glob('*.cfg')
            for x in currentconfigfiles:
                newname = x # [0:len(x)-4] # remove the '.cfg'
                self.configsMenu.add_command(label=newname, underline=0,command = lambda newname=x : self.configLoader(newname)) # Callback(self.configLoader,newname))
            # we want this to call load on the specific config file
            if len(currentconfigfiles) == 0:
                # configsMenu.add_command(label='<none>',underline=0)
                self.configsMenuButton.configure(text='<none>')
            self.configsMenuButton["menu"]=self.configsMenu

            # toggle the different modes of this window
            # Tkinter.Button(ctlbuttons, text='->',
            # background='#ffd564', foreground='red',
            # command=self.generate).pack(side='right')
            #
            # Tkinter.Checkbutton(ctlbuttons, text='Stepping',
            # background='#b0f0d0', foreground='#008b45',
            # command=self.generate).pack(side='right')

            # NOTE(review): .pack() returns None, so self.tracingbtn and
            # self.helpbtn below are None -- confirm they are never used.
            self.tracingbtn = Tkinter.Button(ctlbuttons, text='Tracing',
                                             background='#fff0f0', foreground='black',
                                             command=lambda : self.create_destroyDebugTracing()).pack(side='right')

            # Graph menu: lexicon / FSA rule visualisation
            self.graphMenuButton = Tkinter.Menubutton(ctlbuttons, text='Graph', background='#d0d0e8', foreground='black', relief='raised')
            self.graphMenuButton.pack(side='right')
            self.graphMenu=Tkinter.Menu(self.graphMenuButton,tearoff=0)
            self.graphMenu.add_command(label='Graph Lexicon', underline=0,command = lambda which = 'l' : self.graph(which))
            self.graphMenu.add_command(label='Graph FSA Rules', underline=0,command = lambda which = 'r' : self.graph(which))
            # self.loadMenu.add_command(label='Load Lexicon', underline=0,command = lambda filetype='.lex', targetWindow = self.lexicon : loadTypetoTarget(self, filetype, targetWindow))
            self.graphMenuButton["menu"]=self.graphMenu

            self.helpbtn = Tkinter.Button(ctlbuttons, text='Help',
                                          background='#f0fff0', foreground='black',
                                          command=self.kimmoHelp).pack(side='right')

            lexiconFrame.pack(side='left')
            midFrame.pack(side='left')
            # alternationFrame.pack(side='left')

            Tkinter.Button(level2, text='Generate',
                           background='#a0c0c0', foreground='black',
                           command=self.generate).pack(side='left')
            Tkinter.Button(level2, text='Recognize',
                           background='#a0c0c0', foreground='black',
                           command=self.recognize).pack(side='left')

            # setup the vars for kimmo
            # eventually make this a kimmo object
            """
            self.klexicons = []
            self.kalternations = []
            self.ksubsets = []
            self.kdefaults = []
            self.krules = []
            """
            self.kimmoinstance = None
            self.kimmoResultFile = ''
            self.traceWindow = ''
            self.debug = False

            self.configLoader('kimmo.cfg')
            # self.batchpath.set("kimmo.batch_test")

            # capture all print messages
            self.phOut = PrintHook()
            self.phOut.Start(self.capturePrint)

            # Enter mainloop.
            Tkinter.mainloop()
        except:
            print 'Error creating Tree View'
            self.destroy()
            raise
    def init_menubar(self):
        """
        Build the File menu (Save/Load/Clear Rules, Exit) and bind the
        matching Ctrl-key accelerators on the root window.

        NOTE(review): not called from the visible __init__, and it refers
        to self.save, whose definition is commented out elsewhere in this
        class -- confirm this method is still live.
        """
        menubar = Tkinter.Menu(self._root)

        filemenu = Tkinter.Menu(menubar, tearoff=0)
        filemenu.add_command(label='Save Rules', underline=0,
                             command=self.save, accelerator='Ctrl-s')
        self._root.bind('<Control-s>', self.save)
        filemenu.add_command(label='Load Rules', underline=0,
                             command=self.load, accelerator='Ctrl-o')
        self._root.bind('<Control-o>', self.load)
        filemenu.add_command(label='Clear Rules', underline=0,
                             command=self.clear, accelerator='Ctrl-r')
        self._root.bind('<Control-r>', self.clear)
        filemenu.add_command(label='Exit', underline=1,
                             command=self.destroy, accelerator='Ctrl-q')
        self._root.bind('<Control-q>', self.destroy)
        menubar.add_cascade(label='File', underline=0,
                            menu=filemenu)
        self._root.config(menu=menubar)
def guiError(self, *args):
self.debugWin.set(args[0].strip())
    def create_destroyDebugTracing(self, *args):
        """
        Toggle the Tracing/Debug toplevel window: destroy it if it already
        exists, otherwise create it (clear/save buttons plus a scrolled
        trace pane) and turn debug mode on.
        """
        if (self.dbgTracing):
            # Window exists: tear it down and disable debugging.
            self.dbgTracing.destroy()
            self.dbgTracing = None
            self.debug = False
        else:
            try:
                # Build the tracing window with a button row on top and
                # the scrolled trace pane below.
                self.dbgTracing = Tkinter.Toplevel()
                self.dbgTracing.title("Tracing/Debug")
                dbgTraceFrame2 = Tkinter.Frame(self.dbgTracing)
                dbgTraceFrame2.pack(side='top', fill='x')
                dbgTraceFrame = Tkinter.Frame(self.dbgTracing)
                dbgTraceFrame.pack(side='top', fill='x',expand='yes')
                self.traceWindow = ScrolledText(dbgTraceFrame, background='#f4f4f4',
                                                foreground='#aa0000', width=45, height=24, wrap='none')
                Tkinter.Button(dbgTraceFrame2, text='clear',
                               background='#a0c0c0', foreground='black',
                               command= lambda start=1.0, end=Tkinter.END : self.traceWindow.delete(start,end)
                               ).pack(side='right')
                Tkinter.Button(dbgTraceFrame2, text='Save',
                               background='#a0c0c0', foreground='black',
                               command= lambda file=self.kimmoResultFile,windowName=self.traceWindow,mode='w',auto=0 : self.writeToFilefromWindow(file,windowName,mode,auto)
                               ).pack(side='left')
                scroll = Tkinter.Scrollbar(dbgTraceFrame, orient='horizontal',command=self.traceWindow.xview)
                scroll.pack(side='bottom', fill='x')
                self.traceWindow.configure(xscrollcommand = scroll.set)
                self.traceWindow.pack(side='bottom')
                self.debug = True
                # this will automatically clean itself up.
                self.dbgTracing.protocol("WM_DELETE_WINDOW", self.create_destroyDebugTracing)
            except:
                print 'Error creating Tree View'
                self.dbgTracing.destroy()
                self.dbgTracing = None
                self.debug = False
                raise
def writeToFilefromWindow(self, filename, windowName, mode, auto, wt=None):
# filename from var
# if not file: file='.txt'
# if append, add on, if overwrite, then ya
if not (auto and windowName and filename):
from tkFileDialog import asksaveasfilename
ftypes = [('Text file', '.txt'),('Rule file', '.rul'),('Lexicon file', '.lex'),('Alternations file', '.alt'),
('All files', '*')]
filename = asksaveasfilename(filetypes=ftypes,
defaultextension='', initialfile=filename)
if not filename:
self.guiError('Need File Name')
return
f = open(filename, 'w')
f.write(windowName.get(1.0,Tkinter.END))
f.close()
if filename:
if wt == 'l': self.lexfilename = filename
elif wt == 'r': self.rulfilename = filename
# create a window update class
# and a window resize class
# default save; all file names are known, so it saves to them.
def saveAll(self, *args):
# automatic write
self.writeToFilefromWindow(self.lexfilename,self.lexicon,'w',1)
self.writeToFilefromWindow(self.rulfilename,self.rules,'w',1)
# self.writeToFilefromWindow(self.altfilename,self.alternation,'w',1)
self.writeToFilefromWindow(self.resfilename,self.results,'w',1)
"""
def save(self, *args):
"Save a rule/lexicon set to a text file"
from tkFileDialog import asksaveasfilename
ftypes = [('Text file', '.txt'),
('All files', '*')]
filename = asksaveasfilename(filetypes=ftypes,
defaultextension='.txt')
if not filename: return
f = open(filename, 'w')
f.write('---- Rules -----\n%s\n' % '\n'.join(self.getRules(False)))
f.write('---- Lexicon -----\n%s\n' % '\n'.join(self.getLexicon(False)))
f.close()
"""
    def configLoader(self,*args):
        """
        Load a preset .cfg file (args[0]) and populate the GUI from it.

        Each line has the form "<key>: <path>" where key is one of
        'lexicon', 'rules', 'results' or 'batch'; lines starting with '#'
        or ';' are comments.  Clears all panes and forgets previous file
        names before loading.
        """
        print args[0]
        filename = args[0]
        # if arg is a valid file, load by line.
        # handle the different types of files
        if filename:
            f = read_kimmo_file(filename, self)
            lines = f.readlines()
            f.close()
            # clear all panes
            self.clear()
            # now set the menu
            self.configsMenuButton.configure(text=filename)
            # reset gui name variables
            # so that nothing gets overwritten.
            # these file name variables will be changed if
            # either the cfg changes it, or the person loads a different file
            self.rulfilename = ''
            self.lexfilename = ''
            self.altfilename = ''
            self.kimmoResultFile = ''
            self.batchpath.set('')
            for line in lines:
                line = line.strip()
                cfgargs = line.split(":")
                for x in range(len(cfgargs)): cfgargs[x] = cfgargs[x].strip()
                if len(line) == 0: continue
                elif (line[0] == '#') or (line[0] == ';'): continue # comment
                elif cfgargs[0] == 'lexicon':
                    self.lexfilename = self.loadIntoWindow(os.path.expanduser(cfgargs[1]),self.lexicon)
                elif cfgargs[0] == 'rules':
                    self.rulfilename = self.loadIntoWindow(os.path.expanduser(cfgargs[1]),self.rules)
                #elif cfgargs[0] == 'alternations':
                # self.loadIntoWindow(cfgargs[1],self.alternation)
                # self.altfilename = cfgargs[1]
                elif cfgargs[0] == 'results':
                    self.kimmoResultFile = os.path.expanduser(cfgargs[1])
                    self.resfilename = os.path.expanduser(cfgargs[1])
                elif cfgargs[0] == 'batch': self.batchpath.set(os.path.expanduser(cfgargs[1]))
                # !
                else: self.guiError('unknown line :' + line)
                # print line
        else: self.guiError('Empty Filename')
def loadIntoWindow(self, filename, windowField):
"Load rule/lexicon set from a text file directly into the window pane specified"
# filename = args[0]
# windowField = args[1]
if filename:
filename = os.path.expanduser(filename)
f = read_kimmo_file(filename, self)
lines = f.readlines()
f.close()
text = []
for line in lines:
line = line.strip()
text.append(line)
# empty the window now that the file was valid
windowField.delete(1.0, Tkinter.END)
windowField.insert(1.0, '\n'.join(text))
return filename
return ''
# opens a load dialog for files of a specified type to be loaded into a specified window
    def loadTypetoTarget(self, fileType, targetWindow, ftype = None):
        """
        Open a file-selection dialog filtered to `fileType' (e.g. '.lex')
        and load the chosen file into `targetWindow'.  `ftype' ('l' or 'r')
        records which pane's backing file name should be updated.
        """
        if not (fileType and targetWindow): return
        from tkFileDialog import askopenfilename
        ftypes = [(fileType, fileType)]
        filename = askopenfilename(filetypes=ftypes, defaultextension=fileType)
        self.loadIntoWindow(filename, targetWindow)
        # set the config menu to blank
        self.configsMenuButton.configure(text='<none>')
        # !!! remember to reset all the filenames as well!
        if filename:
            if ftype == 'l': self.lexfilename = filename
            elif ftype == 'r': self.rulfilename = filename
    def load(self, *args):
        """
        Load rule/lexicon set from a text file.

        The file dialog is currently bypassed: the hard-coded files
        'kimmo.lex', 'kimmo.alt' and 'kimmo.rul' are loaded into the
        corresponding panes.

        NOTE(review): writes to self.alternation, but creation of the
        alternation pane is commented out in __init__ -- confirm this code
        path still works.
        """
        # graphical interface to file loading.
        from tkFileDialog import askopenfilename
        ftypes = [('Text file', '.txt'),
                  ('All files', '*')]
        # filename = askopenfilename(filetypes=ftypes, defaultextension='.txt')
        filename = 'kimmo.lex'
        if filename:
            f = read_kimmo_file(filename, self)
            lines = f.readlines()
            f.close()
            rules = []
            lexicon = []
            alternations = []
            state = 'rules'
            for line in lines:
                line = line.strip()
                lexicon.append(line)
            self.clear()
            self.lexicon.insert(1.0, '\n'.join(lexicon))
        # now load up the alternations
        filename = 'kimmo.alt'
        if filename:
            f = read_kimmo_file(filename, self)
            lines = f.readlines()
            f.close()
            for line in lines:
                line = line.strip()
                alternations.append(line)
            self.alternation.insert(1.0, '\n'.join(alternations))
        filename = 'kimmo.rul'
        if filename:
            f = read_kimmo_file(filename, self)
            lines = f.readlines()
            f.close()
            for line in lines:
                line = line.strip()
                rules.append(line)
            self.rules.insert(1.0, '\n'.join(rules))
def clear(self, *args):
    "Clears the grammar and lexical and sentence inputs"
    # Wipe each editable pane from start to end.  The alternations pane
    # is intentionally not cleared here.
    for pane in (self.lexicon, self.rules, self.results):
        pane.delete(1.0, Tkinter.END)
def destroy(self, *args):
    """Tear down the main window; safe to call more than once."""
    if self._root is None: return
    # stop redirecting print output before the window goes away
    self.phOut.Stop()
    self._root.destroy()
    self._root = None
# for single stepping through a trace.
# need to make the kimmo class capable of being interrupted & resumed.
def step(self, *args):
    # TODO: single-step stub -- the Kimmo engine cannot yet be
    # interrupted and resumed; prints a placeholder for now.
    print 'a'
def singlestep(self, *args):
    # TODO: single-step stub (see step); prints a placeholder for now.
    print 'a'
def batch(self, *args):
    """Run a batch file of test words through the Kimmo engine.

    Each non-blank, non-comment line of the file selects one operation:
    'g word' generates, 'r word' recognizes, a bare word containing '+'
    generates, any other bare word is recognized.  All results are
    prepended to the results pane under a timestamp separator.
    """
    filename = self.batchpath.get()
    if filename:
        f = read_kimmo_file(filename, self)
        lines = f.readlines()
        f.close()
        self.initKimmo()
        # space the results out a little
        self.results.insert(1.0, '\n')
        results_string = ''
        for line in lines:
            # a 'g word' 'r word' format
            singleword = line.strip() # should be a single word, no spaces, etc.
            spcr = re.compile(r"\s+")
            linevals = []
            linevals = spcr.split(singleword)
            batch_result = []
            batch_result_str = ''
            if not singleword: continue # ignore blank lines
            elif (singleword[0] == '#') or (singleword[0] == ';'): # commented;
                # comment lines are echoed verbatim into the results
                results_string += (singleword + '\n')
                # self.results.insert(Tkinter.END, singleword + '\n') # send directly to results pane
            elif (linevals[0] == 'g') and (len(linevals) == 2):
                batch_result = self.kimmoinstance.generate(linevals[1])
            elif (linevals[0] == 'r') and (len(linevals) == 2):
                batch_result = self.kimmoinstance.recognize(linevals[1])
            elif '+' in singleword:
                batch_result = self.kimmoinstance.generate(singleword)
            else:
                batch_result = self.kimmoinstance.recognize(singleword)
            # if a valid results
            if len(batch_result) > 0:
                for x in batch_result: batch_result_str = batch_result_str + x
                batch_result_str = batch_result_str + '\n'
                results_string += (batch_result_str)
                # self.results.insert(Tkinter.END, batch_result_str)
        # place a separator between results
        self.results.insert(1.0, '----- '+ time.strftime("%a, %d %b %Y %I:%M %p", time.gmtime()) +' -----\n')
        self.results.insert(2.0, results_string)
        self.results.see(1.0)
    if self.traceWindow:
        self.highlightMatches(' BLOCKED',self.traceWindow,'#ffe0e0')
        self.highlightMatches(' AT END OF WORD',self.traceWindow,'#e0ffe0')
# if the path is set, load the file
# init the engine
# choose between recognize & generate
# generation test
def generate(self, *args):
    """Generate surface forms for the word typed into the input field.

    (Re)initializes the Kimmo engine if the panes changed, runs the
    generator, and prepends the joined results to the results pane.
    Highlights trace markers when tracing is enabled.
    """
    if self._root is None: return
    if len(self.wordIn.get()) > 0:
        self.initKimmo()
        # BUGFIX: str.strip() returns a new string; the old code called
        # tmpword.strip() and discarded the result, so surrounding
        # whitespace was never actually removed.
        tmpword = self.wordIn.get().strip()
        # generate_result = _generate_test(self.ks, tmpword)
        generate_result = self.kimmoinstance.generate(tmpword)
        # convert list to string
        generate_result_str = ''.join(generate_result) + '\n'
        self.results.insert(1.0, generate_result_str)
        if self.dbgTracing:
            self.highlightMatches(' BLOCKED',self.traceWindow,'#ffe0e0')
            self.highlightMatches(' AT END OF WORD',self.traceWindow,'#e0ffe0')
            self.highlightMatches('SUCCESS!',self.traceWindow,'#e0ffe0')
def recognize(self, *args):
    """Recognize (analyze) the surface word typed into the input field.

    Clears previous lexicon highlights, (re)initializes the Kimmo engine
    if needed, and prepends the joined analyses to the results pane.
    """
    self.lexicon.tag_delete("highlight")
    if self._root is None: return
    if len(self.wordIn.get()) > 0:
        self.initKimmo()
        # BUGFIX: str.strip() returns a new string; the old code called
        # tmpword.strip() and discarded the result, leaving whitespace
        # attached to the word handed to the recognizer.
        tmpword = self.wordIn.get().strip()
        # recognize_result = _recognize_test(self.ks, tmpword, self.km)
        recognize_result = self.kimmoinstance.recognize(tmpword)
        # convert list to string
        recognize_result_str = ''.join(recognize_result) + '\n'
        self.results.insert(1.0, recognize_result_str)
        if self.dbgTracing:
            self.highlightMatches(' BLOCKED',self.traceWindow,'#ffe0e0')
            self.highlightMatches(' AT END OF WORD',self.traceWindow,'#e0ffe0')
# accept gui graph command
# create kimmoinstance
# and then process / display one of the graphs.
def graph(self, which):
    """Render the lexicon ('l') or one selected FSA rule ('r') via
    graphviz and show the image in a new window.

    @param which: 'l' for the lexicon graph, 'r' for a rule FSA
    """
    self.initKimmo()
    graphtitle = ''
    # we want to save in the local dir.
    # lex/rulefilenames are fully qualified.
    # so we test the local dir & strip the path off of the filename.
    # check & set path, if necessary, need read and write access to path
    path = ''
    pathstatus = os.stat('./') # 0600 is r/w, binary evaluation
    # NOTE(review): pathstatus[0] is st_mode, which also carries the
    # file-type bits; masking with 0600 checks only the owner r/w bits of
    # the cwd -- confirm this is the intended writability test.
    if not ((pathstatus[0] & 0600) == 0600):
        path = '/tmp/' + str(os.environ.get("USER")) + '/' # need terminating /
        if not os.path.exists(path):
            os.mkdir(path,0777)
    pathre = re.compile(r"^.*\/")
    if which == 'l':
        graphfname = path + pathre.sub("", self.lexfilename)
        dotstring = dotformat(self.kimmoinstance.lexicalNodes)
        leximagefile = dot2image(graphfname, dotstring)
        graphtitle = 'Lexicon Graph'
    elif which == 'r':
        graphfname = path + pathre.sub("", self.rulfilename)
        tmpOptions = []
        for x in self.kimmoinstance.fsasNodes:
            # print x['name']
            tmpOptions.append(x['name'])
        # modal selection dialog; ld.result is the chosen listbox index
        ld = ListDialog(self._root,tmpOptions,"Select FSA")
        if not ld.result: return
        # now create the dotstring & image from the (single) selection
        dotstring = dotformat(self.kimmoinstance.fsasNodes[string.atoi(ld.result[0])]['nodes'])
        graphtitle = 'FSA ' + self.kimmoinstance.fsasNodes[string.atoi(ld.result[0])]['name']
        # make file read:
        # something.rul.1.gif (where 1 is the rule index number)
        graphfname += ('.' + str(ld.result[0]))
        # check if that file already exists, if so, append an iteration number onto it.
        leximagefile = dot2image(graphfname, dotstring)
    # if this is an imagefile, then create a new window for it.
    if leximagefile:
        if self.lexicalGraphWindow: self.lexicalGraphWindow.destroy()
        self.lexicalGraphWindow = tkImageView(leximagefile, graphtitle)
# validates the lexicon against the alternations to make certain there
# are no misreferences/misspellings of refs.
def validate(self,*args):
    """Cross-check lexicon names against alternation names and highlight
    any name that appears on only one side."""
    self.tagId = 1
    for x in self.lexicon.tag_names(): self.lexicon.tag_delete(x)
    # for x in self.highlightIds: x[0].tag_delete(x[1])
    for l in self.kimmoinstance.validateLexicon:
        if not l in self.kimmoinstance.validateAlternations:
            if l:
                # NOTE(review): this flags a lexicon name that no
                # alternation mentions -- the two error labels here and
                # below may be swapped; confirm intended wording.
                self.guiError('Unused Alternation')
                self.highlightMatches(l,self.lexicon,'#ffffc0')
    for a in self.kimmoinstance.validateAlternations:
        if not a in self.kimmoinstance.validateLexicon:
            if a:
                self.guiError('Unknown Alternation Name')
                self.highlightMatches(a,self.lexicon,'#ffffc0')
# highlight matching words in given window
def highlightMatches(self, word, window,color):
    """Tag every occurrence of C{word} in the text widget C{window} with
    the given background C{color}.

    Each match gets its own numeric tag (self.tagId); the (window, tag)
    pairs are recorded in self.highlightIds for later removal.
    """
    # assumes unbroken with whitespace words.
    if not word: return
    matchIdx = '1.0'
    matchRight = '1.0'
    while matchIdx != '':
        matchIdx = window.search(word,matchRight,count=1,stopindex=Tkinter.END)
        if matchIdx == '': break
        # matchIdx is a 'line.column' index; the match ends len(word)
        # columns to the right of it
        strptr = matchIdx.split(".")
        matchRight = strptr[0] + '.' + str((int(strptr[1],10) + len(word)))
        window.tag_add(self.tagId, matchIdx, matchRight )
        window.tag_configure(self.tagId,background=color, foreground='black')
        self.highlightIds.append([window,self.tagId])
        self.tagId = self.tagId + 1
# INIT KIMMO
def initKimmo(self, *args):
    """
    Initialize the Kimmo engine from the lexicon. This will get called no
    matter generate or recognize. (i.e. loading all rules, lexicon, and
    alternations)

    Rebuilds the KimmoControl instance only when the rules or lexicon
    pane contents have actually changed since the last build.
    """
    # BUGFIX: compare hex digest strings, not md5 objects.  md5 objects
    # have no __eq__, so '!=' compared identity and was always true,
    # which forced a full Kimmo rebuild on every call.
    tmprmd5 = md5.new(self.rules.get(1.0, Tkinter.END)).hexdigest()
    tmplmd5 = md5.new(self.lexicon.get(1.0, Tkinter.END)).hexdigest()
    if (not self.kimmoinstance) or (self.rulemd5 != tmprmd5) or (self.lexmd5 != tmplmd5):
        self.guiError("Creating new Kimmo instance")
        self.kimmoinstance = KimmoControl(self.lexicon.get(1.0, Tkinter.END),self.rules.get(1.0, Tkinter.END),'','',self.debug)
        self.guiError("")
        # remember the digests of what we just compiled
        self.rulemd5 = tmprmd5
        self.lexmd5 = tmplmd5
        if not self.kimmoinstance.ok:
            self.guiError("Creation of Kimmo Instance Failed")
            return
        if not self.kimmoinstance.m.initial_state() :
            self.guiError("Morphology Setup Failed")
        elif self.kimmoinstance.errors:
            self.guiError(self.kimmoinstance.errors)
            self.kimmoinstance.errors = ''
    # self.validate()
def refresh(self, *args):
    """Debug helper: echo the current input-field contents to stdout."""
    if self._root is None: return
    print self.wordIn.get()
# CAPTURE PYTHON-KIMMO OUTPUT
# redirect to debug window, if operational
def capturePrint(self,*args):
    """PrintHook callback: route captured stdout text into the trace
    window when tracing is on, otherwise drop it.

    Returns the (proceed, lineNoMode, newText) triple that PrintHook
    expects; (0, 0, '') means 'do not forward to the real stdout'.
    """
    # self.debugWin.set(string.join(args," "))
    # if there is a trace/debug window
    if self.dbgTracing:
        self.traceWindow.insert(Tkinter.END, string.join(args," "))
        self.traceWindow.see(Tkinter.END)
    # otherwise, just drop the output.
    # no no, if tracing is on, but no window, turn tracing off and cleanup window
    # !!! if tracing is on, but window is not defined, create it.
    # this will cause a post-recover from an improper close of the debug window
    # if tracing is not on, ignore it.
    # return 1,1,'Out Hooked:'+text
    return 0,0,''
def kimmoHelp(self,*args):
    """Open the help file in a new scrollable window and highlight the
    '::section::' headings it contains."""
    # helpText = """
    # """
    # load help into helpfile
    # helpText = Tkinter.StringVar()
    helpText = ''
    try: f = open(self.helpFilename, 'r')
    except IOError, e:
        self.guiError("HelpFile not loaded")
        return
    self.guiError("") # no errors to report here
    # this is not the best idea, what if there are many errors
    # from different functions?
    helpText = str(f.read())
    f.close()
    # strip carriage returns (CRLF line endings)
    helpText = re.sub("\r","",helpText)
    helpWindow = Tkinter.Toplevel()
    helpWindow.title("PyKimmo Documentation & Help")
    # help = Tkinter.Label(helpWindow,textvariable=helpText, justify='left' ) #
    help = ScrolledText(helpWindow, background='#f0f0f0',
        foreground='black', width=70, height=40,wrap='none',
        font='Times 12 bold') #
    help.pack(side='top')
    help.insert(1.0, helpText)
    # setup the scrollbar
    scroll = Tkinter.Scrollbar(helpWindow, orient='horizontal',command=help.xview)
    scroll.pack(side='bottom', fill='x')
    help.configure(xscrollcommand = scroll.set)
    # now highlight up the file
    # search backwards from the end for '::heading::' lines, tagging each
    matchIdx = Tkinter.END
    matchRight = Tkinter.END
    matchLen = Tkinter.IntVar()
    tagId = 1
    while 1:
        matchIdx = help.search(r"::[^\n]*::",matchIdx, stopindex=1.0, backwards=True, regexp=True, count=matchLen )
        if not matchIdx: break
        matchIdxFields = matchIdx.split(".")
        matchLenStr = matchIdxFields[0] + "." + str(string.atoi(matchIdxFields[1],10) + matchLen.get())
        print (matchIdx, matchLenStr)
        help.tag_add(tagId, matchIdx, matchLenStr )
        help.tag_configure(tagId, background='aquamarine', foreground='blue', underline=True)
        tagId += 1
################################ PRINT HOOK ######################
# this class gets all output directed to stdout(e.g by print statements)
# and stderr and redirects it to a user defined function
class PrintHook:
    """Redirects everything written to stdout (out=1) or stderr (out=0)
    through a user-supplied function.

    The hook function receives the text being written and must return a
    (proceed, lineNoMode, newText) triple: when proceed is true, newText
    is forwarded to the original stream, optionally prefixed with the
    caller's file/function/line when lineNoMode is true.
    """
    #out = 1 means stdout will be hooked
    #out = 0 means stderr will be hooked
    def __init__(self,out=1):
        self.func = None ##self.func is userdefined function
        self.origOut = None
        self.out = out
    #user defined hook must return three variables
    #proceed,lineNoMode,newText
    def TestHook(self,text):
        # default hook: append everything to a log file, suppress echo
        f = open('hook_log.txt','a')
        f.write(text)
        f.close()
        return 0,0,text
    def Start(self,func=None):
        """Install this object as sys.stdout/sys.stderr, remembering the
        real stream so writes can still be forwarded."""
        if self.out:
            sys.stdout = self
            self.origOut = sys.__stdout__
        else:
            sys.stderr= self
            self.origOut = sys.__stderr__
        if func:
            self.func = func
        else:
            self.func = self.TestHook
    #Stop will stop routing of print statements thru this class
    def Stop(self):
        self.origOut.flush()
        if self.out:
            sys.stdout = sys.__stdout__
        else:
            sys.stderr = sys.__stderr__
        self.func = None
    #override write of stdout
    def write(self,text):
        proceed = 1
        lineNo = 0
        addText = ''
        if self.func != None:
            proceed,lineNo,newText = self.func(text)
        if proceed:
            if text.split() == []:
                # pure whitespace: pass through untouched
                self.origOut.write(text)
            else:
                #if goint to stdout then only add line no file etc
                #for stderr it is already there
                if self.out:
                    if lineNo:
                        try:
                            # raise/catch purely to capture the caller's
                            # frame via the traceback
                            raise "Dummy"
                        except:
                            newText = 'line('+str(sys.exc_info()[2].tb_frame.f_back.f_lineno)+'):'+newText
                            codeObject = sys.exc_info()[2].tb_frame.f_back.f_code
                            fileName = codeObject.co_filename
                            funcName = codeObject.co_name
                            self.origOut.write('file '+fileName+','+'func '+funcName+':')
                self.origOut.write(newText)
    #pass all other methods to __stdout__ so that we don't have to override them
    def __getattr__(self, name):
        return self.origOut.__getattr__(name)
class tkImageView:
    """A plain Toplevel window that displays a single image file."""
    def __init__(self, imagefileName, title):
        self._root = Tkinter.Toplevel()
        self._root.title(title + ' (' + imagefileName + ')')
        # keep a reference to the image so Tkinter does not GC it
        self.image = Tkinter.PhotoImage("LGraph",file=imagefileName)
        label = Tkinter.Label(self._root, image=self.image)
        label.pack(side='top',fill='x')
    def destroy(self, *args):
        # idempotent teardown: drop the window and the image reference
        if not self._root:
            return
        self._root.destroy()
        self._root = None
        self.image = None
######################### Dialog Boxes ##############################
class ListDialog(Tkinter.Toplevel):
    """Modal dialog presenting a single-selection listbox.

    After construction returns, C{self.result} holds the selected listbox
    index tuple (from curselection), or None if the dialog was cancelled.
    """
    def __init__(self, parent, listOptions, title = None):
        Tkinter.Toplevel.__init__(self, parent)
        # keep the dialog on top of its parent
        self.transient(parent)
        if title:
            self.title(title)
        self.parent = parent
        self.result = None
        body = Tkinter.Frame(self)
        self.initial_focus = self.body(body)
        body.pack(padx=5, pady=5)
        box = Tkinter.Frame(self)
        Tkinter.Label(box,text="Select an FSA to graph").pack(side='top',fill='x')
        box.pack()
        self.listbox(listOptions)
        self.buttonbox()
        # make the dialog modal
        self.grab_set()
        if not self.initial_focus:
            self.initial_focus = self
        self.protocol("WM_DELETE_WINDOW", self.cancel)
        self.geometry("+%d+%d" % (parent.winfo_rootx()+50,
                                  parent.winfo_rooty()+50))
        self.initial_focus.focus_set()
        # block until the dialog is destroyed
        self.wait_window(self)
    #
    # construction hooks
    def body(self, master):
        # create dialog body. return widget that should have
        # initial focus. this method should be overridden
        pass
    def listbox(self, listOptions):
        # populate the single-select listbox with the given options
        box = Tkinter.Frame(self)
        self.lb = Tkinter.Listbox(box,height=len(listOptions),width=30,background='#f0f0ff', selectbackground='#c0e0ff'
            ,selectmode='single')
        self.lb.pack()
        for x in listOptions:
            self.lb.insert(Tkinter.END,x)
        box.pack()
    def buttonbox(self):
        # add standard button box. override if you don't want the
        # standard buttons
        box = Tkinter.Frame(self)
        w = Tkinter.Button(box, text="OK", width=10, command=self.ok, default="active")
        w.pack(side="left", padx=5, pady=5)
        w = Tkinter.Button(box, text="Cancel", width=10, command=self.cancel)
        w.pack(side="left", padx=5, pady=5)
        self.bind("<Return>", self.ok)
        self.bind("<Escape>", self.cancel)
        box.pack()
    #
    # standard button semantics
    def ok(self, event=None):
        if not self.validate():
            self.initial_focus.focus_set() # put focus back
            return
        self.withdraw()
        self.update_idletasks()
        self.apply()
        # we want to return self.lb.curselection()
        self.result = self.lb.curselection()
        self.cancel()
    def cancel(self, event=None):
        # put focus back to the parent window
        self.parent.focus_set()
        self.destroy()
    #
    # command hooks
    def validate(self):
        return 1 # override
    def apply(self):
        pass # override
################################ Dot Grapher ######################
# given a state table with names, draw graphs in dot format.
"""
+ CNsib + s # y o @
e CNsib @ s # i o @
1: 0 2 1 2 1 2 7 1
2: 3 2 5 2 1 2 7 1
3. 0 0 0 4 0 0 0 0
4. 0 0 1 0 1 0 0 0
5: 0 1 1 6 1 1 1 1
6: 0 1 0 1 0 1 1 1
7: 3 2 1 2 1 2 7 1
"""
# so first we will create the states.
# then we will write the edges & name them.
# name 0 as fail
# call the dot drawer on the file & display the graph.
def dotformat(nodeEdgeAry):
    """Render a list of node/edge records as graphviz 'dot' source.

    Each record is a dict with keys 'node', 'features', 'edges' and
    'edgenames'.  'Begin' and 'End' nodes get fixed decorations; other
    nodes use their 'features' string.  Returns the digraph source text.
    """
    # scale the bounding box with the number of nodes
    node_count = len(nodeEdgeAry)
    graph_size = '4,4'
    for threshold, dims in ((3, '5,5'), (5, '6,6'), (7, '7,7'), (10, '7.5,7.5')):
        if node_count > threshold:
            graph_size = dims
    body = " size=\"" + graph_size + "\"\n"
    body += " ratio=fill\n"
    for record in nodeEdgeAry:
        if record['node'] == 'Begin':
            attrs = ' [' + 'shape=box,color=lightblue,style=filled] '
        elif record['node'] == 'End':
            attrs = ' [' + 'color="Light Coral",style=filled] '
        elif record['features']:
            attrs = ' [' + record['features'] + '] '
        else:
            attrs = ''
        body += ' "' + record['node'] + '" ' + attrs + ";\n"
        # one edge statement per target; label it when a name exists
        for idx in range(len(record['edges'])):
            body += ' "' + record['node'] + '" -> "' + record['edges'][idx] + '" '
            if idx < len(record['edgenames']):
                body += '[label="\l' + record['edgenames'][idx] + '"]'
            body += ";\n"
    return "digraph autograph {\n" + body + "\n}\n"
def _classeq(instance1, instance2):
    """
    @return: true iff the given objects are instances of the same
    class.
    @rtype: C{bool}
    """
    # Only Python 2 old-style class instances qualify; anything else
    # (ints, strings, new-style instances) compares unequal.
    if type(instance1) != types.InstanceType:
        return False
    if type(instance2) != types.InstanceType:
        return False
    return instance1.__class__ == instance2.__class__
# given a dot string, write to a tmp file and invoke the grapher
# return a filename to open.
# imagetype is hardcoded for now
def dot2image(filename, dotstring):
    """Write C{dotstring} to <filename>.dot, render it to PostScript with
    graphviz 'dot', convert that to a ppm image, and return the image
    file name, or '' if no image was produced.

    The image type is hardcoded (ppm) for now.
    """
    dotfilename = filename + '.dot'
    psfilename = filename + '.ps'
    imgfilename = filename + '.ppm'
    # whack the file if already there... (for now); close even on a
    # write error so the handle is not leaked
    f = open(dotfilename, 'w')
    try:
        f.write(dotstring)
    finally:
        f.close()
    # NOTE(review): filenames are interpolated unquoted into shell
    # commands -- a name containing shell metacharacters will break (or
    # worse).  Acceptable for local tool use only.
    os.system('dot -Tps -o ' + psfilename + ' ' + dotfilename)
    # cheap hack now that graphviz is not working right...
    os.system('rm -f ' + imgfilename)
    os.system('pstopnm -stdout -portrait -ppm ' + psfilename + ' > ' + imgfilename)
    if os.path.isfile(imgfilename):
        return imgfilename
    return ''
################################ KIMMO SET ######################
# ----------- KIMMOCONTROL ---------------
# Master instance for creating a kimmo object
# from files or strings or rules & lexical entries
# -------------------------------------
class KimmoControl:
    """Facade that assembles a complete two-level (Kimmo) system.

    Parses rule text and lexicon text (given as strings, or loaded from
    files when the *_file arguments are non-empty) into a KimmoRuleSet
    (self.s) and a KimmoMorphology (self.m), and exposes generate /
    recognize / batch entry points.  self.ok is 1 when construction
    succeeded; self.errors accumulates human-readable problem reports.
    """
    def __init__(self, lexicon_string, rule_string, lexicon_file, rule_file, debug):
        self.validateLexicon = []
        self.validateAlternations = []
        self.lexicalNodes = [] # transition states and edges for graphing lexicon
        self.ruleNodes = [] # transition states & edges for graphing of rules
        # a better way is just to use a destructor and check if the object exists.
        self.ok = 0
        self.errors = ''
        # load lexicon file (overrides lexicon_string when given)
        if lexicon_file:
            f = read_kimmo_file(lexicon_file)
            lexicon_string = string.join(f.readlines(),"")
            f.close()
        # load rule file (overrides rule_string when given)
        if rule_file:
            f = read_kimmo_file(rule_file)
            rule_string = string.join(f.readlines(),"")
            f.close()
        try:
            self.processRules(rule_string)
            self.processLexicon(lexicon_string)
            self.m = KimmoMorphology(self.kalternations, self.klexicons)
            self.m.set_boundary(self.boundary_char)
            self.s = KimmoRuleSet(self.ksubsets, self.kdefaults, self.krules)
            self.s.debug = debug
            self.ok = 1
        except RuntimeError, e:
            self.errors = ('Caught:' + str(e) + ' ' + self.errors)
            print 'Caught:', e
            print "Setup of the kimmoinstance failed. Most likely cause"
            print "is infinite recursion due to self-referential lexicon"
            print "For instance:"
            print "Begin: Begin Noun End"
            print "Begin is pointing to itself. Simple example, but check"
            print "to insure no directed loops"
            self.ok = 0
    def generate(self, word):
        """Return surface forms for a lexical C{word}; the boundary char
        is appended before generation and stripped from the results."""
        if self.boundary_char: word += self.boundary_char
        genlist = _generate_test(self.s, word)
        # NOTE(review): round-trips the result list through repr/eval just
        # to strip the boundary character from every string -- fragile.
        genliststr = genlist.__repr__()
        if self.boundary_char: genliststr = genliststr.replace(self.boundary_char,'')
        return eval(genliststr)
    def recognize(self, word):
        """Return lexical analyses of a surface C{word}."""
        return _recognize_test(self.s, word, self.m)
    # run a batch and print to console. This is different than the
    # batch for the gui;
    # the kimmo object should already be created when the batch is run.
    # the output is also not formatted nicely
    def batch(self, filename):
        """Run a batch word file ('g word' / 'r word' / bare word lines)
        and print the collected results to the console."""
        if filename:
            f = read_kimmo_file(filename)
            lines = f.readlines()
            f.close()
            # space the results out a little
            results_string = ''
            for line in lines:
                # a 'g word' 'r word' format
                singleword = line.strip() # should be a single word, no spaces, etc.
                spcr = re.compile(r"\s+")
                linevals = []
                linevals = spcr.split(singleword)
                batch_result = []
                batch_result_str = ''
                if not singleword: continue # ignore blank lines
                elif (singleword[0] == '#') or (singleword[0] == ';'): # commented;
                    results_string += (singleword + '\n')
                elif (linevals[0] == 'g') and (len(linevals) == 2):
                    batch_result = self.generate(linevals[1])
                elif (linevals[0] == 'r') and (len(linevals) == 2):
                    batch_result = self.recognize(linevals[1])
                elif '+' in singleword:
                    batch_result = self.generate(singleword)
                else:
                    batch_result = self.recognize(singleword)
                # if a valid results
                if len(batch_result) > 0:
                    for x in batch_result: batch_result_str = batch_result_str + x
                    batch_result_str = batch_result_str + '\n'
                    results_string += (batch_result_str)
            # place a separator between results
            print '----- '+ time.strftime("%a, %d %b %Y %I:%M %p", time.gmtime()) +' -----\n'
            print results_string
    # move this out into a kimmo files & frontend class.
    # make this also process alternations, if contained.
    def processLexicon(self, text):
        """
        Takes the currently typed in lexicon and turns them from text into
        the kimmo lexicon array.

        Populates self.klexicons and self.lexicalNodes; alternation lines
        embedded in the lexicon text are collected and handed on to
        processAlternations.  Returns self.lexicalNodes.
        """
        # text = self.lexicon.get(1.0, Tkinter.END)
        testlex = []
        self.klexicons = [] # lexicons needs to be an object of the gui scope
        lexigroup = ''
        kimmoWords = []
        alternationText = ''
        tmpnode = {} # a node and its edges
        tmpnode['node'] = ''
        tmpnode['features'] = ''
        tmpnode['edges'] = []
        tmpnode['edgenames'] = []
        self.lexicalNodes = [] # list of nodes & their edges for the lexicon
        for item in text.split("\n"):
            # '' None Genitive
            cleanLine = item.strip()
            if len(cleanLine) == 0 : continue # blank line
            elif cleanLine[0] == '#' : continue # a comment
            elif cleanLine[0] == ';' : continue # a comment
            # elsif there is a : then start up this lexicon entry.
            # if there is already a value in lexigroup, then append to lexicons
            # assume that : is the last char.
            # LEXICON N_ROOT1
            elif cleanLine[len(cleanLine)-1] == ':' :
                if (len(lexigroup) > 0):
                    if len(kimmoWords):
                        # print lexigroup
                        # print kimmoWord
                        self.klexicons.append( KimmoLexicon(lexigroup, kimmoWords) )
                        self.lexicalNodes.append(tmpnode)
                        kimmoWords = []
                lexigroup = cleanLine[0:len(cleanLine)-1] # remove trailing ':' , new group
                # create the state transitions for the lexicon.
                tmpnode = {}
                tmpnode['node'] = lexigroup
                tmpnode['features'] = ''
                tmpnode['edges'] = []
                tmpnode['edgenames'] = []
                self.validateLexicon.append(lexigroup)
                # print lexigroup
            # assume that a : contained in the line that is not a last char means it is an alternation.
            elif ':' in cleanLine:
                alternationText += ( cleanLine + "\n")
            elif lexigroup:
                p = re.compile(r"\s+")
                moreitems = []
                # moreitems = item.split(" ") # make sure to add tabs and other whitespace..
                moreitems = p.split(item)
                # re-join a double-quoted gloss that was split on spaces
                # this is splitting on the wrong char
                # *recollect*. doesn't work on multiple spaces.
                # this code only works for the last field
                rangestart = -1
                for x in range(len(moreitems)):
                    # print moreitems[x]
                    if (moreitems[x][0] == '"') and (rangestart < 0): rangestart = x
                    elif (moreitems[x][len(moreitems[x])-1] == '"') and (rangestart > -1):
                        rangeend = x
                        moreitems[rangestart] = string.join(moreitems[rangestart:rangeend+1], " ")
                i = 0
                for furtheritem in moreitems:
                    furtheritem = furtheritem.strip()
                    moreitems[i] = furtheritem
                    if not len(moreitems[i]): continue
                    if i > 2 : continue
                    else: testlex.append(moreitems[i])
                    i += 1
                # unquote the first three fields and map 'None' to None
                for x in range(len(moreitems)):
                    if x > 2: continue
                    elif (moreitems[x] == '\'\'') or (moreitems[x] == '""'):
                        moreitems[x] = ''
                    elif (moreitems[x][0] == '"') and (moreitems[x][len(moreitems[x])-1] == '"'):
                        moreitems[x] = moreitems[x][1:len(moreitems[x])-1]
                    elif (moreitems[x][0] == '\'') and (moreitems[x][len(moreitems[x])-1] == '\''):
                        tmpitem = moreitems[x]
                        moreitems[x] = tmpitem[1:(len(tmpitem)-1)]
                    elif moreitems[x] == 'None' : moreitems[x] = None
                # EXPECTED FORMAT IS:
                # WORD ALTERNATION DESCRIPTION
                if len(moreitems) > 2 :
                    kimmoWords.append( KimmoWord(moreitems[0], moreitems[2], moreitems[1]) )
                    self.validateLexicon.append(moreitems[1])
                    # print moreitems
                elif len(moreitems) > 1 :
                    kimmoWords.append( KimmoWord(moreitems[0], '', moreitems[1]) )
                    self.validateLexicon.append(moreitems[1])
                if (len(moreitems) > 1) and not (moreitems[1] in tmpnode['edges']):
                    tmpnode['edges'].append(moreitems[1])
            else :
                # an undefined line.
                self.errors += "Unknown Line in Lexicon (" + cleanLine + ")"
        # if the end of file and there is a group defined, add this last group
        if (len(lexigroup) > 0) and (len(kimmoWords)):
            self.klexicons.append( KimmoLexicon(lexigroup, kimmoWords) )
            self.lexicalNodes.append(tmpnode)
        # process the alternations
        # print alternationText
        self.processAlternations(alternationText)
        # return an array of state and edge objects.
        return self.lexicalNodes
    # process ALTERNATIONS
    # self.kalternations = [
    # KimmoAlternation('Begin', [ 'N_ROOT', 'ADJ_PREFIX', 'V_PREFIX', 'End' ]),
    def processAlternations(self, text):
        """
        Takes the currently typed in alternations and turns them from text into
        the kimmo alternation array.

        Populates self.kalternations and appends alternation nodes to
        self.lexicalNodes for graphing.
        """
        # text = self.alternation.get(1.0, Tkinter.END)
        testalt = []
        self.kalternations = [] # lexicons needs to be an object of the gui scope
        altgroup = ''
        kimmoAlts = []
        for line in text.split("\n"):
            # '' None Genitive
            cleanLine = line.strip()
            if len(cleanLine) == 0 : continue # blank line
            elif cleanLine[0] == '#' : continue # a comment
            elif cleanLine[0] == ';' : continue # a comment
            else:
                # lets do this one differently.
                # lets break it first, then keep on looping until we find the next group (signified by a : )
                p = re.compile(r"\s+")
                items = []
                items = p.split(cleanLine)
                for item in items:
                    item_tmp = item.strip()
                    if len(item_tmp) == 0 : continue
                    # ALTERNATION V_root
                    elif ':' in item_tmp :
                        # add all prior alternations to prior altgroup (if defined)
                        if len(altgroup) > 0:
                            if len(kimmoAlts) > 0:
                                self.kalternations.append(
                                    KimmoAlternation(altgroup, kimmoAlts) )
                                self.validateAlternations.append(altgroup)
                                for x in kimmoAlts: self.validateAlternations.append(x)
                                self.lexicalNodes.append(tmpnode)
                        # set new altgroup
                        altgroup = cleanLine[0:len(item_tmp)-1]
                        kimmoAlts = []
                        tmpnode = {}
                        tmpnode['node'] = altgroup
                        tmpnode['features'] = 'color=\"aquamarine2\", style=filled'
                        tmpnode['edges'] = []
                        tmpnode['edgenames'] = []
                    else :
                        # remove '' surrounding alternations
                        if (item_tmp[0] == '\'') and (item_tmp[len(item_tmp)-1] == '\''):
                            item_tmp = item_tmp[1:(len(item_tmp)-1)]
                        # convert None
                        elif item_tmp == 'None' : item_tmp = None
                        # print 'a \'' + item_tmp + '\''
                        kimmoAlts.append(item_tmp)
                        # add alternation edges ; order independent.
                        tmpnode['edges'].append(item_tmp)
        # flush the final group at end of input
        if len(altgroup) > 0:
            if len(kimmoAlts) > 0:
                self.kalternations.append(
                    KimmoAlternation(altgroup, kimmoAlts) )
                self.validateAlternations.append(altgroup)
                for x in kimmoAlts: self.validateAlternations.append(x)
                self.lexicalNodes.append(tmpnode)
        # print self.validateAlternations
    # RULES
    # Rule format
    # KimmoFSARule('08:elision: e:0 <= VCC*___+:0 V',
    # ' Cpal C e:0 e:@ +:0 Vbk V @', # english.rul needed pairs re-ordered
    # [ (1, True, [ 1, 1, 1, 2, 1, 2, 2, 1 ]),
    # (2, True, [ 3, 6, 1, 2, 1, 2, 2, 1 ]), # V...
    # (3, True, [ 3, 6, 1, 4, 1, 2, 2, 1 ]), # V Cpal...
    # (4, True, [ 1, 1, 1, 2, 5, 2, 2, 1 ]), # V Cpal e...
    # (5, True, [ 1, 1, 1, 0, 1, 2, 0, 1 ]), # V Cpal e +:0... [english.rul needed fixing]
    # (6, True, [ 1, 1, 1, 7, 1, 2, 2, 1 ]), # V C...
    # (7, True, [ 1, 1, 1, 2, 8, 2, 2, 1 ]), # V C e...
    # (8, True, [ 1, 1, 1, 0, 1, 0, 0, 1 ]) ]), # V C e +:0... [english.rul needed fixing]
    def processRules(self, text):
        """
        Takes the currently typed in rules and processes them into the python kimmo
        format. expects rules to be in c version of .rul file format. needs to
        be file compatible.

        Populates self.krules, self.ksubsets, self.kdefaults,
        self.boundary_char and self.fsasNodes (per-FSA graph data).
        """
        # text = self.rules.get(1.0, Tkinter.END)
        testrule = []
        self.krules = []
        self.ksubsets = []
        self.kdefaults = []
        self.boundary_char = ''
        setgroup = ''
        rulegroup = ''
        rulerowcnt = 0
        rulecolcnt = 0
        kimmoRule = []
        ruleFrom = []
        ruleTo = []
        ruleTran = []
        # anyset holds [ANY char, ALPHABET, NULL char, BOUNDARY char]
        anyset = ['','','','']
        tmpnode = {} # a node and its edges
        tmpnode['node'] = ''
        tmpnode['features'] = ''
        tmpnode['edges'] = [] # list of the transitions
        tmpnode['edgenames'] = [] # matched array naming each transition
        tmpfsanodes = {}
        tmpfsanodes['nodes'] = []
        tmpfsanodes['name'] = ''
        self.fsasNodes = [] # list of nodes & their edges for the lexicon
        for line in text.split("\n"):
            # '' None Genitive
            cleanLine = line.strip()
            if len(cleanLine) == 0 : continue # blank line
            # this char can be a comment if it is not the boundary char.
            # yes, yes, it should be defined such that it is not in the alphabet at all
            # also boundary would need to be defined before ...
            elif (cleanLine[0] == '#') and (anyset[3] != '#'): continue # a comment
            elif (cleanLine[0] == ';') and (anyset[3] != ';') : continue # a comment
            else:
                # lets do this one differently.
                # lets break it first, then keep on looping until we find the next group (signified by a : )
                p = re.compile(r"\s+")
                items = []
                items = p.split(cleanLine)
                # now handle subset keywords
                # KimmoSubset('C', 'b c d f g h j k l m n p q r s t v w x y z'),
                if items[0] == 'SUBSET':
                    if items[1] == 'ALL': items[1] = '@'
                    self.ksubsets.append(
                        KimmoSubset(items[1], string.join(items[2:len(items)]," ") ))
                    # print items[1] + ' ' + string.join(items[2:len(items)]," ")
                # load up the fsa regexp based on alphabet
                # also set up the @ subset if alphabet is defined (old rule file style)
                elif items[0] == 'ALPHABET': anyset[1] = string.join(items[1:len(items)]," ")
                elif items[0] == 'ANY': anyset[0] = items[1]
                elif items[0] == 'NULL': anyset[2] = items[1]
                # using the boundary char, set the final boundary & also add to the any set.
                elif items[0] == 'BOUNDARY':
                    anyset[3] = items[1]
                    self.boundary_char = items[1]
                elif items[0] == 'DEFAULT':
                    self.kdefaults = [ KimmoDefaults(string.join(items[1:len(items)]," ")) ]
                elif items[0] == 'ARROWRULE':
                    # ARROWRULE 03:epenthesis1 0:e ==> [Csib (c h) (s h) y:i] +:0 _ s [+:0 #]
                    # KimmoArrowRule('03:epenthesis1', '0:e ==> [Csib (c h) (s h) y:i] +:0 _ s [+:0 #]'),
                    # print items[1] + ' ' + string.join(items[2:len(items)]," ")
                    self.krules.append(
                        KimmoArrowRule(items[1], string.join(items[2:len(items)]," "))
                        # KimmoArrowRule('05:y:i-spelling', 'y:i <=> @:C +:0? _ +:0 ~I')
                        )
                elif items[0] == 'RULE': # this is actually FSArules
                    # make compatible with rul files
                    if rulegroup: self.guiError('error, fsa rule not finished')
                    rulecolcnt = string.atoi(items[len(items)-1])
                    rulerowcnt = string.atoi(items[len(items)-2])
                    rulegroup = string.join(items[1:len(items)-2])
                    # create the structure (for graphing) for storing the transitions
                    # of the fsas
                    tmpfsanodes = {}
                    tmpfsanodes['nodes'] = []
                    tmpfsanodes['name'] = rulegroup
                    # add the fail node by default
                    tmpnode = {} # a node and its edges
                    tmpnode['node'] = '0'
                    tmpnode['features'] = 'color="indianred1", style=filled, shape=box'
                    tmpnode['edges'] = []
                    tmpnode['edgenames'] = []
                    tmpfsanodes['nodes'].append(tmpnode)
                elif rulegroup:
                    # assume TRUE rules for now
                    # non-char test; already stripped of whitespace
                    ct = re.compile('[^0-9:\.]') # go with [A-Za-z]
                    # if text, then add to first lines of fsa
                    # get row1 and row2 of text & translate into x:y col format.
                    # if a number and until number is equal to row count, add
                    # i.e. not text
                    if ((':' in items[0]) or ('.' in items[0])) and (not ct.match(items[0])):
                        # make sure to check for TRUE vs FALSE rows...
                        # sprint items[0][0:len(items[0])-1] + ' -- ' + string.join(items[1:len(items)], " ")
                        if (items[0][len(items[0])-1] == ':') : finalstate = True
                        elif (items[0][len(items[0])-1] == '.') : finalstate = False
                        else :
                            self.guiError("FSA table failure -- 'final state defn'")
                            continue
                        items[0] = items[0][0:len(items[0])-1] # remove the ':'
                        # convert to integers (instead of strings)
                        for x in range(rulecolcnt + 1): items[x] = string.atoi(items[x]) # including the first row number - i.e. '4:'
                        # add this row.
                        kimmoRule.append((items[0], finalstate, items[1:len(items)]))
                        # now make this row into graph transitions
                        tmpnode = {} # a node and its edges
                        tmpnode['node'] = str(items[0])
                        tmpnode['features'] = 'shape=box, fillcolor="lavender blush", style=filled'
                        if finalstate and (items[0] == 1):
                            tmpnode['features'] = 'shape=circle, color="paleturquoise2", style=filled'
                        elif (items[0] == 1):
                            tmpnode['features'] = 'color="paleturquoise2", style=filled, shape=box'
                        elif (finalstate):
                            tmpnode['features'] = 'shape=circle,fillcolor="honeydew2", style=filled'
                        tmpnode['edges'] = []
                        tmpnode['edgenames'] = []
                        # add as strings
                        # add unique, but group edgenames together
                        tmpitems = items[1:len(items)]
                        for i in range(len(tmpitems)):
                            if str(tmpitems[i]) in tmpnode['edges']:
                                # find the index j of the matching target
                                for j in range(len(tmpnode['edges'])):
                                    if str(tmpnode['edges'][j]) == str(tmpitems[i]):
                                        m = re.match(r"(^|\\n)([^\\]*)$", tmpnode['edgenames'][j])
                                        # instead use a regular expression...
                                        # this should really be done in dotstring
                                        if not m:
                                            tmpnode['edgenames'][j] += (',' + ruleTran[i])
                                        elif (len(m.group(2)) >= 15):
                                            tmpnode['edgenames'][j] += ('\\n ' + ruleTran[i])
                                        else:
                                            tmpnode['edgenames'][j] += (',' + ruleTran[i])
                            else:
                                tmpnode['edges'].append(str(tmpitems[i]))
                                tmpnode['edgenames'].append(ruleTran[i])
                        """
                        for x in items[1:len(items)]:
                            # go through and check, already added?
                            # for i in range(len(tmpnode['edges'])):
                            # if tmpnode['edges'][i] == x:
                            # tmpnode['edgenames'][i] += "," +
                            tmpnode['edges'].append(str(x))
                        for x in ruleTran: tmpnode['edgenames'].append(x)
                        """
                        tmpfsanodes['nodes'].append(tmpnode)
                        # if number is equal to row count, then add total and reset rule group
                        if ( items[0] == rulerowcnt):
                            self.krules.append(
                                KimmoFSARule(str(rulerowcnt)+':'+rulegroup, string.join(ruleTran," "), kimmoRule))
                            # add to the master graph list
                            self.fsasNodes.append(tmpfsanodes)
                            rulegroup = ''
                            rulerowcnt = 0
                            rulecolcnt = 0
                            ruleTran = [] # reset the translation array
                            kimmoRule = [] # reset the kimmo rules as well
                    # the char class/translations
                    elif len(items) == rulecolcnt:
                        # old style has 2 rows, class from, class to
                        if len(ruleFrom) == 0: ruleFrom = items
                        elif len(ruleTo) == 0: ruleTo = items
                        # if ruleTo is ruleFrom: continue
                        if (len(ruleTo) != rulecolcnt) or (len(ruleFrom) != rulecolcnt): continue
                        else:
                            for x in range(rulecolcnt):
                                if ruleTo[x] == ruleFrom[x]: ruleTran.append(ruleTo[x])
                                else:
                                    ruleTran.append(ruleFrom[x] + ':' + ruleTo[x])
                            ruleTo = []
                            ruleFrom = []
        # take care of the anyset, if it was defined (make into a subset)
        if (anyset[0] and anyset[1]):
            self.ksubsets.append(KimmoSubset(anyset[0], string.join(anyset[1:len(anyset)]," ") ))
        # print self.fsasNodes
# ----------- KIMMOPAIR ---------------
#
# -------------------------------------
class KimmoPair:
    """A lexical:surface character pair (e.g. ``b:p``).

    Either side may be a literal character or the name of a subset;
    C{matches} resolves subset names against a subset dictionary, and a
    leading ``~`` on a side negates membership.
    """
    def __init__(self, input_subset, output_subset):
        self._input = input_subset
        self._output = output_subset

    def input(self):
        return self._input

    def output(self):
        return self._output

    def __repr__(self):
        return self.input() + ':' + self.output()

    def __eq__(self, other):
        if not _classeq(self, other):
            return False
        return self._input == other._input and self._output == other._output

    def __hash__(self):
        return hash((self._input, self._output))

    def matches(self, input, output, subsets, negatedOutputMatch=False):
        """True if this pair matches the given input/output characters.

        With negatedOutputMatch the sense of the output-side test is
        inverted; the input side must still match normally.
        """
        if not self._matches(self.input(), input, subsets):
            return False
        out_ok = self._matches(self.output(), output, subsets)
        return (not out_ok) if negatedOutputMatch else out_ok

    def _matches(self, me, terminal, subsets):
        # Exact literal match.
        if me == terminal:
            return True
        # '~NAME' matches anything NOT in subset NAME.
        if me[0] == '~':
            name = me[1:]
            if name in subsets:
                return terminal not in subsets[name]
            return False
        # A subset name matches any of its members.
        if me in subsets:
            return terminal in subsets[me]
        return False
_kimmo_terminal_regexp = '[a-zA-Z0-9\+\'\-\#\@\$\%\!\^\`\}\{]+' # \}\{\<\>\,\.\~ # (^|\s)?\*(\s|$) !!! * is already covered in the re tokenizer
_kimmo_terminal_regexp_fsa = '[^:\s]+' # for FSA, only invalid chars are whitespace and :
# '[a-zA-Z0-9\+\'\-\#\@\$\%\!\^\`\}\{\<\>\,\.\~\*]+'
_kimmo_terminal_regexp_ext= '~?' + _kimmo_terminal_regexp
_kimmo_defaults = _kimmo_terminal_regexp + '|\:'
_kimmo_defaults_fsa = _kimmo_terminal_regexp_fsa + '|\:'
_kimmo_rule = _kimmo_terminal_regexp_ext + '|[\:\(\)\[\]\?\&\*\_]|<=>|==>|<==|/<='
_arrows = ['==>', '<=>', '<==', '/<=']
_special_tokens = ['(', ')', '[', ']', '*', '&', '_', ':']
_special_tokens.extend(_arrows)
_non_list_initial_special_tokens = [')', ']', '*', '&', '_', ':']
_non_list_initial_special_tokens.extend(_arrows)
def parse_pair_sequence(description, token_type):
    """Read the description, which should be in form [X|X:Y]+, and return a list of pairs"""
    # FSA rules use a more permissive terminal pattern.
    pattern = _kimmo_defaults_fsa if token_type == 'FSA' else _kimmo_defaults
    tokens = list(tokenize.regexp(description, pattern))
    pending = None          # identifier waiting to be paired
    saw_colon = False       # a ':' was seen after `pending`
    pairs = []
    for tok in tokens:
        if tok == ':':
            if saw_colon:
                raise ValueError('two colons in a row')
            if pending is None:
                raise ValueError('colon must follow identifier')
            saw_colon = True
        elif saw_colon:
            # explicit X:Y pair
            pairs.append(KimmoPair(pending, tok))
            pending = None
            saw_colon = False
        else:
            # bare X becomes the identity pair X:X
            if pending:
                pairs.append(KimmoPair(pending, pending))
            pending = tok
            saw_colon = False
    if saw_colon:
        raise ValueError('colon with no following identifier')
    if pending:
        pairs.append(KimmoPair(pending, pending))
    return pairs
class KimmoSubset:
    """A named set of characters parsed from a whitespace-separated description."""
    def __init__(self, name, description):
        self._name = name
        self._description = description
        # Tokenize the description and drop duplicate members.
        self._subset = list(set(tokenize.regexp(description, _kimmo_terminal_regexp_fsa)))

    def name(self):
        return self._name

    def description(self):
        return self._description

    def subset(self):
        return self._subset

    def __repr__(self):
        return '<KimmoSubset %s: %s>' % (self.name(), self.description())
class KimmoDefaults:
    """The set of default feasible pairs, parsed from a pair-sequence description."""
    def __init__(self, description):
        self._description = description
        # parse_pair_sequence yields KimmoPairs; collect them into a set.
        self._defaults = set(parse_pair_sequence(description, ''))

    def defaults(self):
        return self._defaults

    def __repr__(self):
        return '<KimmoDefaults %s>' % (self._description,)
class KimmoRule:
    """Abstract interface shared by arrow rules and FSA rules."""
    def pairs(self):
        raise RuntimeError('unimplemented: KimmoRule.pairs()')

    def right_advance(self, current_states, input, output, subsets):
        raise RuntimeError('unimplemented: KimmoRule.right_advance()')
class KimmoArrowRule:
    """
    Two level rule

    A rule of the form ``PAIR ARROW LEFT-CONTEXT _ RIGHT-CONTEXT``, where the
    contexts are regular expressions over pairs.  Each context is compiled
    into a pruned DFA: the left context is scanned leftwards from the slot,
    the right context rightwards.
    """
    def leftFSA(self): return self._left_fsa
    def rightFSA(self): return self._right_fsa
    def pairs(self): return self._pairs
    def arrow(self): return self._arrow
    def lhpair(self): return self._lhpair
    def __init__(self, name, description):
        """Tokenize *description* (e.g. ``y:i <=> c _ +:0``) and compile it."""
        self._name = name
        self._description = description
        self._negated = False
        self._pairs = set()
        desc = list(tokenize.regexp(description, _kimmo_rule))
        self._parse(desc)
    def __repr__(self):
        return '<KimmoArrowRule %s: %s>' % (self._name, self._description)
    def advance(self, fsa, current_states, input, output, subsets):
        """Returns a tuple of (next_states, contains_halt_state)"""
        result = []
        contains_halt_state = False
        for current_state in current_states:
            for next_state in fsa.forward_traverse(current_state):
                # Take the edge if any pair labelling it matches input:output.
                ok = False
                for pair in fsa._labels[(current_state, next_state)]:
                    if pair.matches(input, output, subsets):
                        ok = True
                        break
                if (ok):
                    if (next_state in fsa.finals()): contains_halt_state = True
                    if not(next_state in result): result.append(next_state)
        return (result, contains_halt_state)
    def right_advance(self, current_states, input, output, subsets):
        """Advance the right-context DFA by one input/output pair."""
        return self.advance(self.rightFSA(), current_states, input, output, subsets)
    def matches(self, input, output, subsets):
        """Does this rule's LHS match this input/output pair?
        If it doesn't, return None. If it does, return True if the rule must pass, False if the rule must fail."""
        if (self.arrow() == '==>'):
            if self.lhpair().matches(input, output, subsets):
                return True
            else:
                return None
        elif (self.arrow() == '<=='):
            if self.lhpair().matches(input, output, subsets, negatedOutputMatch=True):
                return False
            else:
                return None
        elif (self.arrow() == '/<='):
            if self.lhpair().matches(input, output, subsets, negatedOutputMatch=False):
                return False
            else:
                return None
        elif (self.arrow() == '<=>'):
            if self.lhpair().matches(input, output, subsets, negatedOutputMatch=False):
                return True
            elif self.lhpair().matches(input, output, subsets, negatedOutputMatch=True):
                return False
            else:
                return None
        else:
            raise RuntimeError('unknown arrow: '+self.arrow())
    def _parse(self, tokens):
        """Parse the full rule: pair, arrow, left context, '_', right context."""
        (end_pair, tree) = self._parse_pair(tokens, 0)
        lhpair = self._pair_from_tree(tree)
        self._lhpair = lhpair
        self._pairs.add(lhpair)
        end_arrow = self._parse_arrow(tokens, end_pair)
        (end_left, lfsa) = self._parse_context(tokens, end_arrow, True)
        end_slot = self._parse_slot(tokens, end_left)
        (end_right, rfsa) = self._parse_context(tokens, end_slot, False)
        if not(end_right == len(tokens)):
            raise ValueError('unidentified tokens')
        self._left_fsa = lfsa
        self._right_fsa = rfsa
    def _next_token(self, tokens, i, raise_error=False):
        """Return tokens[i], or None when past the end (or raise if requested)."""
        if i >= len(tokens):
            if raise_error:
                raise ValueError('ran off end of input')
            else:
                return None
        return tokens[i]
    def _pair_from_tree(self, tree):
        """Convert a parsed 'Pair' tree into a KimmoPair."""
        # Bug fix: this previously raised the undefined name RuntimeException,
        # which would itself surface as a NameError; RuntimeError is intended.
        if (tree.node != 'Pair'): raise RuntimeError('expected Pair, got ' + str(tree))
        if len(tree) == 1:
            return KimmoPair(tree[0], tree[0])
        else:
            return KimmoPair(tree[0], tree[2])
    def _parse_pair(self, tokens, i):
        """Parse ``X`` or ``X:Y`` at position i; returns (next_index, tree)."""
        t1 = self._next_token(tokens, i, True)
        if t1 in _special_tokens: raise ValueError('expected identifier, not ' + t1)
        t2 = t1
        j = i + 1
        if self._next_token(tokens, j) == ':':
            t2 = self._next_token(tokens, j+1, True)
            if t2 in _special_tokens: raise ValueError('expected identifier, not ' + t2)
            j = j + 2
            tree = Tree('Pair', tokens[i:j])
        else:
            tree = Tree('Pair', [tokens[i]])
        return (j, tree)
    def _parse_arrow(self, tokens, i):
        """Consume one of the four rule arrows."""
        self._arrow = self._next_token(tokens, i, True)
        if not(self.arrow() in _arrows):
            raise ValueError('expected arrow, not ' + self.arrow())
        return i + 1
    def _parse_slot(self, tokens, i):
        """Consume the '_' separating the left and right contexts."""
        slot = self._next_token(tokens, i, True)
        if slot != '_':
            raise ValueError('expected _, not ' + slot)
        return i + 1
    def _parse_context(self, tokens, i, reverse):
        """Parse a context expression and compile it to a pruned DFA.
        Returns (next_index, dfa), or (i, None) when the context is empty."""
        (j, tree) = self._parse_list(tokens, i)
        if j == i: return (i, None)
        sigma = set()
        self._collect_alphabet(tree, sigma)
        fsa = FSA(sigma)
        final_state = self._build_fsa(fsa, fsa.new_state(), tree, reverse)
        fsa.set_final([final_state])
        dfa = fsa.dfa()
        dfa.prune()
        return (j, dfa)
    def _collect_alphabet(self, tree, sigma):
        """Accumulate every pair mentioned in *tree* into sigma and self._pairs."""
        if tree.node == 'Pair':
            pair = self._pair_from_tree(tree)
            sigma.add(pair)
            self._pairs.add(pair)
        else:
            for d in tree: self._collect_alphabet(d, sigma)
    def _parse_list(self, tokens, i, type='Cons'):
        """Parse a sequence of singletons into a right-branching tree of *type*."""
        t = self._next_token(tokens, i)
        if t == None or t in _non_list_initial_special_tokens:
            return (i, None)
        (j, s) = self._parse_singleton(tokens, i)
        (k, r) = self._parse_list(tokens, j, type)
        if r == None:
            return (j, s)
        tree = Tree(type, [s, r])
        return (k, tree)
    def _parse_singleton(self, tokens, i):
        """Parse one item: (sequence), [alternation], or a pair; an optional
        '*', '&' or '?' suffix wraps the result."""
        t = self._next_token(tokens, i, True)
        j = i
        result = None
        if t == '(':
            (j, result) = self._parse_list(tokens, i + 1, 'Cons')
            if result == None: raise ValueError('missing contents of (...)')
            t = self._next_token(tokens, j, True)
            if t != ')': raise ValueError('missing final parenthesis, instead found ' + t)
            j = j + 1
        elif t == '[':
            (j, result) = self._parse_list(tokens, i + 1, 'Or')
            if result == None: raise ValueError('missing contents of [...]')
            t = self._next_token(tokens, j, True)
            if t != ']': raise ValueError('missing final bracket, instead found ' + t)
            j = j + 1
        elif t in _special_tokens:
            raise ValueError('expected identifier, found ' + t)
        else:
            (j, tree) = self._parse_pair(tokens, i)
            result = tree
        t = self._next_token(tokens, j)
        if t in ['*', '&', '?']:
            j = j + 1
            result = Tree(t, [result])
        return (j, result)
    def _build_fsa(self, fsa, entry_node, tree, reverse):
        """Thompson-style NFA construction, dispatching on the tree label."""
        if tree.node == 'Pair':
            return self._build_terminal(fsa, entry_node, self._pair_from_tree(tree))
        elif tree.node == 'Cons':
            return self._build_seq(fsa, entry_node, tree[0], tree[1], reverse)
        elif tree.node == 'Or':
            return self._build_or(fsa, entry_node, tree[0], tree[1], reverse)
        elif tree.node == '*':
            return self._build_star(fsa, entry_node, tree[0], reverse)
        elif tree.node == '&':
            return self._build_plus(fsa, entry_node, tree[0], reverse)
        elif tree.node == '?':
            return self._build_qmk(fsa, entry_node, tree[0], reverse)
        else:
            raise RuntimeError('unknown tree node'+tree.node)
    def _build_terminal(self, fsa, entry_node, terminal):
        """One labelled edge into a fresh exit state."""
        new_exit_node = fsa.new_state()
        fsa.insert(entry_node, terminal, new_exit_node)
        return new_exit_node
    def _build_plus(self, fsa, node, tree, reverse):
        """'&' (one or more): build once, epsilon back-edge allows repetition."""
        # Bug fix: was self._build_fsa(fsa, node, tree[0], reverse).  The
        # caller (_build_fsa) already passes the subtree tree[0], so indexing
        # again descended one level too far -- cf. _build_star/_build_qmk,
        # which operate on `tree` directly.
        node1 = self._build_fsa(fsa, node, tree, reverse)
        fsa.insert(node1, epsilon, node)
        return node1
    def _build_qmk(self, fsa, node, tree, reverse):
        """'?' (optional): epsilon bypass around the subexpression."""
        node1 = fsa.new_state()
        node2 = self._build_fsa(fsa, node1, tree, reverse)
        node3 = fsa.new_state()
        fsa.insert(node, epsilon, node1)
        fsa.insert(node, epsilon, node3)
        fsa.insert(node2, epsilon, node3)
        return node3
    def _build_star(self, fsa, node, tree, reverse):
        """'*' (zero or more): epsilon bypass plus loop."""
        node1 = fsa.new_state()
        node2 = self._build_fsa(fsa, node1, tree, reverse)
        node3 = fsa.new_state()
        fsa.insert(node, epsilon, node1)
        fsa.insert(node, epsilon, node3)
        fsa.insert(node2, epsilon, node1)
        fsa.insert(node2, epsilon, node3)
        return node3
    def _build_seq(self, fsa, node, tree0, tree1, reverse):
        """Concatenation; the order is flipped for the (reversed) left context."""
        (d0, d1) = (tree0, tree1)
        if reverse: (d0, d1) = (d1, d0)
        node1 = self._build_fsa(fsa, node, d0, reverse)
        node2 = self._build_fsa(fsa, node1, d1, reverse)
        return node2
    def _build_or(self, fsa, node, tree0, tree1, reverse):
        """Alternation via epsilon edges into and out of both branches."""
        node0 = fsa.new_state()
        node1 = fsa.new_state()
        node2 = self._build_fsa(fsa, node0, tree0, reverse)
        node3 = self._build_fsa(fsa, node1, tree1, reverse)
        node4 = fsa.new_state()
        fsa.insert(node, epsilon, node0)
        fsa.insert(node, epsilon, node1)
        fsa.insert(node2, epsilon, node4)
        fsa.insert(node3, epsilon, node4)
        return node4
class KimmoFSARule:
    """A two-level rule given directly as a finite-state transition table."""
    def __init__(self, name, pair_description, state_descriptions):
        """
        name -- the rule's name.
        pair_description -- whitespace-separated pair sequence; its column
            order matches each state's transition array.
        state_descriptions -- list of (index, is_final, next_state_indices).
        """
        self._name = name
        self._pairs = parse_pair_sequence(pair_description, 'FSA')
        self.transitions = {}
        self.is_final = {}
        self._state_descriptions = state_descriptions
        # validate transitions
        for (index, is_final, next_state_array) in state_descriptions:
            if is_final not in (True, False):
                raise ValueError('each state description must take the form (index, True/False, [next_state_indices...]')
            if len(next_state_array) != len(self.pairs()):
                raise ValueError('transition array of wrong size '+ str(len(next_state_array)) + ' ' + str(len(self.pairs())))
            self.transitions[index] = next_state_array
            self.is_final[index] = is_final
    def name(self): return self._name
    def pairs(self): return self._pairs
    def start(self):
        # The first described state is the start state.
        return self._state_descriptions[0][0]
    def is_state(self, index):
        # `in` replaces the Python 2-only dict.has_key().
        return index in self.transitions
    def contains_final(self, indices):
        """True if any index in *indices* is a final state."""
        for i in indices:
            if self.is_final[i]: return True
        return False
    def sorted_pairs(self, subsets):
        """Return [[column_index, pair, input_size, output_size], ...] sorted so
        the most specific pairs (smallest subset sizes) are tried first.
        column_index is the pair's position in the transition table."""
        sorted_with_index = []
        for idx, pair in enumerate(self.pairs()):
            # pairs() are in the order corresponding with the transition table
            size1 = 1
            size2 = 1
            if pair.input() in subsets: size1 = len(subsets[pair.input()])
            if pair.output() in subsets: size2 = len(subsets[pair.output()])
            sorted_with_index.append([idx, pair, size1, size2])
        # Key-based sort replaces the Python 2-only cmp-function sort; the
        # ordering is identical to mycompare: input size, then output size.
        sorted_with_index.sort(key=lambda entry: (entry[2], entry[3]))
        return sorted_with_index
    # two-field compare, kept for backward compatibility with the old cmp sort.
    def mycompare(self, x1, y1, x2=0, y2=0):
        if x1 == y1: return x2-y2
        else: return x1-y1
    def right_advance(self, current_states, input, output, subsets):
        """Advance each state in *current_states* over input:output.
        Returns (next_states, contains_halt_state).  A next state with no
        transition row (the conventional fail state 0) contributes nothing."""
        next_states = []
        contains_halt_state = False
        for index in current_states:
            # Try the most specific matching pair first; only the first
            # matching column is taken for each state (hence the break).
            for i, pair, size1, size2 in self.sorted_pairs(subsets):
                if pair.matches(input, output, subsets):
                    next_state = self.transitions[index][i]
                    if next_state in self.transitions:
                        if next_state not in next_states:
                            next_states.append(next_state)
                            if self.is_final[next_state]: contains_halt_state = True
                    break
        return (next_states, contains_halt_state)
    def __repr__(self):
        return '<KimmoFSARule %s>' % (self.name(), )
class KimmoWord:
    """A lexicon entry: its letters, a gloss, and an optional continuation
    alternation name."""
    def __init__(self, letters, gloss, next_alternation=None):
        self._letters = letters
        self._gloss = gloss
        self._next_alternation = next_alternation

    def __repr__(self):
        return '<KimmoWord %s: %s>' % (self.letters(), self.gloss())

    def letters(self):
        return self._letters

    def gloss(self):
        return self._gloss

    def next_alternation(self):
        return self._next_alternation
class KimmoLexicon:
    """A named word list compiled into a trie for prefix lookup."""
    def __init__(self, name, words):
        self._name = name
        self._words = words
        self._trie = self.build_trie(words)
    def __repr__(self):
        return '<KimmoLexicon ' + self.name() + '>'
    def name(self): return self._name
    def words(self): return self._words
    def trie(self): return self._trie # tree is ([KimmoWord], [ (char, sub-trie), ... ])
    def build_trie(self, words, word_position=0):
        """Recursively build the trie: words exactly word_position letters long
        terminate here; longer words are grouped by their next letter."""
        if len(words) == 0: return ([], [])
        first_chars = {}
        for w in words:
            if len(w.letters()) <= word_position: continue
            fc = w.letters()[word_position]
            # setdefault replaces the Python 2-only has_key() check-then-insert.
            first_chars.setdefault(fc, []).append(w)
        sub_tries = []
        for c, sub_words in first_chars.items():
            sub_tries.append( (c, self.build_trie(sub_words, word_position+1)) )
        return ( [w for w in words if len(w.letters()) == word_position], sub_tries )
class KimmoAlternation:
    """A named list of lexicon names reachable from one morphological slot."""
    def __init__(self, name, lexicon_names):
        self._name = name
        self._lexicon_names = lexicon_names

    def __repr__(self):
        return '<KimmoAlternation %s: %s>' % (self.name(), str(self.lexicon_names()))

    def name(self):
        return self._name

    def lexicon_names(self):
        return self._lexicon_names
class KimmoMorphology:
    """The lexicon component: alternations name lists of lexicons, and words
    chain lexicons together through their next_alternation."""
    def __init__(self, alternations, lexicons, start='Begin'):
        self.alternations = {}
        self.lexicons = {}
        self._start = start
        for a in alternations: self.alternations[a.name()] = a
        for l in lexicons: self.lexicons[l.name()] = l
    def set_boundary(self, boundary_char):
        # character marking the end of a word
        self.boundary = boundary_char
    def initial_state(self):
        return self._collect(self._start)
    def possible_next_characters(self, state):
        """Set of characters that can legally follow *state* in the lexicon."""
        chars = set()
        self._possible_next_characters(state, chars)
        return chars
    # from the lexicon, return the next possible character from all words that match the current state
    # for instance, if lexicon has iti, ili, and iyi, and current state is first [i], then
    # this function will return a set of (t,l,y)
    def _possible_next_characters(self, state, chars):
        for s in state:
            if isinstance(s, KimmoLexicon):
                (words, sub_tries) = s.trie()
            else:
                (words, sub_tries) = s
            for w in words:
                # a completed word: recurse into its continuation lexicons
                self._possible_next_characters(self._collect(w.next_alternation()), chars)
            for c, sub_trie in sub_tries:
                chars.add(c)
    def _collect(self, name):
        """Resolve an alternation or lexicon *name* to a list of lexicons."""
        if name is None:
            return []
        elif name in self.alternations:
            # `in` replaces the Python 2-only dict.has_key().
            result = []
            for ln in self.alternations[name].lexicon_names():
                result.extend(self._collect(ln))
            return result
        elif name in self.lexicons:
            return [ self.lexicons[name] ]
        else:
            # unknown names are treated as dead ends rather than errors
            return []
    def advance(self, state, char):
        """Generator: yield (next_state, words) pairs reachable over *char*."""
        result = []
        for s in state:
            if isinstance(s, KimmoLexicon):
                (words, sub_tries) = s.trie()
            else:
                (words, sub_tries) = s
            for w in words:
                for v in self._advance_through_word(w, char):
                    yield v
            for c, sub_trie in sub_tries:
                if c == char: result.append(sub_trie)
        if len(result) > 0:
            yield (result, [])
    def _advance_through_word(self, word, char):
        """Step into *word*'s continuation lexicons, prepending glossed words."""
        for s in self.advance(self._collect(word.next_alternation()), char):
            state, words = s
            if word.gloss():
                yield (state, [word] + words)
            else:
                yield s
class KimmoRuleSet:
    """The rule component: subsets, default pairs, and the rules themselves.
    Drives two-level generation and recognition via the _generate coroutine.
    NOTE(review): Python 2 code (print statements); kept as-is."""
    def __init__(self, subsets, defaults, rules, null='0'):
        """subsets/defaults/rules are KimmoSubset/KimmoDefaults/rule objects;
        *null* is the null (zero) character used on either side of a pair."""
        self.debug = False
        self._rules = rules
        self._pair_alphabet = set()
        self._subsets = {}
        self._null = null
        for s in subsets:
            self._subsets[s.name()] = s.subset()
        for kd in defaults:
            for pair in kd.defaults():
                # defaults shouldn't contain subsets
                if self.is_subset(pair.input()) or self.is_subset(pair.output()):
                    raise ValueError('default ' + str(pair) + ' contains subset')
                self._pair_alphabet.add( ( pair.input() , pair.output() ) )
        # also collect every concrete (non-subset) pair mentioned by a rule
        for r in self.rules():
            for kp in r.pairs():
                if (not (self.is_subset(kp.input()) or self.is_subset(kp.output()))):
                    self._pair_alphabet.add( ( kp.input(), kp.output() ) )
    def rules(self): return self._rules
    def subsets(self): return self._subsets
    def is_subset(self, key):
        # a leading '~' always denotes a (negated) subset name
        return key[0] == '~' or key in self.subsets()
    def null(self): return self._null;
    def _evaluate_rule_left_context(self, rule, input, output):
        """Scan the already-consumed input/output backwards through the rule's
        left-context FSA; True if the left context is satisfied."""
        fsa = rule.leftFSA()
        if fsa == None: return True
        states = [ fsa.start() ]
        i = len(input) - 1
        while i >= 0:
            next_states = []
            (result, contains_halt_state) = rule.advance(fsa, states, input[i], output[i], self.subsets())
            if contains_halt_state: return True
            for s in result:
                if not(s in next_states): next_states.append(s)
            if (len(next_states) == 0): return False
            states = next_states
            i = i - 1
        return False
    def _debug_print_input_and_output(self, position, rule_states, morphological_state,
                                      input, output, this_input, this_output, invert):
        """Debug trace of the current position; no-op unless self.debug."""
        if (self.debug):
            #indent str
            padstring = ''
            for x in range(position): padstring = padstring + ' '
            print '%s%d %s:%s \n' % (padstring, position, this_input, this_output),
            print '%s%d: Input: ' % (padstring, position,),
            for i in input:
                print ' ' + i + ' ',
            if this_input:
                print '[' + this_input + ']...',
            print
            print '%s%d> Output: ' % (padstring, position,),
            for o in output:
                print ' ' + o + ' ',
            if this_output:
                print '<' + this_output + '>...',
            print
            # for (start, rule, fsa_states, required_truth_value) in rule_states:
            #     print ' {%d %s %s %s}' % (start, rule, fsa_states, required_truth_value)
            if False: # morphological_state:
                print ' possible input chars = %s' % invert.possible_next_characters(morphological_state)
                # print morphological_state
    # generate works by passing in the word at each position of the word
    # _generate is responsible for testing all the valid chars in the transition alphabet to see if
    # they are appropriate surface-underlying transitions.
    # it fails entirely if no valid transitions are found
    # if one is found, that is the one that is used.
    # essentially this is a possible word tree being expanded and failed on branches.
    # should return a list of matching words.
    def _generate(self, input_tokens, position, rule_states, morphological_state, input, output, result_str, result_words,
                  invert=False):
        """Recursive generator shared by generate() and recognize().
        Yields (result_str, result_words) for every licensed analysis.
        When *morphological_state* is set we are recognizing and *invert* is
        the KimmoMorphology; otherwise we are generating."""
        # state is [ ( start, rule, states, required_truth_value ) ]
        # print 'morphological_state'
        # print morphological_state
        # if (self.debug) :
        #     print '_generate'
        #     print input_tokens, position, input, output, result_str, result_words
        # when at the last token or past it.
        if ((position >= len(input_tokens)) ): # and (not morphological_state)
            if (self.debug) : print ' AT END OF WORD'
            # FOR RECOGNIZER
            # this will yield some words twice, not all
            # also, recognizer is failing to put on the added information like "+genetive"
            # we are at the end, so check to see if a boundary char is in the possible set
            # and if so, add it and the remaining morphos
            if morphological_state:
                # print 'morpho'
                possible_next_input_chars = invert.possible_next_characters(morphological_state)
                # print 'possible_next_input_chars'
                # print possible_next_input_chars
                # change to boundary char, instead of hardcode
                if ('0' in possible_next_input_chars) or ('#' in possible_next_input_chars):
                    if '0' in possible_next_input_chars: boundary = '0'
                    elif '#' in possible_next_input_chars: boundary = '#'
                    # are at the end of the word, so we need to check and return those results
                    # that contain the boundary char.
                    # should only be one potential boundary word '0'
                    # not correct, there can be more than one boundary word.
                    for next_morphological_state, new_words in invert.advance(morphological_state, boundary):
                        # yield result_str, result_words + new_words
                        # print new_words
                        # print next_morphological_state
                        # for o in self._generate(input_tokens, position + 1, [] , next_morphological_state,
                        #     new_input, new_output, new_result_str,
                        #     result_words + new_words,
                        #     invert):
                        #     yield o
                        yield result_str, result_words + new_words
            # yield result_str, result_words
            else:
                # GENERATION CASE
                # print 'no-morpho'
                self._debug_print_input_and_output(position, rule_states, morphological_state, input, output, None, None, invert)
                # every active rule must end in its required truth state
                for (start, rule, fsa_states, required_truth_value) in rule_states:
                    if isinstance(rule, KimmoArrowRule):
                        truth_value = False # since it hasn't reached a halt state
                    elif isinstance(rule, KimmoFSARule):
                        truth_value = rule.contains_final(fsa_states)
                    if (required_truth_value != truth_value):
                        if (self.debug):
                            print ' BLOCKED by rule {%d %s %s}' % (start, rule, required_truth_value)
                            print fsa_states
                        break
                    else:
                        if 0: # (self.debug):
                            print ' passed rule {%d %s %s}' % (start, rule, required_truth_value)
                else:
                    # for/else: no rule blocked, so this analysis succeeds
                    if (self.debug):
                        print ' SUCCESS!'
                    yield result_str, result_words
        else:
            if morphological_state: # recognizer; get the next possible surface chars that can result in
                # the next char
                possible_next_input_chars = invert.possible_next_characters(morphological_state)
                # print 'possible_next_input_chars'
                # print possible_next_input_chars
            # foreach pair in our alphabet (includes per subset)
            # print self._pair_alphabet
            for pair_input, pair_output in self._pair_alphabet:
                if (pair_input != self.null() and morphological_state):
                    # if this pair does not apply, i.e. it is not in the possible
                    # chars from the lexicon
                    if not(pair_input in possible_next_input_chars):
                        continue
                if invert:
                    # check if the output of a transition is in the input string (input_tokens)
                    compare_token = pair_output
                else:
                    compare_token = pair_input
                if not(compare_token == self.null() or compare_token == input_tokens[position]): continue
                self._debug_print_input_and_output(position, rule_states, morphological_state,
                                                   input, output, pair_input, pair_output, invert)
                fail = None
                next_rule_states = []
                # first, evaluate currently activated rules
                # s is the current rule & its state
                rule_state_debug = '    '
                for s in rule_states:
                    # advance one through each rule
                    (start, rule, fsa_state_set, required_truth_value) = s
                    current_state_str = '['
                    for x in fsa_state_set: current_state_str += str(x)
                    rule_state_debug += current_state_str
                    (next_fsa_state_set, contains_halt_state) = rule.right_advance(fsa_state_set, pair_input, pair_output,
                                                                                   self.subsets())
                    current_state_str = ''
                    for x in next_fsa_state_set: current_state_str += str(x)
                    if not current_state_str: current_state_str = '0 (FAIL)'
                    rule_state_debug += ('->' + current_state_str + '] ')
                    if (contains_halt_state == True and isinstance(rule, KimmoArrowRule)):
                        if (required_truth_value == False):
                            fail = s
                            break
                        else:
                            if (0): # (self.debug):
                                print ' passed rule {%d %s %s}' % (start, rule, required_truth_value)
                    elif (len(next_fsa_state_set) == 0):
                        # if it isn't true, then it will have to fail, bcs we are at
                        # the end of the state set.
                        # truth is evaluated by following the states until the end.
                        if (required_truth_value == True):
                            fail = s
                            break
                        else:
                            if (0): # (self.debug):
                                print ' passed rule {%d %s %s}' % (start, rule, required_truth_value)
                    else:
                        next_rule_states.append( (start, rule, next_fsa_state_set, required_truth_value) )
                if (self.debug) : print rule_state_debug
                if (fail):
                    if (self.debug):
                        print ' BLOCKED by rule %s' % (fail,)
                    continue
                # activate new KimmoArrowRules
                for rule in self.rules():
                    if not(isinstance(rule, KimmoArrowRule)): continue
                    required_truth_value = rule.matches(pair_input, pair_output, self.subsets())
                    if required_truth_value == None: continue
                    left_value = self._evaluate_rule_left_context(rule, input, output)
                    if (left_value == False):
                        if (required_truth_value == True):
                            fail = rule
                        continue
                    if (rule.rightFSA()):
                        if (self.debug):
                            print ' adding rule {%d %s %s}' % (position, rule, required_truth_value)
                        next_rule_states.append( (position, rule, [ rule.rightFSA().start() ], required_truth_value) )
                    else:
                        if (required_truth_value == False):
                            fail = rule
                            continue
                        else:
                            if (0): # (self.debug):
                                print ' passed rule ' + str(rule)
                # if did not fail, call recursively on next chars
                if (fail == None):
                    new_position = position
                    new_input = input + [pair_input]
                    new_output = output + [pair_output]
                    new_result_str = result_str
                    # a null on the consumed side does not advance position
                    if (pair_input != self.null()):
                        if invert:
                            new_result_str = result_str + pair_input
                        else:
                            new_position = position + 1
                    if (pair_output != self.null()):
                        if invert:
                            new_position = position + 1
                        else:
                            new_result_str = result_str + pair_output
                    # morph state & generation steps through a char at a time.
                    # as it is, it only yields its morph if there is a valid next morphology
                    if morphological_state and pair_input != self.null():
                        for next_morphological_state, new_words in invert.advance(morphological_state, pair_input):
                            # print 'ENTERING LEXICON '
                            for o in self._generate(input_tokens, new_position, next_rule_states, next_morphological_state,
                                                    new_input, new_output, new_result_str,
                                                    result_words + new_words,
                                                    invert):
                                yield o
                    else:
                        for o in self._generate(input_tokens, new_position, next_rule_states, morphological_state,
                                                new_input, new_output, new_result_str, result_words, invert):
                            yield o
                else:
                    if (self.debug):
                        print ' BLOCKED by rule ' + str(fail)
    def _initial_rule_states(self):
        # FSA rules are active from the start; arrow rules activate on demand
        return [ (0, rule, [ rule.start() ], True) for rule in self.rules() if isinstance(rule, KimmoFSARule)]
    def generate(self, input_tokens):
        """Generator: yields output strings"""
        for o, w in self._generate(input_tokens, 0, self._initial_rule_states(), None, [], [], '', None):
            yield o
    def recognize(self, input_tokens, morphology=None):
        """Recognizer: yields (input_string, input_words)"""
        morphology_state = None
        output_words = None
        invert = True
        if morphology:
            morphology_state = morphology.initial_state()
            output_words = []
            invert = morphology
            if not morphology_state:
                print "Bad Morphological State, failing recognition"
                return
        if (self.debug) : print 'recognize: ' + input_tokens
        # print output_words
        for o in self._generate(input_tokens, 0, self._initial_rule_states(), morphology_state, [], [], '',
                                output_words, invert):
            yield o # yielding a list of possible words.
def _generate_test(s, input):
resultlist = '%s -> ' % (input,),
padlevel = len(input) + 4
padstring = ''
# for x in range(padlevel): padstring = padstring + ' '
tmplist = '%s' % ('***NONE***'),
for o in s.generate(input):
tmplist = '%s%s\n' % (padstring,o,),
resultlist = resultlist + tmplist
padstring = ''
for x in range(padlevel): padstring = padstring + ' '
tmplist = '%s' % (''),
resultlist = resultlist + tmplist
return resultlist
def _recognize_test(s, input, morphology=None):
resultlist = '%s <- ' % (input,),
padlevel = len(input) + 4
padstring = ''
# for x in range(padlevel): padstring = padstring + ' '
tmplist = '%s' % ('***NONE***'),
for o, w in s.recognize(input, morphology):
if w:
# print
tmplist = '\n %s %s \n' % (o, w),
resultlist = resultlist + tmplist
else:
tmplist = '%s%s \n' % (padstring,o,),
resultlist = resultlist + tmplist
padstring = ''
for x in range(padlevel): padstring = padstring + ' '
tmplist = '%s' % (''),
# print
# q = re.compile('(\{|\})')
# q.sub("", resultstring[0])
resultlist = resultlist + tmplist
return resultlist
def read_kimmo_file(filename, gui=None):
path = os.path.expanduser(filename)
try:
f = open(path, 'r')
except IOError, e:
path = os.path.join(get_basedir(), "kimmo", filename)
try:
f = open(path, 'r')
except IOError, e:
if gui:
gui.guiError(str(e))
else:
print str(e)
print "FAILURE"
return ""
print "Loaded:", path
return f
# MAIN
# if __name__ == '__main__': KimmoGUI(None, None)
# if __name__ == '__main__': tkImageView("")
if __name__ == '__main__':
    # Command-line driver.  Arguments are classified by file extension or
    # flag prefix rather than a real option parser.
    filename_lex = ''
    filename_rul = ''
    filename_batch_test = ''
    recognize_string = ''
    generate_string = ''
    console_debug = 0
    for x in sys.argv:
        # if -r/g is defined (recognize or generate word)
        # or batch file is defined
        # run in commandline mode.
        if ".lex" in x: filename_lex = x
        elif ".rul" in x: filename_rul = x
        elif ".batch" in x: filename_batch_test = x
        elif x[0:3] == "-r:": recognize_string = x[3:len(x)]
        elif x[0:3] == "-g:": generate_string = x[3:len(x)]
        elif x == "debug": console_debug = 1
    # Usage hints are always printed, in both GUI and commandline modes.
    print 'Tips:'
    print 'kimmo.cfg is loaded by default, so if you name your project that, '
    print "it will be loaded at startup\n"
    print 'For commandline operation:'
    print ' (for instance if you want to use a different editor)'
    print "To Recognize:"
    print " % python kimmo.py english.lex english.rul -r:cats"
    print "To Generate:"
    print " % python kimmo.py english.lex english.rul -g:cat+s"
    print "To Batch Test:"
    print " % python kimmo.py english.lex english.rul english.batch_test"
    print "With Debug and Tracing:"
    print " % python kimmo.py english.lex english.rul -r:cats debug\n"
    # print filename_lex
    # print filename_rul
    # print filename_batch_test
    # print recognize_string
    # print generate_string
    # Commandline mode requires a rule file plus at least one task.
    if (recognize_string or generate_string or filename_batch_test) and filename_rul:
        kimmoinstance = KimmoControl("","",filename_lex,filename_rul,console_debug)
        # creation failed, stop
        if not kimmoinstance.ok :
            print kimmoinstance.errors
            sys.exit()
        if recognize_string:
            recognize_results = kimmoinstance.recognize(recognize_string)
            print recognize_results
        if generate_string:
            generate_results = kimmoinstance.generate(generate_string)
            print generate_results # remember to format
        if filename_batch_test: # run a batch
            kimmoinstance.batch(filename_batch_test)
    else:
        # No commandline task requested: start the interactive GUI.
        KimmoGUI(None, None)
# constructor takes arguments:
# KimmoControl(lexicon_string, rule_string, lexicon_filename, rule_filename, debug)
# the constructor requires both lexicon and rules for recognition.
# you can provide either the file contents as a string, or as a filename.
# if only used to generate, only a rule file/string is necessary.
# kimmoinstance = KimmoControl("","",'','./englex/english.rul',0)
# kimmoinstance = KimmoControl("","",'kimmo.lex','kimmo.rul',0)
# generate_results = kimmoinstance.generate("cat+s")
# print generate_results
# recognize_results = kimmoinstance.recognize("cats")
# print recognize_results
| Python |
# Natural Language Toolkit: Paradigm Visualisation
#
# Copyright (C) 2005 University of Melbourne
# Author: Will Hardy
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
# Front end to a Python implementation of David
# Penton's paradigm visualisation model.
# Author:
#
# Run: To run, first load a paradigm using
# >>> a = paradigm('paradigm.xml')
# And run the system to produce output
# >>> a.show('table(one, two, three)')
#
# Other methods:
# demo() # a quick demonstration
# a.setFormat('html') # output is formatted as HTML
# a.setFormat('text')          # output is formatted as plain text
# a.setOutput('filename') # output is sent to filename
# a.setOutput('term') # output is sent to terminal
from xml.dom.ext.reader import Sax2
from paradigmquery import ParadigmQuery
import re, os
class Paradigm(object):
"""
Paradigm visualisation class
*Usage*
Simple usage of the system would be:
>>> from paradigm import Paradigm
>>> p = Paradigm('german.xml')
>>> p.show('table(case, gender/number, content)')
Here, a table is generated in HTML format and sent to the file ``output.html``.
The table can be viewed in a browser, and is updated for every new query.
A more advanced usage of the system is show below.
The user simply creates a paradigm p, changes the output format and location,
and calls a dedicated prompt to enter the query:
>>> from paradigm import Paradigm
>>> p = Paradigm('german.xml')
>>> p.setFormat('html')
>>> p.setOutput('test.html')
>>> p.setCSS('simple.css')
>>> p.prompt()
> table(case, gender/number, content)
Please note, however, that plain text tables have not yet been implemented.
"""
def __init__(self, p_filename):
"""
Load the given paradigm
p_filename is a string representing the filename of a paradigm xml file
"""
# Store input paradigm filename
self.loadParadigm(p_filename)
# set default values (text output, to terminal)
self.format = "html"
self.output = "output.html"
self.css = "simple.css"
def prompt(self):
"""
Changes to a dedicated prompt
Type 'exit' or 'quit' to exit
"""
s = ""
while s != "exit":
s = "exit"
try: s = raw_input(">")
except EOFError:
print s
if s == "exit":
return
if s == "quit":
return
if s:
while s[-1] in "!.": s = s[:-1]
self.show(s)
def show(self, p_string):
"""
Process and display the given query
"""
try:
# parse the query
parse = ParadigmQuery(p_string)
except:
print "Could not parse query."
return
try:
# Fetch the parsed tree and make presentation
result = Sentence(self, parse.getTree())
# Check that a presentation actually exists
if result == None:
raise Error
except:
print "Sorry, no result can be returned"
return
try:
# Print HTML output if format is set, otherwise plain text
if self.format == "html":
output = '<html>\n'
# Include CSS if we need to
if self.css <> None:
output += '<link rel="stylesheet" href="'
output += self.css
output += '" type="text/css" media="screen" />\n'
output += '<body>'
output += "<table cellspacing=\"0\" cellpadding=\"0\">"
output += result.getHTML()
output += "</table>\n"
output += '</body></html>\n'
else:
output = result.getText()
except:
output = None
print "--no output--"
return
# Print to terminal if output is set, otherwise to file
if self.output == "term":
print output
else:
print "Output written to file:", self.output
f = open(self.output, 'w')
f.write(output)
# Return happily
return
def setFormat(self, p_string=None):
"""
Set the output format: "html" or "text"
"""
# Default value
if p_string == None:
p_string = "text"
# set to html if requested, otherwise text
if p_string == "html":
self.format = "html"
elif p_string == "text":
self.format = "text"
else:
print "Unknown format:", p_string
print "Valid formats are: text, html"
print "Setting format = text"
self.format = "text"
def setCSS(self, p_string=None):
"""
Set the file location for a Cascading Stylesheet: None or filename
This allows for simple formatting
"""
if p_string <> None:
print "Using CSS file:", p_string
self.output = p_string
def setOutput(self, p_string=None):
"""
Set the output location: "term" or filename
"""
# Default
if p_string == None:
p_string = "term"
# set to term if requested, otherwise filename
if p_string == "term":
print "Directing output to terminal"
else:
print "Directing output to file:", p_string
self.output = p_string
def loadParadigm(self, p_filename ):
"""
Load the given paradigm (XML file)
Attributes are stored in self.attributes
Data are stored in self.data
They can be accessed as follows:
self.attributes['gender'] # list of genders
self.data[6]['gender'] # gender for the sixth data object
self.data[6]['content'] # content for the sixth data object
"""
from en.parser.nltk_lite.corpora import get_basedir
basedir = get_basedir()
# Look for the file
try_filename = os.path.join(get_basedir(), "paradigms", p_filename)
try:
f = open(try_filename)
p_filename = try_filename
except IOError:
print "Cannot find file"
return None
f.close()
# These variables will be set by this method
self.attributes = {} # A new dictionary
self.data = [] # A new list
# XML admin: create Reader object, parse document
reader = Sax2.Reader()
doc = reader.fromStream(p_filename)
# Cycle through the given attributes and add them to self.attributes
# for <name> in <attributes>
attributes = doc.getElementsByTagName('attributes')[0]
for name in attributes.getElementsByTagName('name'):
# Setup a list of attribute values
tmp_list = []
# for each value under name, store in list
for value in name.getElementsByTagName('value'):
tmp_list.append(value.getAttribute('value'))
# Store list of values in dictionary
self.attributes[name.getAttribute('name')] = tmp_list
# Cycle through data objects and add them to self.data
# for <form> in <paradigm>
forms = doc.getElementsByTagName('paradigm')[0]
for form in forms.getElementsByTagName('form'):
# Initialise a temporary dictionary
tmp_dict = {}
for value in form.getElementsByTagName('attribute'):
tmp_dict[value.getAttribute('name')] = value.getAttribute('value')
# Add the new dictionary to the data list
self.data.append(tmp_dict)
# Talk to the user
print "Paradigm information successfully loaded from file:", p_filename
# State the number and print out a list of attributes
print " "*4 + str(len(self.attributes)) + " attributes imported:",
for att in self.attributes:
print att,
print
# State the number of paradigm objects imported
print " "*4 + str(len(self.data)) + " paradigm objects imported."
return
class Sentence(object):
    """
    Dispatches a paradigm operation to the appropriate handler.
    The constructor inspects the query tree's node type and builds a
    Domain, Hierarchy, Table or nested Sentence; every accessor simply
    forwards to that handler object.
    """
    def __init__(self, p_paradigm, p_tree):
        """
        p_paradigm is the given paradigm (attributes and data)
        p_tree is the query tree
        """
        self.paradigm = p_paradigm
        self.tree = p_tree
        self.type = self.getType(self.tree)
        if self.type == 'O':
            # An 'O' node wraps a single child operation: recurse on it.
            self.item = Sentence(self.paradigm, self.tree[0])
        else:
            # Map node-type codes to their handler classes.
            handler_for = {'D': Domain, 'H': Hierarchy, 'T': Table}
            if self.type in handler_for:
                self.item = handler_for[self.type](self.paradigm, self.tree)
    def getList(self):
        """Return values in the form of a list (None when no tree is set)."""
        if self.tree == None:
            return None
        return self.item.getList()
    def getHTML(self):
        """Return values in html (table) form."""
        return self.item.getHTML()
    def getHorizontalHTML(self, p_parentSpan=1):
        """Return values as a horizontal html table."""
        return self.item.getHorizontalHTML(p_parentSpan)
    def getText(self):
        """Return values in plain text form."""
        return self.item.getText()
    def getConditions(self):
        """Return a list of conditions for each combination (cell)."""
        return self.item.getConditions()
    def getMaxWidth(self):
        """Return the width in number of characters."""
        return self.item.getMaxWidth()
    def getSpan(self):
        """Return the span (required for "rowspan"/"colspan" HTML attributes)."""
        return self.item.getSpan()
    def getDepth(self):
        """Return the depth."""
        return self.item.getDepth()
    def getType(self, p_tree=None):
        """
        Determine the type of a tree node (defaults to this node's tree).
        The type code is the second character of the tree's string
        representation.
        """
        if p_tree == None:
            p_tree = self.tree
        return str(p_tree)[1:2]
class Domain(Sentence):
"""
Manages a domain operation
Provides: Domain(paradigm,tree)
"""
def __init__(self, p_paradigm, p_tree):
"""
p_paradigm is the given paradigm (attributes and data)
p_tree is the query tree
"""
self.paradigm = p_paradigm
# Validate that this is a domain
assert self.getType(p_tree) == 'D'
# Store the attribute
self.attribute = p_tree[0]
self.error = None
# Check that the requested attribute is available
try:
self.paradigm.attributes[self.attribute]
except KeyError:
self.error = "I couldn't find this attribute: " + self.attribute
print self.error
def __getitem__(self, p_index):
return self.paradigm.attributes[self.attribute][p_index]
def getList(self):
"""
Return the domain in list form
"""
return self.paradigm.attributes[self.attribute]
def getHTML(self):
"""
Return html for this domain
"""
ret_string = ""
for item in self.getList():
ret_string += "<tr><td>" + item + "</td></tr>"
return ret_string
def getHorizontalHTML(self,p_parentSpan=1):
"""
Return a horizontal html table
"""
ret_string = ""
for item in self.getList():
ret_string += "<td>" + item + "</td>"
return "<tr>" + ret_string*p_parentSpan + "</tr>"
def getText(self):
"""
Return text for this domain
"""
ret_string = ""
for item in self.getList():
ret_string += item + "\n"
return ret_string
def getConditions(self):
"""
Return a list of conditions for each combination (cell)
"""
ret_conds = []
for item in self.getList():
new = {self.attribute: item}
#new[self.attribute] = item
ret_conds.append(new)
return ret_conds
def getMaxWidth(self):
"""
Get max width (chars) for display purposes
"""
max_width = 0
for item in self.getList():
if max_width < len(item):
max_width = len(item)
return max_width
def getSpan(self):
"""
Get the span of this domain (number of elements)
"""
return len(self.getList())
def getDepth(self):
"""
Get the depth of this domain (always one!)
"""
return 1
class Hierarchy(Sentence):
    """
    Manages a hierarchy operation
    Provides: Hierarchy(paradigm,tree)
    A hierarchy nests a leaf operation under each value of a root domain.
    """
    def __init__(self, p_paradigm, p_tree):
        """
        p_paradigm is the given paradigm (attributes and data)
        p_tree is the tree representation of this part of the query (Tree)
        """
        self.paradigm = p_paradigm
        self.error = None
        self.tree = p_tree
        # Validate that this is a Hierarchy
        assert self.getType(p_tree) == 'H'
        # Validate that the root is a Domain
        assert self.getType(p_tree[0]) == 'D'
        # Set the root and the leaf
        self.root = Domain(self.paradigm, p_tree[0])
        self.leaf = Sentence(self.paradigm, p_tree[1])
    def getList(self):
        """
        Return the hierarchy in list form: the cross product of root and
        leaf values, root-major, as [root, leaf] pairs.
        """
        # Get child lists
        rootList = self.root.getList()
        leafList = self.leaf.getList()
        # Combine lists into an array
        ret_val = []
        for item_root in rootList:
            for item_leaf in leafList:
                ret_val.append([item_root,item_leaf])
        return ret_val
    def getHTML(self):
        """
        Return a html table for this hierarchy
        """
        ret_string = ""
        for index in range(len(self.root.getList())):
            # The leaf's HTML begins with "<tr>"; slicing off those four
            # characters lets the leaf's first cells continue the row
            # opened here for the root value.
            leafCells = self.leaf.getHTML()[4:]
            ret_string += "<tr><td rowspan=\"" + str(self.leaf.getSpan()) + "\">" + self.root[index] \
                + "</td>" + leafCells
        return ret_string
    def getHorizontalHTML(self,p_parentSpan=1):
        """
        Return a horizontal html table
        """
        ret_string = ""
        # Add a new cell for each root item
        for index in range(len(self.root.getList())):
            ret_string += "<td colspan=\"" + str(self.leaf.getSpan()) + "\">" \
                + self.root[index] + "</td>"
        # Recursively get the horizontalHTML from the leaf children
        leafCells = self.leaf.getHorizontalHTML(p_parentSpan*len(self.root.getList()))
        # Return the new row and the leaf cells
        return "<tr>" + ret_string*p_parentSpan + "</tr>" + leafCells
    def getText(self):
        """
        Return text for this hierarchy
        """
        ret_string = ""
        # Lengths for rendering display
        max_width_root = self.root.getMaxWidth()
        max_width_leaf = self.leaf.getMaxWidth()
        # add root string and call getText() for leaf node
        # (newlines in the leaf node need to have whitespace added so that
        # continuation lines line up under the leaf column)
        for index in range(len(self.root.getList())):
            ret_string += self.root[index].ljust(max_width_root) + " " \
                + self.leaf.getText().ljust(max_width_leaf).replace('\n',"\n" \
                + " "*(max_width_root+1)) + "\n"
        # Remove any blank lines and return the string
        re_blank = re.compile('\n[ ]+\n')
        return re_blank.sub('\n',ret_string)
    def getConditions(self):
        """
        Return a list of conditions for each combination (cell)
        """
        ret_conds = []
        # For each root item
        for item_r in self.root.getList():
            # for each leaf condition (a fresh dict on every call, so the
            # in-place update below is safe)
            for cond_l in self.leaf.getConditions():
                # Add the root node's condition
                cond_l[self.root.attribute] = item_r
                # Append this to the return list of conditions
                ret_conds.append(cond_l)
        # Return our list
        return ret_conds
    def getMaxWidth(self):
        """
        Return the maximum width (in chars) this hierarchy will take up:
        root column + one space + leaf width.
        """
        return self.root.getMaxWidth() + self.leaf.getMaxWidth() + 1
    def getDepth(self):
        """
        Get the depth of this hierarchy (one level plus the leaf's depth)
        """
        return 1 + self.leaf.getDepth()
    def getSpan(self):
        """
        Get the span (for HTML tables) of this hierarchy
        """
        return self.root.getSpan() * self.leaf.getSpan()
class Table(Sentence):
    """
    Manages a table operation
    Provides: Table(paradigm,tree)
    A table has a horizontal header, a vertical header, and a cell
    expression evaluated for every (row, column) condition pair.
    """
    def __init__(self, p_paradigm, p_tree):
        """
        p_paradigm is the given paradigm (attributes and data)
        p_tree is the tree representation of this part of the query (Tree)
        """
        self.paradigm = p_paradigm
        self.error = None
        self.tree = p_tree
        # Validate that this is a Table
        assert self.getType(p_tree) == 'T'
        # Set the table arguments
        self.horizontal = Sentence(self.paradigm, p_tree[0])
        self.vertical = Sentence(self.paradigm, p_tree[1])
        self.cells = Sentence(self.paradigm, p_tree[2])
    def getList(self):
        """
        Return the table (cells) in list form
        NOTE(review): always returns an empty list; listing table cells
        does not appear to be implemented.
        """
        ret_val = []
        return ret_val
    def getHTML(self):
        """
        Return a html table for this table operation
        """
        # Start with the dead cell: the empty top-left corner spanning the
        # vertical header's columns and the horizontal header's rows.
        dead_cell = "<tr><td colspan=\"" + str(self.vertical.getDepth()) \
            + "\" rowspan=\"" + str(self.horizontal.getDepth()) \
            + "\"></td>"
        # Insert horizontal header.  [4:] drops the leading "<tr>" so the
        # header continues the row opened by dead_cell; the td -> th
        # replacement styles header cells.
        horizontal_header = self.horizontal.getHorizontalHTML()[4:].replace('td','th')
        #horizontal_header = self.horizontal.getHorizontalHTML().replace('td','th')
        # Get the vertical header
        vertical_header = self.vertical.getHTML().replace('td','th')
        str_cells = ""
        # Reset conditions
        conditions = {}
        # get a list of conditions for the rows
        conditions_v = self.vertical.getConditions()
        # for each row
        for cond_v in conditions_v:
            str_cells += "<tr>"
            # get a list of conditions for the columns
            conditions_h = self.horizontal.getConditions()
            # For each column
            for cond_h in conditions_h:
                # Get the data for this cell, given the hori and vert conditions
                cell_data = self.getData(self.cells.tree, dictJoin(cond_v,cond_h))
                # Add the cell
                str_cells += "<td>" + cell_data + "</td>"
            # End the row
            str_cells += "</tr>"
        # VERTICAL HEADER INCLUSION
        # Split rows into a list
        vertical_header_rows = vertical_header.split('</tr>')
        cell_rows = str_cells.replace('<tr>','').split('</tr>')
        # Join two lists, pairing each header row with its cell row
        zipped = zip(vertical_header_rows, cell_rows)
        str_zipped = ""
        for (header,cells) in zipped:
            if header <> '':
                str_zipped += header + cells + "</tr>\n"
        # Return all the elements
        return dead_cell + horizontal_header + str_zipped
    def getHorizontalHTML(self,p_parentSpan=1):
        """
        Return a horizontal html table -- not meaningful for a table node.
        """
        print "?: getHorizontalHTML() called on a table."
        return None
    def getText(self):
        """
        Return text for this table -- plain-text tables are not implemented.
        """
        print "?: getText() for a table? HAHAHAHAHA"
        print "call setFormat('html') if you want to run queries like that"
        return
    def getConditions(self):
        """
        Return conditions for this table -- not meaningful for a table node.
        """
        print "?: getConditions() called on a table. I don't think so."
        return None
    def getMaxWidth(self):
        """
        Return the maximum width this table could take up.
        ... I hope you're not trying to nest tables ...
        """
        return self.cells.getMaxWidth() + self.vertical.getMaxWidth() + 1
    def getSpan(self):
        """
        Return span for this table -- not meaningful for a table node.
        """
        print "WTF: getSpan() called on a table."
        return None
    def getData(self, p_return, p_attDict):
        """
        Retrieve data that matches the given list of attributes
        Returns (an HTML) string of values that match.
        p_return is a tree pointing to the key of the value to include in the return
        p_attDict is a dictionary of conditions.
        """
        output = []
        # The key to report is the first leaf of the return tree.
        return_key = p_return.leaves()[0]
        # For each data object in the paradigm
        for datum in self.paradigm.data:
            inc = True
            # For each given attribute requirement
            for att in p_attDict.keys():
                # If the data object fails the requirement do not include
                if datum[att] != p_attDict[att]:
                    inc = False
                    break
            # If it passed all the tests, include it
            if inc == True:
                output.append(datum[return_key])
        # Return what we found (make sure this is a string)
        if len(output) == 1:
            return output[0]
        else:
            # Hardcoded HTML goodness: multiple matches become a nested table
            # (Obviously this will have to change for text output)
            ret_str = "<table>"
            for item in output:
                ret_str += "<tr><td>" + item + "</td></tr>"
            ret_str += "</table>"
            return ret_str
def dictJoin(dict1, dict2):
    """
    A handy function to join two dictionaries.
    dict2 is updated in place with dict1's entries and returned, so on
    any key overlap dict1 wins! (just make sure this doesn't happen)
    """
    dict2.update(dict1)
    return dict2
def demo():
    """Quick demonstration: load the german.xml paradigm, then show a
    domain, a hierarchy (as text on the terminal), and a table (as HTML
    written to demo.html).  Each call is echoed before it runs."""
    # Print the query
    print """
================================================================================
  Load: Paradigm(file)
================================================================================
"""
    print
    print ">>> a = Paradigm('german.xml')"
    print
    a = Paradigm('german.xml')
    print
    print ">>> a.setOutput('term')"
    print
    a.setOutput('term')
    print
    print ">>> a.setFormat('text')"
    print
    a.setFormat('text')
    # Print a domain
    print """
================================================================================
  Domain: case
================================================================================
"""
    print
    print ">>> a.show('case')"
    print
    a.show('case')
    # Print a hierarchy
    print """
================================================================================
  Hierarchy: case/gender
================================================================================
"""
    print
    print ">>> a.show('case/gender')"
    print
    a.show('case/gender')
    # Print a table
    print """
================================================================================
  Table: table(case/number,gender,content)
================================================================================
"""
    print
    print ">>> a.setOutput('demo.html')"
    print
    a.setOutput('demo.html')
    print
    print ">>> a.setFormat('html')"
    print
    a.setFormat('html')
    print
    print ">>> a.show('table(case/number,gender,content)')"
    print
    a.show('table(case/number,gender,content)')
    # Some space
    print
# Script entry point: run the demonstration when executed directly.
if __name__ == '__main__':
    demo()
| Python |
# Natural Language Toolkit: Concordance System
#
# Copyright (C) 2005 University of Melbourne
# Author: Peter Spiller
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
from en.parser.nltk_lite.corpora import brown
from math import *
import re
from en.parser.nltk_lite.probability import *
class SentencesIndex(object):
    """Class implementing an index of a collection of sentences.
    Given a list of sentences, where each sentence is a list of words,
    this class generates an index of the list. Each word should be a (word, POS
    tag) pair. The index is stored as a dictionary, with the hashable items as
    keys and a list of (sentence number, word number) tuples as values. This
    class also generates a list of sentence lengths.
    """
    def __init__(self, sentences):
        """ Constructor. Takes the list of sentences to index.
        @type sentences: list
        @param sentences: List of sentences to index. Sentences should be
            lists of (string, string) pairs.
        """
        self.index = {}
        self.lengths = []
        # enumerate() replaces the hand-maintained counters, and
        # setdefault().append() extends each posting list in O(1) --
        # the previous 'get(word, []) + [...]' rebuilt the list on every
        # occurrence, making indexing quadratic for frequent words.
        for sentenceCount, sentence in enumerate(sentences):
            # record the sentence's length in the list of sentence lengths
            self.lengths.append(len(sentence))
            for wordCount, word in enumerate(sentence):
                self.index.setdefault(word, []).append((sentenceCount, wordCount))
    def getIndex(self):
        """ Returns the index dictionary.
        @rtype: dictionary
        @returns: The dictionary containing the index.
        """
        return self.index
    def getSentenceLengths(self):
        """ Returns the list of sentence lengths.
        Element 0 is the length of the first sentence, element 1 the second,
        etc.
        @rtype: list
        @returns: List of lengths of sentences.
        """
        return self.lengths
class IndexConcordance(object):
    """ Class that generates concordances from a list of sentences.
    Uses an index for efficiency. If a SentencesIndex object is provided,
    it will be used, otherwise one will be constructed from the list of
    sentences. When generating a concordance, the supplied regular expression
    is used to filter the list of words in the index. Any that match are looked
    up in the index, and their lists of (sentence number, word number) pairs are
    used to extract the correct amount of context from the sentences.
    Although this class also allows regular expressions to be specified for the
    left and right context, they are not used on the index. If only left/right
    regexps are provided, the class will essentially generate a concordance for
    every word in the corpus, then filter it with the regexps. This will not be
    very efficient and requires very large amounts of memory.
    @cvar SORT_WORD: Constant for sorting by target word.
    @cvar SORT_POS: Constant for sorting by target word's POS tag.
    @cvar SORT_NUM: Constant for sorting by sentence number.
    @cvar SORT_RIGHT_CONTEXT: Constant for sorting by the first word of the
        right context.
    """
    # constants for different types of sort; pass one as the 'sort'
    # argument of formatted()/raw()
    SORT_WORD = 0
    SORT_POS = 1
    SORT_NUM = 2
    SORT_RIGHT_CONTEXT = 3
def __init__(self, sentences, index=None):
    """ Constructor.
    Arguments:
    @type sentences: list
    @param sentences: List of sentences to create a concordance for.
        Sentences should be lists of (string, string) pairs.
    @type index: SentencesIndex
    @param index: SentencesIndex object to use as an index. If this is
        not provided, one will be generated.
    """
    self.sentences = sentences
    self.index = index
    # generate an index if one wasn't provided.  'is None' is the correct
    # identity test here (the previous '== None' would invoke equality
    # methods on a supplied index object).
    if self.index is None:
        self.index = SentencesIndex(self.sentences)
def formatted(self, leftRegexp=None, middleRegexp=".*", rightRegexp=None,
              leftContextLength=3, rightContextLength=3, contextInSentences=False,
              contextChars=50, maxKeyLength=0, showWord=True,
              sort=0, showPOS=True, flipWordAndPOS=False, verbose=False):
    """Generates and displays keyword-in-context formatted concordance data.
    This is a convenience method that combines raw() and display()'s
    options. Unless you need raw output, this is probably the most useful
    method.
    @type leftRegexp: string
    @param leftRegexp: Regular expression applied to the left context
    to filter output. Defaults to None.
    @type middleRegexp: string
    @param middleRegexp: Regular expression applied to target word to
    filter output. Defaults to ".*" (ie everything).
    @type rightRegexp: string
    @param rightRegexp: Regular expression applied to the right context
    to filter output. Defaults to None.
    @type leftContextLength: number
    @param leftContextLength: Length of left context. Defaults to 3.
    @type rightContextLength: number
    @param rightContextLength: Length of right context. Defaults to 3.
    @type contextInSentences: number
    @param contextInSentences: Determines whether the context lengths
    arguments are in words or sentences. If false, the context lengths
    are in words - a rightContextLength argument of 2 results in two
    words of right context. If true, a rightContextLength argument of 2
    results in a right context consisting of the portion of the target
    word's sentence to the right of the target, plus the two sentences
    to the right of that sentence. Defaults to False.
    @type contextChars number
    @param contextChars: Amount of context to show. If set to less than
                         0, does not limit amount of context shown
                         (may look ugly). Defaults to 55.
    @type maxKeyLength: number
    @param maxKeyLength: Max number of characters to show for the
                         target word. If 0 or less, this value is
                         calculated so as to fully show all target
                         words. Defaults to 0.
    @type showWord: boolean
    @param showWord: Whether to show words. Defaults to True.
    @type sort: integer
    @param sort: Should be set to one the provided SORT constants. If
    SORT_WORD, the output is sorted on the target word. If SORT_POS, the
    output is sorted on the target word's POS tag. If SORT_NUM, the
    output is sorted by sentence number. If SORT_RIGHT_CONTEXT, the
    output is sorted on the first word of the right context. Defaults to
    SORT_WORD.
    @type showPOS: boolean
    @param showPOS: Whether to show POS tags. Defaults to True.
    @type flipWordAndPOS: boolean
    @param flipWordAndPOS: If true, displays POS tags first instead of
    words (ie prints 'cc/and' instead of 'and/cc'). Defaults to False.
    @type verbose: boolean
    @param verbose: Displays some extra status information. Defaults
    to False.
    """
    # Delegate: raw() builds the concordance data and self.format()
    # renders it.  NOTE(review): no format() method is visible in this
    # portion of the file -- presumably it is defined later in the class;
    # confirm it accepts these arguments in this order.
    self.format(self.raw(leftRegexp, middleRegexp, rightRegexp, leftContextLength,
                rightContextLength, contextInSentences, sort, verbose), contextChars,
                maxKeyLength, showWord, showPOS, flipWordAndPOS, verbose)
def raw(self, leftRegexp=None, middleRegexp=".*", rightRegexp=None,
leftContextLength=3, rightContextLength=3, contextInSentences=False,
sort=0, verbose=False):
""" Generates and returns raw concordance data.
Regular expressions supplied are evaluated over the appropriate part of
each line of the concordance. For the purposes of evaluating the regexps,
the lists of (word, POS tag) tuples are flattened into a space-separated
list of word/POS tokens (ie the word followed by '/' followed by the POS
tag). A regexp like '^must/.*' matches the word 'must' with any POS tag,
while one like '.*/nn$' matches any word with a POS tag of 'nn'. All
regexps are evaluated over lowercase versions of the text.
@type leftRegexp: string
@param leftRegexp: Regular expression applied to the left context
to filter output. Defaults to None.
@type middleRegexp: string
@param middleRegexp: Regular expression applied to target word to
filter output. Defaults to ".*" (ie everything).
@type rightRegexp: string
@param rightRegexp: Regular expression applied to the right context
to filter output. Defaults to None.
@type leftContextLength: number
@param leftContextLength: Length of left context. Defaults to 3.
@type rightContextLength: number
@param rightContextLength: Length of right context. Defaults to 3.
@type contextInSentences: number
@param contextInSentences: Determines whether the context lengths
arguments are in words or sentences. If false, the context lengths
are in words - a rightContextLength argument of 2 results in two
words of right context. If true, a rightContextLength argument of 2
results in a right context consisting of the portion of the target
word's sentence to the right of the target, plus the two sentences
to the right of that sentence. Defaults to False.
@type sort: integer
@param sort: Should be set to one the provided SORT constants. If
SORT_WORD, the output is sorted on the target word. If SORT_POS, the
output is sorted on the target word's POS tag. If SORT_NUM, the
output is sorted by sentence number. If SORT_RIGHT_CONTEXT, the
output is sorted on the first word of the right context. Defaults to
SORT_WORD.
@type verbose: boolean
@param verbose: Displays some extra status information. Defaults
to False.
@rtype: list
        @return: Raw concordance output. Returned as a list of
([left context], target word, [right context], target word
sentence number) tuples.
"""
# compile the middle regexp.
reg = re.compile(middleRegexp)
if verbose:
print "Matching the following target words:"
wordLocs = []
# get list of (sentence, word) pairs to get context for
for item in self.index.getIndex().iteritems():
if reg.match("/".join([item[0][0].lower(), item[0][1]])):
if verbose:
print "/".join(item[0])
wordLocs.append(item[1])
print ""
items = []
# if context lengths are specified in words:
if contextInSentences == False:
# for each list of (sentence, word offset in sentence) pairs:
for wordList in wordLocs:
# for each (sentence, word offset in sentence) pair:
for sentenceNum, offset in wordList:
# set pointers to the left- and rightmost sentences to be
# looked at to the sentence the target word is in
leftCorpusIndex = sentenceNum
rightCorpusIndex = sentenceNum
# number of words to include in the left context is
# initially everything in the sentence up to the target
leftLength = offset
# number of words to include in the left context is
# initially everything in the sentence after the target
rightLength = self.index.getSentenceLengths()[sentenceNum] - offset - 1
# while the length of the left context is less than what we
# need, keep decreasing the left corpus index (ie adding
# sentences to the left context).
while leftLength < leftContextLength:
leftCorpusIndex -= 1
# if the new corpus index would fall off the end of the
# list, stop at 0
if(leftCorpusIndex < 0):
leftCorpusIndex = 0
break
# adjust length and offset
leftLength += self.index.getSentenceLengths()[leftCorpusIndex]
offset += self.index.getSentenceLengths()[leftCorpusIndex]
# while the length of the right context is less than what we
# need, keep increasing the right corpus index (ie adding
# sentences to the right context).
while rightLength < rightContextLength:
rightCorpusIndex += 1
try:
rightLength += self.index.getSentenceLengths()[rightCorpusIndex]
# if the new corpus index falls off the end of the list,
# stop at the end
except IndexError:
rightCorpusIndex -= 1
break
# grab all sentences from the left to right corpus indices,
# then flatten them into a single list of words
sents = self.sentences[leftCorpusIndex:rightCorpusIndex+1]
words = []
for sentence in sents:
for word in sentence:
words.append(word)
# select the appropriate sections of context from the list
# of words
left = words[offset-leftContextLength:offset]
target = words[offset]
right = words[offset+1:offset+1+rightContextLength]
items.append((left, target, right, sentenceNum))
# if context lengths are specified in sentences:
else:
# for each list of (sentence, word offset in sentence) pairs:
for wordList in wordLocs:
# for each list of (sentence, word offset in sentence) pairs:
for sentenceNum, offset in wordList:
# set pointers to the left- and rightmost sentences to be
# looked at to the sentence the target word is in
leftCorpusIndex = sentenceNum
rightCorpusIndex = sentenceNum
# number of words to include in the left context is
# initially everything in the sentence up to the target
leftLength = offset
# number of words to include in the left context is
# initially everything in the sentence after the target
rightLength = self.index.getSentenceLengths()[sentenceNum] - offset - 1
# keep track of the number of sentences included in the
# left/right context
leftSents = 0;
rightSents = 0;
# while we don't have enough sentences in the left context,
# keep decreasing the left corpus index
while leftSents < leftContextLength:
leftCorpusIndex -= 1
# if the new corpus index would fall off the end of the
# list, stop at 0
if(leftCorpusIndex < 0):
leftCorpusIndex = 0
break
leftLength += self.index.getSentenceLengths()[leftCorpusIndex]
offset += self.index.getSentenceLengths()[leftCorpusIndex]
leftSents += 1
# while we don't have enough sentences in the right context,
# keep increasing the right corpus index
while rightSents < rightContextLength:
rightCorpusIndex += 1
try:
rightLength += self.index.getSentenceLengths()[rightCorpusIndex]
rightSents += 1
# if the new corpus index falls off the end of the list,
# stop at the end
except IndexError:
rightCorpusIndex -= 1
break
# grab all sentences from the left to right corpus indices,
# then flatten them into a single list of words
sents = self.sentences[leftCorpusIndex:rightCorpusIndex+1]
words = []
for sentence in sents:
for word in sentence:
words.append(word)
# select the appropriate sections of context from the list
# of words
left = words[0:offset]
target = words[offset]
right = words[offset+1:]
items.append((left, target, right, sentenceNum))
if verbose:
print "Found %d matches for target word..." % len(items)
# sort the concordance
if sort == self.SORT_WORD:
if verbose:
print "Sorting by target word..."
items.sort(key=lambda i:i[1][0].lower())
elif sort == self.SORT_POS:
if verbose:
print "Sorting by target word POS tag..."
items.sort(key=lambda i:i[1][1].lower())
elif sort == self.SORT_NUM:
if verbose:
print "Sorting by sentence number..."
items.sort(key=lambda i:i[3])
elif sort == self.SORT_RIGHT_CONTEXT:
if verbose:
print "Sorting by first word of right context..."
items.sort(key=lambda i:i[2][0][0])
# if any regular expressions have been given for the context, filter
# the concordance using them
filtered = []
filterBool = False
if leftRegexp != None or rightRegexp != None:
filterBool = True
if filterBool:
leftRe=None
rightRe=None
if leftRegexp != None:
if verbose:
print "Filtering on left context..."
leftRe = re.compile(leftRegexp)
if rightRegexp != None:
if verbose:
print "Filtering on right context..."
rightRe = re.compile(rightRegexp)
for item in items:
if self._matches(item, leftRe, rightRe):
filtered.append(item)
if filterBool:
source = filtered
else:
source = items
return source
    def format(self, source, contextChars=55, maxKeyLength=0, showWord=True,
        showPOS=True, flipWordAndPOS=False, verbose=False):
        """Formats raw concordance output produced by raw().
        Displays a concordance in keyword-in-context (KWIC) style format:
        one line per match, with the target word centred in a fixed-width
        column and the contexts cropped/padded around it.
        @type source: list
        @param source: Raw concordance output to format. Expects a list of
        ([left context], target word, [right context], target
        word sentence number) tuples.
        @type contextChars: number
        @param contextChars: Amount of context to show. If set to less than
        0, does not limit amount of context shown (may look ugly). Defaults to 55.
        @type maxKeyLength: number
        @param maxKeyLength: Max number of characters to show for the
        target word. If 0 or less, this value is
        calculated so as to fully show all target
        words. Defaults to 0.
        @type showWord: boolean
        @param showWord: Whether to show words. Defaults to True.
        @type showPOS: boolean
        @param showPOS: Whether to show POS tags. Defaults to True.
        @type flipWordAndPOS: boolean
        @param flipWordAndPOS: If true, displays POS tags first instead of
        words (ie prints 'cc/and' instead of 'and/cc'). Defaults to False.
        @type verbose: boolean
        @param verbose: Displays some extra status information. Defaults
        to False.
        """
        # flatten lists of tokens into strings
        lines = []
        maxMiddleLength = -1
        # generate intermediate list of string tuples
        for line in source:
            # flatten left context tokens into a single string, joining words
            # and their POS tag with a '/' (if both are shown).
            left = ""
            for item in line[0]:
                # a ("", "") token marks an empty context placeholder
                if item[0] == "" and item[1] == "":
                    left = ""
                elif showWord and (not showPOS):
                    left += item[0] + " "
                elif (not showWord) and showPOS:
                    left += item[1] + " "
                elif flipWordAndPOS:
                    left += item[1] + "/" + item[0] + " "
                else:
                    left += "/".join(item) + " "
            # flatten target word into a single string, joining the word and
            # its POS tag with a '/' (if both are shown).
            if showWord and (not showPOS):
                middle = line[1][0]
            elif (not showWord) and showPOS:
                middle = line[1][1]
            elif flipWordAndPOS:
                # NOTE(review): this branch appends a trailing space, unlike
                # the other three -- looks unintended; confirm before changing.
                middle = line[1][1] + "/" + line[1][0] + " "
            else:
                middle = "/".join(line[1])
            # track the widest key string so the middle column can be padded
            if len(middle) > maxMiddleLength:
                maxMiddleLength = len(middle)
            # flatten right context tokens into a single string, joining words
            # and their POS tag with a '/' (if both are shown).
            right = ""
            for item in line[2]:
                if item[0] == "" and item[1] == "":
                    right = ""
                elif showWord and (not showPOS):
                    right += item[0] + " "
                elif (not showWord) and showPOS:
                    right += item[1] + " "
                elif flipWordAndPOS:
                    right += item[1] + "/" + item[0] + " "
                else:
                    right += "/".join(item) + " "
            num = line[3]
            lines.append((middle, left, right, num))
        # crop and justify strings to generate KWIC-format output
        count = 0
        for middle, left, right, num in lines:
            # calculate amount of left padding needed
            leftPaddingLength = contextChars - len(left)
            if leftPaddingLength < 0:
                leftPaddingLength = 0
            # crop the left context on the left, keeping the words nearest
            # to the target word
            if len(left) > contextChars and contextChars > -1:
                left = left[-contextChars:]
            left = " "*leftPaddingLength + left
            # crop the right context on the right
            if contextChars > -1:
                right = right[0:contextChars]
            # add sentence numbers, overwriting the start of the left padding
            left = str(num) + ": " + left[len(str(num))+2 : ]
            # calculate amount of middle padding needed; an explicit
            # maxKeyLength overrides the computed widest-key width
            if maxKeyLength > 0:
                maxMiddleLength = maxKeyLength
            # centre the key: any odd leftover space goes on the left
            lPad = int(ceil(max(maxMiddleLength - len(middle), 0) / 2.0))
            rPad = int(floor(max(maxMiddleLength - len(middle), 0) / 2.0))
            middle = " "*lPad + middle + " "*rPad
            print left + "| " + middle + " | " + right + " "
            count += 1
        if verbose:
            print "\n" + repr(count) + " lines"
def _matches(self, item, leftRe, rightRe):
""" Private method that runs the given regexps over a raw concordance
item and returns whether they match it.
"""
left = item[0]
right = item[2]
# flatten left and right contexts
leftString = ""
for token in left:
leftString += "/".join(token) + " "
rightString = ""
for token in right:
rightString += "/".join(token) + " "
# see if regexps match
ok = True
if leftRe != None and leftRe.match(leftString) == None:
ok = False
if rightRe != None and rightRe.match(rightString) == None:
ok = False
if ok:
return True
else:
return False
class Aggregator(object):
""" Class for aggregating and summarising corpus concordance data.
This class allows one or more sets of concordance data to be summarised and
displayed. This is useful for corpus linguistic tasks like counting the
number of occurences of a particular word and its different POS tags in a
given corpus, or comparing these frequencies across different corpora. It
creates a FreqDist for each set of concordance data, counting how often each
unique entry appears in it.
An example of how to use this class to show the frequency of the five most
common digrams of the form "must/md X/Y" in the Brown Corpus sections a
and g::
concA = IndexConcordance(list(brown.tagged('a')))
rawA = concA.raw(middleRegexp="^must/md$", leftContextLength=0, rightContextLength=1)
concG = IndexConcordance(list(brown.tagged('g')))
rawG = concG.raw(middleRegexp="^must/md$", leftContextLength=0, rightContextLength=1)
agg = Aggregator()
agg.add(rawA, "Brown Corpus A")
agg.add(rawG, "Brown Corpus G")
agg.formatted(showFirstX=5)
Output:
Brown Corpus A
------------------------------
must/md be/be 17
must/md have/hv 5
must/md not/* 3
must/md play/vb 2
must/md ''/'' 1
Brown Corpus G
------------------------------
must/md be/be 38
must/md have/hv 21
must/md ,/, 6
must/md not/* 5
must/md always/rb 3
"""
# text for 'other' row in output tables
_OTHER_TEXT = "<OTHER>"
# text for 'total' row in output tables
_TOTAL_TEXT = "<TOTAL>"
def __init__(self, inputList=None):
""" Constructor.
@type inputList: list
@param inputList: List of (raw concordance data, name) tuples to be
entered into the aggregator. Defaults to None.
"""
self._outputSets = []
if inputList != None:
for (item, n) in inputList:
self.add(item, name=n)
def add(self, raw, name):
""" Adds the given set of raw concordance output to the aggregator.
@type raw: list
@param raw: Raw concordance data (produced by IndexConcordance.raw()).
Expects a list of ([left context], target word,
[right context], target word sentence number) tuples.
@type name: string
@param name: Name to associate with the set of data.
"""
self._outputSets.append((raw, name));
def remove(self, name):
""" Removes all sets of raw concordance output with the given name.
@type name: string
@param name: Name of data set to remove.
"""
for item in self._outputSets:
if item[1] == name:
self._outputSets.remove(item)
def formatted(self, useWord=True, usePOS=True, normalise=False,
threshold=-1, showFirstX=-1, decimalPlaces=4,
countOther=False, showTotal=False):
""" Displays formatted concordance summary information.
This is a convenience method that combines raw() and display()'s
options. Unless you need raw output, this is probably the most useful
method.
@type useWord: boolean
@param useWord: Include the words in the count. Defaults to True.
@type usePOS: boolean
@param usePOS: Include the POS tags in the count. Defaults to
False.
@type normalise: boolean
@param normalise: If true, normalises the frequencies for each set
of concordance output by dividing each key's frequency by the total
number of samples in that concordances's FreqDist. Allows easier
comparison of results between data sets. Care must be taken when
combining this option with the threshold option, as any threshold
of 1 or more will prevent any output being displayed. Defaults to
False.
@type threshold: number
@param threshold: Frequency display threshold. Results below this
frequency will not be displayed. If less than 0, everything will be
displayed. Defaults to -1.
@type showFirstX: number
@param showFirstX: Only show this many results, starting with the
most frequent. If less than 0, everything will be displayed.
Defaults to -1.
@type decimalPlaces: integer
@param decimalPlaces: Number of decimal places of accuracy to
display. Used when displaying non-integers with the normalise
option. Defaults to 4.
@type countOther: boolean
@param countOther: If true, any samples not shown (due to their
frequency being below the given thershold or because they were
after the number of results specified by the showFirstX argument)
will be combined into one sample. This sample's frequency is the
sum of all unshown sample's frequencies. Defaults to False.
@type showTotal: boolean
@param showTotal: If true, prints the sum of all frequencies (of
the entire FreqDist, not just of the samples displayed.) Defaults
to False.
"""
output, maxKeyLength = self.raw(useWord, usePOS)
self.format(output, maxKeyLength, threshold, showFirstX,
decimalPlaces, normalise, countOther, showTotal)
def raw(self, useWord=True, usePOS=True):
""" Generates raw summary information.
Creates a FreqDist for each set of concordance output and uses it to
count the frequency of each line in it. The concordance output is
flattened from lists of tokens to strings, as lists cannot be hashed.
The list of FreqDists is returned, as well as the length of the longest
string (used for formatted display).
@type useWord: boolean
@param useWord: Include the words in the count. Defaults to True.
@type usePOS: boolean
@param usePOS: Include the POS tags in the count. Defaults to
False.
@rtype: list, number
@returns: A list of (FreqDist, name) pairs, and the length of the
longest key in all the FreqDists.
"""
output = []
maxKeyLength = 0
# for each set of raw concordance data:
for (rawConcOutput, name) in self._outputSets:
# initialise a FreqDist
dist = FreqDist()
# for each item in the raw concordance output:
for (left, middle, right, num) in rawConcOutput:
# flatten the lists of tokens so they can be hashed in
# the FreqDist
leftList = []
for word in left:
if usePOS == False and useWord == True:
leftList.append(word[0].lower())
elif usePOS == True and useWord == False:
leftList.append(word[1].lower())
else:
leftList.append(word[0].lower() + "/" + word[1].lower())
try:
if usePOS == False and useWord == True:
midString = middle[0].lower()
elif usePOS == True and useWord == False:
midString = middle[1].lower()
else:
midString = middle[0].lower() + "/" + middle[1].lower()
except IndexError:
midString = ""
rightList = []
for word in right:
if usePOS == False and useWord == True:
rightList.append(word[0].lower())
elif usePOS == True and useWord == False:
rightList.append(word[1].lower())
else:
rightList.append(word[0].lower() + "/" + word[1].lower())
# join the tokens together to form a key string
key = " ".join(leftList) + " " + midString + " " + " ".join(rightList)
# keep track of the longest key length
if len(key) > maxKeyLength:
maxKeyLength = len(key)
# increment the FreqDist's count for this key
dist.inc(key)
# add this FreqDist and name to the output
output.append((dist, name))
# return the output and maximum key length
return output, maxKeyLength
def format(self, output, maxKeyLength=20, threshold=-1, showFirstX=-1,
decimalPlaces=4, normalise=False, countOther=False,
showTotal=False):
""" Displays concordance summary information.
Formats and displays information produced by raw().
@type output: list
@param output: List of (FreqDist, name) pairs (as produced by raw()).
@type maxKeyLength: number
@param maxKeyLength: Length of longest key. Defaults to 20.
@type normalise: boolean
@param normalise: If true, normalises the frequencies for each set
of concordance output by dividing each key's frequency by the total
number of samples in that concordances's FreqDist. Allows easier
comparison of results between data sets. Care must be taken when
combining this option with the threshold option, as any threshold
of 1 or more will prevent any output being displayed. Defaults to
False.
@type threshold: number
@param threshold: Frequency display threshold. Results below this
frequency will not be displayed. If less than 0, everything will be
displayed. Defaults to -1.
@type showFirstX: number
@param showFirstX: Only show this many results, starting with the
most frequent. If less than 0, everything will be displayed.
Defaults to -1.
@type decimalPlaces: integer
@param decimalPlaces: Number of decimal places of accuracy to
display. Used when displaying non-integers with the normalise
option. Defaults to 4.
@type countOther: boolean
@param countOther: If true, any samples not shown (due to their
frequency being below the given thershold or because they were
after the number of results specified by the showFirstX argument)
will be combined into one sample. This sample's frequency is the
sum of all unshown sample's frequencies. Defaults to False.
@type showTotal: boolean
@param showTotal: If true, prints the sum of all frequencies (of
the entire FreqDist, not just of the samples displayed.) Defaults
to False.
"""
# for each FreqDist:
for (dist, name) in output:
x = 0
other = 0
total = 0
print name
print "-"*(maxKeyLength + 7)
# for each key:
for key in dist.sorted_samples():
# keep track of how many samples shown, if using the showFirstX
# option
#if showFirstX > 0 and x >= showFirstX:
# break
# get and format the sample's frequency
if normalise:
count = 1.0 * dist.count(key) / dist.N()
countString = str(count)[0:decimalPlaces + 2]
else:
count = dist.count(key)
countString = str(count)
total += count
# if the count is less than the threshold value, or we've
# already shown X samples, add this sample's frequency to the
# 'other' bin
if count < threshold or (showFirstX > 0 and x >= showFirstX):
other += count
else:
print key + " "*(maxKeyLength - len(key) + 1) + countString
x += 1
if countOther:
if normalise:
count = 1.0 * other
countString = str(count)[0:decimalPlaces + 2]
else:
count = other
countString = str(count)
print self._OTHER_TEXT + " "*(maxKeyLength - len(self._OTHER_TEXT) + 1) + countString
if showTotal:
if normalise:
count = 1.0 * total
countString = str(count)[0:decimalPlaces + 2]
else:
count = total
countString = str(count)
print self._TOTAL_TEXT + " "*(maxKeyLength - len(self._TOTAL_TEXT) + 1) + countString
print ""
def demo():
    """
    Demonstrates how to use IndexConcordance and Aggregator.
    """
    print "Reading Brown Corpus into memory..."
    corpus = list(brown.tagged(('a','j')))
    print "Generating index..."
    ic = IndexConcordance(corpus)
    print "Showing all occurences of 'plasma' in the Brown Corpus..."
    ic.formatted(middleRegexp="^plasma/.*", verbose=True)
    print "Investigating the collocates of 'deal' and derivatives..."
    agg = Aggregator()
    # left collocates: one word of left context only, filtered to plain
    # word/tag material (letters, digits, underscores, spaces and '/')
    agg.add(ic.raw(middleRegexp="^deal", leftContextLength=1, rightContextLength=0,
    leftRegexp="^(\w|\s|/)*$"), "Brown Corpus 'deal' left collocates")
    # right collocates: symmetrical, one word of right context only
    agg.add(ic.raw(middleRegexp="^deal", leftContextLength=0, rightContextLength=1,
    rightRegexp="^(\w|\s|/)*$"), "Brown Corpus 'deal' right collocates")
    agg.formatted(showFirstX=5, usePOS=False)
if __name__ == '__main__':
    demo()
| Python |
# Natural Language Toolkit: Tokenizers
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@csse.unimelb.edu.au>
# Trevor Cohn <tacohn@csse.unimelb.edu.au>
# URL: <http://nltk.sourceforge.net>
# For license information, see LICENSE.TXT
"""
Functions for tokenizing a text, based on a regular expression
which matches tokens or gaps.
"""
import re, sre_parse, sre_constants, sre_compile
# Predefined tokenization patterns. All are compiled by the functions below
# with re.UNICODE | re.MULTILINE | re.DOTALL.
WHITESPACE = r'\s+'   # a run of whitespace (used as a gap pattern)
NEWLINE = r'\n'   # a single newline (gap pattern for line())
BLANKLINE = r'\s*\n\s*\n\s*'   # a blank line plus surrounding whitespace (gap pattern)
WORDPUNCT = r'[a-zA-Z]+|[^a-zA-Z\s]+'   # an alphabetic run, or a run of non-alphabetic non-space
SHOEBOXSEP = r'^\\'   # a backslash field marker at the start of a line (gap pattern)
TREEBANK = r'^\(.*?(?=^\(|\Z)'   # one top-level '(' tree, up to the next '(' or end of input
def _remove_group_identifiers(parsed_re):
"""
Modifies the given parsed regular expression, replacing all groupings
(as indicated by parenthesis in the regular expression string) with
non-grouping variants (indicated with '(?:...)'). This works on the
output of sre_parse.parse, modifing the group indentifier in
SUBPATTERN structures to None.
@param parsed_re: the output of sre_parse.parse(string)
@type parsed_re: C{SubPattern}
"""
if isinstance(parsed_re, sre_parse.SubPattern):
# If it's a SubPattern, replace each item with its processed
# equivalent. These classes are mutable, so that in-place
# modification is allowed.
for i in range(len(parsed_re)):
parsed_re[i] = _remove_group_identifiers(parsed_re[i])
return parsed_re
elif isinstance(parsed_re, list) or isinstance(parsed_re, tuple):
# Otherwise, if it's a sequence, check for the tell-tale
# SUBPATTERN item and repair the sub item if needed
to_process = list(parsed_re)
if to_process[0] == sre_constants.SUBPATTERN:
# replace next int with None
sub_item = list(to_process[1])
sub_item[0] = None
to_process[1] = tuple(sub_item)
# Process each item, in the case of nested SUBPATTERNS
processed = map(_remove_group_identifiers, to_process)
# Coerce back into the original type
if isinstance(parsed_re, list):
return processed
else:
return tuple(processed)
else:
# Don't need to do anything to other types
return parsed_re
# Replace any grouping parentheses with non-grouping ones. We
# need to do this, because the list returned by re.sub will
# contain an element corresponding to every set of grouping
# parentheses. We must not touch escaped parentheses, and
# need to handle the case of escaped escapes (e.g. "\\(").
# We also need to handle nested parentheses, which means our
# regexp contexts must be zero-width. There are also issues with
# parenthesis appearing in bracketed contexts, hence we've
# operated on the intermediate parse structure from sre_parse.
def _compile(regexp):
    """
    Compiles the given pattern for use by token_split(): all grouping
    parentheses inside the pattern are neutralised (made non-grouping),
    then a single capturing group is wrapped around the whole pattern so
    the matched material can be retrieved.
    @param regexp: the regular expression string to compile
    @type regexp: C{string}
    @return: a compiled pattern object (flags UNICODE | MULTILINE | DOTALL)
    """
    parsed = sre_parse.parse(regexp)
    parsed = _remove_group_identifiers(parsed)
    # Add grouping parentheses around the regexp; this will allow
    # us to access the material that was split on.
    # Need to set the Pattern to expect a single group
    pattern = sre_parse.Pattern()
    pattern.groups += 1
    grouped = sre_parse.SubPattern(pattern)
    grouped.append((sre_constants.SUBPATTERN, (1, parsed)))
    return sre_compile.compile(grouped, re.UNICODE | re.MULTILINE | re.DOTALL)
def token_split(text, pattern, advanced=False):
    """
    @return: An iterator that generates tokens and the gaps between them
    @param text: the string, or iterator of strings, to split
    @type text: C{string} or C{iter(string)}
    @param pattern: the regular expression matching token (or gap) material
    @type pattern: C{string}
    @param advanced: if True, pre-process the pattern with _compile() so
        that grouping parentheses inside it are neutralised
    @type advanced: C{boolean}
    """
    if advanced:
        regexp = _compile(pattern) # pattern contains ()
    else:
        regexp = re.compile(pattern, re.UNICODE | re.MULTILINE | re.DOTALL)
    # If it's a single string, then convert it to a tuple
    # (which we can iterate over, just like an iterator.)
    if isinstance(text, (str, unicode)):
        text = (text,)
    # Process each substring returned by the iterator, in turn.
    # "leftover" is used to record any leftover material when we
    # move on to a new substring.
    leftover = ''
    offset = 0
    for substring in text:
        position = 0 # The position within the substring
        # Skip any matching material in the substring:
        match = regexp.match(substring)
        if match:
            # yield the (possibly empty) gap before the match, together with
            # any material carried over from the previous substring
            yield leftover+substring[position:match.start()]
            yield substring[match.start():match.end()]
            position = match.end()
            leftover = ''
        # Walk through the substring, looking for matches.
        while position < len(substring):
            match = regexp.search(substring, position)
            if match:
                yield leftover+substring[position:match.start()]
                yield substring[match.start():match.end()]
                position = match.end()
                leftover = ''
            else:
                # no more matches in this substring: carry the tail over to
                # the next substring instead of yielding a partial gap
                leftover = substring[position:]
                break
        # Update the offset
        offset += position
    # If the last string had leftover, then return it.
    if leftover:
        yield leftover
def regexp(text, pattern, gaps=False, advanced=False):
    """
    Tokenize the text according to the regular expression pattern.
    @param text: the string or string iterator to be tokenized
    @type text: C{string} or C{iter(string)}
    @param pattern: the regular expression
    @type pattern: C{string}
    @param gaps: set to True if the pattern matches material between tokens
    @type gaps: C{boolean}
    @param advanced: set to True if the pattern is complex, making use of () groups
    @type advanced: C{boolean}
    @return: An iterator over tokens
    """
    # token_split alternates non-matching material (even positions) with
    # pattern matches (odd positions); keep whichever parity the caller
    # asked for, dropping empty strings.
    index = 0
    for token in token_split(text, pattern, advanced):
        atGap = (index % 2 == 0)
        if atGap == gaps and token != '':
            yield token
        index += 1
def whitespace(s):
    """
    Split the text into tokens separated by runs of whitespace.
    @param s: the string or string iterator to be tokenized
    @type s: C{string} or C{iter(string)}
    @return: An iterator over tokens
    """
    return regexp(s, WHITESPACE, gaps=True)
def line(s):
    """
    Split the text into individual lines (newlines are the gaps).
    @param s: the string or string iterator to be tokenized
    @type s: C{string} or C{iter(string)}
    @return: An iterator over tokens
    """
    return regexp(s, NEWLINE, gaps=True)
def blankline(s):
    """
    Split the text into paragraphs (blank lines are the gaps).
    @param s: the string or string iterator to be tokenized
    @type s: C{string} or C{iter(string)}
    @return: An iterator over tokens
    """
    return regexp(s, BLANKLINE, gaps=True)
def wordpunct(s):
    """
    Split the text into alternating runs of alphabetic and non-alphabetic
    characters. E.g. "She said 'hello.'" is tokenized to
    ["She", "said", "'", "hello", ".'"]
    @param s: the string or string iterator to be tokenized
    @type s: C{string} or C{iter(string)}
    @return: An iterator over tokens
    """
    return regexp(s, WORDPUNCT)
def shoebox(s):
    """
    Split a Shoebox entry into its fields (the backslash markers that
    start each field are the gaps).
    @param s: the string or string iterator to be tokenized
    @type s: C{string} or C{iter(string)}
    @return: An iterator over tokens
    """
    return regexp(s, SHOEBOXSEP, gaps=True)
def treebank(s):
    """
    Split a Treebank file into its individual tree strings.
    @param s: the string or string iterator to be tokenized
    @type s: C{string} or C{iter(string)}
    @return: An iterator over tokens
    """
    return regexp(s, TREEBANK, advanced=True)
##//////////////////////////////////////////////////////
## Demonstration
##//////////////////////////////////////////////////////
def _display(tokens):
"""
A helper function for L{demo} that displays a list of tokens.
"""
str = ' '+`list(tokens)`+' ' # an indented string representation
str = re.sub(r"(.{,70})\s", r'\1\n ', str).rstrip() # wrap at 70 characters
# Truncate after three lines:
str = re.sub(r'(.+\n.+\n.+)\s\S+\n[\s\S]+(?!$)', r'\1 ...]', str)
print str
def demo():
    """
    A demonstration that shows the output of several different
    tokenizers on the same string.
    """
    from en.parser.nltk_lite import tokenize
    # Define the test string.
    s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\n\nThanks."
    print 'Input text:'
    print `s`
    print
    print 'Tokenize using whitespace:'
    _display(tokenize.whitespace(s))
    print
    print 'Tokenize sequences of alphanumeric characters:'
    _display(tokenize.regexp(s, pattern=r'\w+', gaps=False))
    print
    print 'Tokenize sequences of letters and sequences of nonletters:'
    _display(tokenize.wordpunct(s))
    print
    print 'Tokenize by lines:'
    _display(tokenize.line(s))
    print
    print 'Tokenize by blank lines:'
    _display(tokenize.blankline(s))
    print
    print 'A simple sentence tokenizer:'
    _display(tokenize.regexp(s, pattern=r'\.(\s+|$)', gaps=True))
    print
if __name__ == '__main__':
    demo()
| Python |
# Natural Language Toolkit: Simple Tokenizers
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@csse.unimelb.edu.au>
# Trevor Cohn <tacohn@csse.unimelb.edu.au>
# URL: <http://nltk.sourceforge.net>
# For license information, see LICENSE.TXT
"""
Functions for tokenizing a text, based on a regular expression
which matches tokens or gaps.
"""
# Literal separator strings used by the simple, str.split-based tokenizers.
SPACE = ' '   # separator for space()
NEWLINE = '\n'   # separator for line()
BLANKLINE = '\n\n'   # separator for blankline()
# NOTE(review): this is a regular expression pattern, but shoebox() passes
# it to str.split, which treats it as a literal string -- confirm intent.
SHOEBOXSEP = r'^\\'
def space(s):
    """
    Tokenize the text at each single space character.
    @param s: the string to be tokenized
    @type s: C{string}
    @return: list of the pieces between spaces
    """
    return s.split(' ')   # SPACE
def line(s):
    """
    Tokenize the text into lines.
    @param s: the string to be tokenized
    @type s: C{string}
    @return: list of lines
    """
    return s.split('\n')   # NEWLINE
def blankline(s):
    """
    Tokenize the text into paragraphs (separated by blank lines).
    @param s: the string to be tokenized
    @type s: C{string}
    @return: list of paragraphs
    """
    return s.split('\n\n')   # BLANKLINE
def shoebox(s):
    """
    Tokenize a Shoebox entry into its fields (separated by backslash markers).
    Bug fix: the original called C{s.split(SHOEBOXSEP)}, but C{str.split}
    treats its argument as a literal string, so the regular expression
    r'^\\' was (almost) never found and the entry came back unsplit. Use a
    real regular-expression split instead, mirroring the behaviour of the
    regexp-based shoebox() tokenizer.
    @param s: the string to be tokenized
    @type s: C{string}
    @return: list of fields (the leading element before the first marker
        is included, usually the empty string)
    """
    import re
    # SHOEBOXSEP (r'^\\') means: a backslash at the start of a line.
    # The inline (?m) flag enables multi-line '^' matching.
    return re.split(r'(?m)^\\', s)
##//////////////////////////////////////////////////////
## Demonstration
##//////////////////////////////////////////////////////
def demo():
    """
    A demonstration that shows the output of several different
    tokenizers on the same string.
    """
    # Define the test string.
    s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\n\nThanks."
    print 'Input text:'
    print `s`
    print
    print 'Tokenize using individual space characters:'
    print list(space(s))
    print
    print 'Tokenize by lines:'
    print list(line(s))
    print
if __name__ == '__main__':
    demo()
| Python |
# Natural Language Toolkit: Tokenizers
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@csse.unimelb.edu.au> (minor additions)
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Functions for tokenizing text.
"""
from simple import *
from regexp import *
| Python |
# Natural Language Toolkit: Penn Treebank Reader
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Steven Bird <sb@ldc.upenn.edu>
# Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
from en.parser.nltk_lite.corpora import get_basedir
from en.parser.nltk_lite import tokenize
from en.parser.nltk_lite.tag import tag2tuple
from en.parser.nltk_lite.parse import tree
import os
"""
Penn Treebank corpus sample: tagged, NP-chunked, and parsed data from
Wall Street Journal for 3700 sentences.
This is a ~10% fragment of the Wall Street Journal section of the Penn
Treebank, (C) LDC 1995. It is distributed with the Natural Language Toolkit
under the terms of the Creative Commons Attribution-NonCommercial-ShareAlike License
[http://creativecommons.org/licenses/by-nc-sa/2.5/].
Raw:
Pierre Vinken, 61 years old, will join the board as a nonexecutive
director Nov. 29.
Tagged:
Pierre/NNP Vinken/NNP ,/, 61/CD years/NNS old/JJ ,/, will/MD join/VB
the/DT board/NN as/IN a/DT nonexecutive/JJ director/NN Nov./NNP 29/CD ./.
NP-Chunked:
[ Pierre/NNP Vinken/NNP ]
,/,
[ 61/CD years/NNS ]
old/JJ ,/, will/MD join/VB
[ the/DT board/NN ]
as/IN
[ a/DT nonexecutive/JJ director/NN Nov./NNP 29/CD ]
./.
Parsed:
( (S
(NP-SBJ
(NP (NNP Pierre) (NNP Vinken) )
(, ,)
(ADJP
(NP (CD 61) (NNS years) )
(JJ old) )
(, ,) )
(VP (MD will)
(VP (VB join)
(NP (DT the) (NN board) )
(PP-CLR (IN as)
(NP (DT a) (JJ nonexecutive) (NN director) ))
(NP-TMP (NNP Nov.) (CD 29) )))
(. .) ))
"""
def parsed(files='parsed'):
    """
    Iterate over the parse trees of one or more treebank files.

    @param files: One or more treebank files to be processed
    @type files: L{string} or L{tuple(string)}
    @rtype: iterator over L{tree}
    """
    # Just one file to process?  If so convert to a tuple so we can iterate.
    if isinstance(files, str):
        files = (files,)
    for filename in files:                      # don't shadow the builtin 'file'
        path = os.path.join(get_basedir(), "treebank", filename)
        f = open(path)
        data = f.read()
        f.close()                               # don't leak the file handle
        # Trees are separated by blank lines.
        for t in tokenize.blankline(data):
            yield tree.bracket_parse(t)
def chunked(files='chunked'):
    """
    Iterate over the chunk structures of one or more treebank files.

    @param files: One or more treebank files to be processed
    @type files: L{string} or L{tuple(string)}
    @rtype: iterator over L{tree}
    """
    # Just one file to process?  If so convert to a tuple so we can iterate.
    if isinstance(files, str):
        files = (files,)
    for filename in files:                      # don't shadow the builtin 'file'
        path = os.path.join(get_basedir(), "treebank", filename)
        f = open(path)
        data = f.read()
        f.close()                               # don't leak the file handle
        # Sentences are separated by blank lines.
        for t in tokenize.blankline(data):
            yield tree.chunk(t)
def tagged(files='chunked'):
    """
    Iterate over the tagged sentences of one or more treebank files.

    @param files: One or more treebank files to be processed
    @type files: L{string} or L{tuple(string)}
    @rtype: iterator over L{list(tuple)}
    """
    # Just one file to process?  If so convert to a tuple so we can iterate.
    if isinstance(files, str):
        files = (files,)
    for filename in files:                      # don't shadow the builtin 'file'
        path = os.path.join(get_basedir(), "treebank", filename)
        f = open(path)
        data = f.read()
        f.close()                               # don't leak the file handle
        for sent in tokenize.blankline(data):
            # Skip the chunk brackets; convert each word/tag token to a tuple.
            yield [tag2tuple(t) for t in tokenize.whitespace(sent)
                   if t not in ('[', ']')]
def raw(files='raw'):
    """
    Iterate over the raw (untagged) sentences of one or more treebank files.

    @param files: One or more treebank files to be processed
    @type files: L{string} or L{tuple(string)}
    @rtype: iterator over L{list(string)}
    """
    # Just one file to process?  If so convert to a tuple so we can iterate.
    if isinstance(files, str):
        files = (files,)
    for filename in files:                      # don't shadow the builtin 'file'
        path = os.path.join(get_basedir(), "treebank", filename)
        f = open(path)
        data = f.read()
        f.close()                               # don't leak the file handle
        for sent in tokenize.blankline(data):
            yield list(tokenize.whitespace(sent))
def demo():
    """Print the first three items of each treebank view (parsed, chunked,
    tagged, raw)."""
    from en.parser.nltk_lite.corpora import treebank
    from itertools import islice

    # First three parse trees, pretty-printed.
    print "Parsed:"
    for tree in islice(treebank.parsed(), 3):
        print tree.pp()
    print

    # First three chunk structures.
    print "Chunked:"
    for tree in islice(treebank.chunked(), 3):
        print tree.pp()
    print

    # First three tagged sentences (lists of (word, tag) tuples).
    print "Tagged:"
    for sent in islice(treebank.tagged(), 3):
        print sent
    print

    # First three raw sentences (lists of words).
    print "Raw:"
    for sent in islice(treebank.raw(), 3):
        print sent
    print

if __name__ == '__main__':
    demo()
| Python |
# Natural Language Toolkit: Genesis Corpus Reader
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Steven Bird <sb@ldc.upenn.edu>
# Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
The Genesis Corpus.
This corpus has been prepared from several web sources; formatting,
markup and verse numbers have been stripped.
english-kjv - Genesis, King James version (Project Gutenberg)
english-web - Genesis, World English Bible (Project Gutenberg)
french - Genesis, Louis Segond 1910
german - Genesis, Luther Translation
swedish - Genesis, Gamla och Nya Testamentet, 1917 (Project Runeberg)
finnish - Genesis, Suomen evankelis-luterilaisen kirkon kirkolliskokouksen vuonna 1992 käyttöön ottama suomennos
"""
from en.parser.nltk_lite.corpora import get_basedir
from en.parser.nltk_lite import tokenize
import os
# Corpus items: one translation of Genesis per file.
items = [
    'english-kjv',
    'english-web',
    'french',
    'german',
    'swedish',
    'finnish']

# Human-readable description for each corpus item.
item_name = {
    'english-kjv': 'Genesis, King James version (Project Gutenberg)',
    'english-web': 'Genesis, World English Bible (Project Gutenberg)',
    'french': 'Genesis, Louis Segond 1910',
    'german': 'Genesis, Luther Translation',
    'swedish': 'Genesis, Gamla och Nya Testamentet, 1917 (Project Runeberg)',
    'finnish': 'Genesis, Suomen evankelis-luterilaisen kirkon kirkolliskokouksen vuonna 1992 käyttöön ottama suomennos'
}
def raw(files='english-kjv'):
    """
    Iterate over the whitespace-separated tokens of one or more translations.

    @param files: One or more corpus items to be processed
    @type files: L{string} or L{tuple(string)}
    @rtype: iterator over L{string}
    """
    # Just one file to process?  If so convert to a tuple so we can iterate.
    if isinstance(files, str):
        files = (files,)
    for filename in files:                      # don't shadow the builtin 'file'
        path = os.path.join(get_basedir(), "genesis", filename + ".txt")
        f = open(path)
        data = f.read()
        f.close()                               # don't leak the file handle
        for t in tokenize.whitespace(data):
            yield t
def demo():
    """Print the first 27 words of the English and Finnish translations."""
    from en.parser.nltk_lite.corpora import genesis
    from itertools import islice

    print 'English:'
    for word in islice(genesis.raw(), 27):
        print word,                # trailing comma: words on one line
    print

    print 'Finnish:'
    for word in islice(genesis.raw('finnish'), 27):
        print word,
    print

if __name__ == '__main__':
    demo()
| Python |
# Natural Language Toolkit: Stopwords Corpus Reader
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Steven Bird <sb@ldc.upenn.edu>
# Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Read tokens from the Stopwords Corpus.
"""
from en.parser.nltk_lite.corpora import get_basedir
import os
# Languages for which a stopword list is available (one file per language).
items = ['danish', 'dutch', 'english', 'french', 'german', 'italian',
    'norwegian', 'portuguese', 'russian', 'spanish', 'swedish']

# Human-readable description for each corpus item.
item_name = {
    'danish': 'Danish stopwords',
    'dutch': 'Dutch stopwords',
    'english': 'English stopwords',
    'french': 'French stopwords',
    'german': 'German stopwords',
    'italian': 'Italian stopwords',
    'norwegian': 'Norwegian stopwords',
    'portuguese': 'Portuguese stopwords',
    'russian': 'Russian stopwords',
    'spanish': 'Spanish stopwords',
    'swedish': 'Swedish stopwords',
}
def raw(files='english'):
    """
    Iterate over the stopwords in the given language file(s), one word
    per line, with surrounding whitespace stripped.

    @param files: One or more corpus items to process
    @type files: L{string} or L{tuple(string)}
    @rtype: iterator over L{string}
    """
    # Just one file to process?  If so convert to a tuple so we can iterate.
    if isinstance(files, str):
        files = (files,)
    for filename in files:                      # don't shadow the builtin 'file'
        path = os.path.join(get_basedir(), "stopwords", filename)
        f = open(path)
        lines = f.readlines()
        f.close()                               # don't leak the file handle
        for word in lines:
            yield word.strip()
def demo():
    """Print the first 20 English and Danish stopwords."""
    from en.parser.nltk_lite.corpora import stopwords
    from itertools import islice
    from pprint import pprint

    print "20 English stopwords"
    pprint(list(islice(stopwords.raw(), 0, 20)))

    print "20 Danish stopwords"
    pprint(list(islice(stopwords.raw('danish'), 0, 20)))

if __name__ == '__main__':
    demo()
| Python |
# Natural Language Toolkit: Presidential State of the Union Addres Corpus Reader
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Steven Bird <sb@ldc.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
C-Span State of the Union Address Corpus
Annual US presidential addresses 1945-2005
http://www.c-span.org/executive/stateoftheunion.asp
"""
from en.parser.nltk_lite.corpora import get_basedir
from en.parser.nltk_lite import tokenize
import os, re
# State of the Union addresses available in the corpus, one per file,
# named "<year>-<president>" (a -1/-2 suffix marks years with two
# addresses), grouped below by administration.
items = [
    '1945-Truman', '1946-Truman', '1947-Truman', '1948-Truman',
    '1949-Truman', '1950-Truman', '1951-Truman',
    '1953-Eisenhower', '1954-Eisenhower', '1955-Eisenhower',
    '1956-Eisenhower', '1957-Eisenhower', '1958-Eisenhower',
    '1959-Eisenhower', '1960-Eisenhower',
    '1961-Kennedy', '1962-Kennedy', '1963-Johnson', '1963-Kennedy',
    '1964-Johnson', '1965-Johnson-1', '1965-Johnson-2', '1966-Johnson',
    '1967-Johnson', '1968-Johnson', '1969-Johnson',
    '1970-Nixon', '1971-Nixon', '1972-Nixon', '1973-Nixon', '1974-Nixon',
    '1975-Ford', '1976-Ford', '1977-Ford',
    '1978-Carter', '1979-Carter', '1980-Carter',
    '1981-Reagan', '1982-Reagan', '1983-Reagan', '1984-Reagan',
    '1985-Reagan', '1986-Reagan', '1987-Reagan', '1988-Reagan',
    '1989-Bush', '1990-Bush', '1991-Bush-1', '1991-Bush-2', '1992-Bush',
    '1993-Clinton', '1994-Clinton', '1995-Clinton', '1996-Clinton',
    '1997-Clinton', '1998-Clinton', '1999-Clinton', '2000-Clinton',
    '2001-Bush-1', '2001-Bush-2', '2002-Bush', '2003-Bush',
    '2004-Bush', '2005-Bush',
]
def raw(files=items):
    """
    Iterate over the word/punctuation tokens of one or more addresses.

    @param files: One or more corpus items to process (default: all)
    @type files: L{string} or L{tuple(string)}
    @rtype: iterator over L{string}
    """
    # Just one file to process?  If so convert to a tuple so we can iterate.
    if isinstance(files, str):
        files = (files,)
    for filename in files:                      # don't shadow the builtin 'file'
        path = os.path.join(get_basedir(), "state_union", filename + ".txt")
        f = open(path)
        text = f.read()
        f.close()                               # don't leak the file handle
        for t in tokenize.wordpunct(text):
            yield t
def demo():
    """For every address, print its year and how often 'men' occurs."""
    from en.parser.nltk_lite.corpora import state_union

    for speech in state_union.items:
        # Item names start with the four-digit year.
        year = speech[:4]
        freq = list(state_union.raw(speech)).count('men')
        print year, freq

if __name__ == '__main__':
    demo()
| Python |
# Natural Language Toolkit: Sinica Treebank Reader
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Steven Bird <sb@ldc.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
from en.parser.nltk_lite.corpora import get_basedir
from en.parser.nltk_lite import tokenize
from en.parser.nltk_lite.tag import tag2tuple
from en.parser.nltk_lite.parse import tree
import os
"""
Sinica Treebank Corpus Sample
http://rocling.iis.sinica.edu.tw/CKIP/engversion/treebank.htm
10,000 parsed sentences, drawn from the Academia Sinica Balanced
Corpus of Modern Chinese. Parse tree notation is based on
Information-based Case Grammar.
Language and Knowledge Processing Group, Institute of Information
Science, Academia Sinica
It is distributed with the Natural Language Toolkit under the terms of
the Creative Commons Attribution-NonCommercial-ShareAlike License
[http://creativecommons.org/licenses/by-nc-sa/2.5/].
References:
Feng-Yi Chen, Pi-Fang Tsai, Keh-Jiann Chen, and Chu-Ren Huang (1999)
The Construction of Sinica Treebank. Computational Linguistics and
Chinese Language Processing, 4, pp 87-104.
Huang Chu-Ren, Keh-Jiann Chen, Feng-Yi Chen, Keh-Jiann Chen, Zhao-Ming
Gao, and Kuang-Yu Chen. 2000. Sinica Treebank: Design Criteria,
Annotation Guidelines, and On-line Interface. Proceedings of 2nd
Chinese Language Processing Workshop, Association for Computational
Linguistics.
Chen Keh-Jiann and Yu-Ming Hsieh (2004) Chinese Treebanks and Grammar
Extraction, Proceedings of IJCNLP-04, pp560-565.
"""
def parsed(files='parsed'):
    """
    Iterate over the parse trees of one or more Sinica treebank files.

    @param files: One or more treebank files to be processed
    @type files: L{string} or L{tuple(string)}
    @rtype: iterator over L{tree}
    """
    # Just one file to process?  If so convert to a tuple so we can iterate.
    if isinstance(files, str):
        files = (files,)
    for filename in files:                      # don't shadow the builtin 'file'
        path = os.path.join(get_basedir(), "sinica_treebank", filename)
        f = open(path)
        lines = f.readlines()
        f.close()                               # don't leak the file handle
        # One bracketed parse per line.
        for sent in lines:
            yield tree.bracket_parse(sent)
def raw(files='raw'):
    """
    Iterate over the sentences of one or more Sinica treebank files.

    @param files: One or more treebank files to be processed
    @type files: L{string} or L{tuple(string)}
    @rtype: iterator over L{list(string)}
    """
    # Just one file to process?  If so convert to a tuple so we can iterate.
    if isinstance(files, str):
        files = (files,)
    for filename in files:                      # don't shadow the builtin 'file'
        path = os.path.join(get_basedir(), "sinica_treebank", filename)
        f = open(path)
        lines = f.readlines()
        f.close()                               # don't leak the file handle
        for line in lines:
            # The first whitespace-separated column is an identifier; skip it.
            yield line.split()[1:]
def demo():
    """Print the first three raw sentences of the Sinica treebank sample."""
    from en.parser.nltk_lite.corpora import sinica_treebank
    from itertools import islice

    # The parsed() demo is disabled pending a tree display suitable for
    # Chinese text.
    # print "Parsed:"
    # for tree in islice(sinica_treebank.parsed(), 3):
    #     print tree.pp()
    # print

    print "Raw:"
    for sent in islice(sinica_treebank.raw(), 3):
        print sent
    print

if __name__ == '__main__':
    demo()
| Python |
# Natural Language Toolkit: Senseval 2 Corpus Reader
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Trevor Cohn <tacohn@cs.mu.oz.au>
# Steven Bird <sb@csse.unimelb.edu.au> (modifications)
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Read from the Senseval 2 Corpus.
SENSEVAL [http://www.senseval.org/]
Evaluation exercises for Word Sense Disambiguation.
Organized by ACL-SIGLEX [http://www.siglex.org/]
Prepared by Ted Pedersen <tpederse@umn.edu>, University of Minnesota,
http://www.d.umn.edu/~tpederse/data.html
Distributed with permission.
The NLTK version of the Senseval 2 files uses well-formed XML.
Each instance of the ambiguous words "hard", "interest", "line", and "serve"
is tagged with a sense identifier, and supplied with context.
"""
from en.parser.nltk_lite.corpora import get_basedir
from en.parser.nltk_lite import tokenize
import os, re, xml.sax
# The four ambiguous target words covered by the Senseval 2 sample.
items = ["hard", "interest", "line", "serve"]
class SensevalParser(xml.sax.ContentHandler):
    """SAX handler that converts a Senseval-2 XML stream into instances.

    Each completed instance is a tuple
    C{(senses, head_position, tokens)} where C{senses} is a tuple of sense
    identifiers, C{head_position} is the 0-based index of the head word
    among the tokens, and C{tokens} is a list of C{(text, pos)} pairs.
    """

    def __init__(self, buffer_size=1024):
        # buffer_size: number of characters fed to the SAX parser per chunk.
        xml.sax.ContentHandler.__init__(self)
        self._lemma = ''
        self._buffer_size = buffer_size
        self.reset()

    def parse(self, text):
        """Feed C{text} (a string or an iterator of strings) to a SAX
        parser in chunks, yielding each instance as it is completed."""
        # Materialize an iterator of strings into a single string.
        if hasattr(text, '__iter__') and hasattr(text, 'next'):
            text = ''.join(text)
        parser = xml.sax.make_parser()
        parser.setContentHandler(self)
        current = 0
        while current < len(text):
            buffer = text[current : current + self._buffer_size]
            parser.feed(buffer)
            # Yield instances completed by this chunk, then clear them
            # while preserving the parse state of any partial instance.
            for instance in self._instances:
                yield instance
            self.reset(True, False)
            current += self._buffer_size
        parser.close()

    def characters(self, ch):
        # Accumulate character data for the current element.
        self._data += _to_ascii(ch)

    def startElement(self, tag, attr):
        if tag == 'wf':
            # Word form: remember its part-of-speech tag for endElement.
            self._pos = _to_ascii(attr.getValueByQName('pos'))
        elif tag == 'answer':
            # Sense annotation for the current instance.
            instance_id = _to_ascii(attr.getValueByQName('instance'))
            self._senses.append(_to_ascii(attr.getValueByQName('senseid')))
            self._iloc = instance_id
        elif tag == 'context':
            # Start of the instance's context: reset the character buffer.
            self._data = ''
        elif tag == 'lexelt':
            # The ambiguous lemma this file covers.
            self._lemma = _to_ascii(attr.getValueByQName('item'))
        elif tag == 'head':
            # Record the position of the head word (0-based; _wnum is the
            # 1-based number of the NEXT word to be completed).
            self._head = self._wnum - 1

    def endElement(self, tag):
        if tag == 'wf':
            # Complete the current (text, pos) token.
            text = self._data.strip()
            pos = self._pos
            self._tokens.append((text, pos))
            self._wnum += 1
            self._data = ''
        elif tag == 'context':
            # Instance finished: store it, then clear per-instance state.
            self._instances.append((tuple(self._senses), self._head, self._tokens))
            self.reset(False)

    def instances(self):
        # Instances accumulated since the last reset.
        return self._instances

    def reset(self, instances=True, state=True):
        """Clear the accumulated instances and/or the per-instance state."""
        if instances:
            self._instances = []
        if state:
            self._senses = []
            self._head = None
            self._data = ''
            self._wnum = 1
            self._iloc = None
            self._tokens = []
            self._pos = None
def _to_ascii(text):
    # Encode a SAX-supplied unicode string as a Latin-1 byte string.
    return text.encode('Latin-1')
def raw(files=items):
    """
    Iterate over the instances of one or more Senseval files.

    @param files: One or more Senseval files to be processed
    @type files: L{string} or L{tuple(string)}
    @rtype: iterator over L{tuple}
    """
    # Just one file to process?  If so convert to a tuple so we can iterate.
    if isinstance(files, str):
        files = (files,)
    parser = SensevalParser()
    for filename in files:                      # don't shadow the builtin 'file'
        path = os.path.join(get_basedir(), "senseval", filename + ".pos")
        f = open(path)
        data = f.read()
        f.close()                               # don't leak the file handle
        for entry in parser.parse(data):
            yield entry
def demo():
    """Print one example instance for each sense of 'line'."""
    from en.parser.nltk_lite.corpora import senseval
    from itertools import islice

    # Print one example of each sense
    seen = set()
    for (senses, position, context) in senseval.raw('line'):
        if senses not in seen:
            seen.add(senses)
            print "senses:", senses
            print "position:", position
            print "context:", ' '.join(['%s/%s' % ttok for ttok in context])
            print

if __name__ == '__main__':
    demo()
| Python |
# Natural Language Toolkit: Names Corpus Reader
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Steven Bird <sb@ldc.upenn.edu>
# Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Names Corpus, Version 1.3 (1994-03-29)
Copyright (C) 1991 Mark Kantrowitz
Additions by Bill Ross
This corpus contains 5001 female names and 2943 male names, sorted
alphabetically, one per line.
(Used in NLTK with permission. See the README file for details.)
"""
from en.parser.nltk_lite.corpora import get_basedir
import os
# Corpus items: one file of names per gender.
items = ['female', 'male']

# Human-readable description for each corpus item.
item_name = {
    'female': 'Female names',
    'male': 'Male names'
}
def raw(files=('female', 'male')):
    """
    Iterate over the names in the given file(s), one name per line.

    @param files: One or more corpus items to process
    @type files: L{string} or L{tuple(string)}
    @rtype: iterator over L{string}
    """
    # Note: the default is a tuple, not a list -- a mutable default
    # argument is shared across calls and is an anti-pattern.
    if isinstance(files, str):
        files = (files,)
    for filename in files:                      # don't shadow the builtin 'file'
        path = os.path.join(get_basedir(), "names", filename + ".txt")
        f = open(path)
        lines = f.readlines()
        f.close()                               # don't leak the file handle
        for word in lines:
            yield word.strip()
def demo():
    """Print 20 randomly chosen female and male names."""
    from en.parser.nltk_lite.corpora import names
    from random import shuffle
    from pprint import pprint

    print "20 female names"
    female = list(names.raw('female'))
    shuffle(female)
    pprint(female[:20])

    print "20 male names"
    male = list(names.raw('male'))
    shuffle(male)
    pprint(male[:20])

if __name__ == '__main__':
    demo()
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Natural Language Toolkit: Toolbox Reader
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Greg Aumann <greg_aumann@sil.org>
# Stuart Robinson <Stuart.Robinson@mpi.nl>
# Steven Bird <sb@csse.unimelb.edu.au>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Module for reading, writing and manipulating Toolbox databases.
"""
import os, re
from en.parser.nltk_lite.corpora import get_basedir
from string import split
from itertools import imap
from StringIO import StringIO
from en.parser.nltk_lite.etree.ElementTree import TreeBuilder, Element
class StandardFormat(object):
    """
    Class for reading and processing standard format marker files and strings.
    """
    def open(self, sfm_file):
        """Open a standard format marker file for sequential reading.

        @param sfm_file: name of the standard format marker input file
        @type sfm_file: string
        """
        # 'rU': universal-newline mode, so \r\n files read correctly.
        self._file = file(sfm_file, 'rU')

    def open_string(self, s):
        """Open a standard format marker string for sequential reading.

        @param s: string to parse as a standard format marker input file
        @type s: string
        """
        self._file = StringIO(s)

    def raw_fields(self):
        """Return an iterator for the fields in the standard format marker
        file.

        @return: an iterator that returns the next field in a (marker, value)
            tuple. Linebreaks and trailing white space are preserved except
            for the final newline in each field.
        @rtype: iterator over C{(marker, value)} tuples
        """
        join_string = '\n'
        # Group 1: the marker (text after a leading backslash); group 2: the
        # rest of the line.  A line with no backslash marker is treated as a
        # continuation of the current field's value.
        line_pat = re.compile(r'^(?:\\(\S+)\s*)?(.*)$')
        # need to get first line outside the loop for correct handling
        # of the first marker if it spans multiple lines
        file_iter = iter(self._file)
        line = file_iter.next()
        mobj = re.match(line_pat, line)
        mkr, line_value = mobj.groups()
        value_lines = [line_value,]
        self.line_num = 0
        for line in file_iter:
            self.line_num += 1
            mobj = re.match(line_pat, line)
            line_mkr, line_value = mobj.groups()
            if line_mkr:
                # A new marker: emit the field accumulated so far.
                yield (mkr, join_string.join(value_lines))
                mkr = line_mkr
                value_lines = [line_value,]
            else:
                # Continuation line of the current field's value.
                value_lines.append(line_value)
        # Account for the first line, which was read before the loop.
        self.line_num += 1
        yield (mkr, join_string.join(value_lines))

    def fields(self, strip=True, unwrap=True, encoding=None, errors='strict', unicode_fields=None):
        """Return an iterator for the fields in the standard format marker file.

        @param strip: strip trailing whitespace from the last line of each field
        @type strip: boolean
        @param unwrap: Convert newlines in a field to spaces.
        @type unwrap: boolean
        @param encoding: Name of an encoding to use. If it is specified then
            the C{fields} method returns unicode strings rather than non
            unicode strings.
        @type encoding: string or None
        @param errors: Error handling scheme for codec. Same as the C{decode}
            inbuilt string method.
        @type errors: string
        @param unicode_fields: Set of marker names whose values are UTF-8
            encoded. Ignored if encoding is None. If the whole file is UTF-8
            encoded set C{encoding='utf8'} and leave C{unicode_fields} with
            its default value of None.
        @type unicode_fields: set or dictionary (actually any sequence that
            supports the 'in' operator).
        @return: an iterator that returns the next field in a C{(marker, value)}
            tuple. C{marker} and C{value} are unicode strings if an C{encoding}
            was specified in the C{fields} method. Otherwise they are
            nonunicode strings.
        @rtype: iterator over C{(marker, value)} tuples
        """
        if encoding is None and unicode_fields is not None:
            raise ValueError, 'unicode_fields is set but not encoding.'
        unwrap_pat = re.compile(r'\n+')
        for mkr, val in self.raw_fields():
            if encoding:
                if unicode_fields is not None and mkr in unicode_fields:
                    # Per-field override: this marker's values are UTF-8.
                    val = val.decode('utf8', errors)
                else:
                    val = val.decode(encoding, errors)
                mkr = mkr.decode(encoding, errors)
            if unwrap:
                val = unwrap_pat.sub(' ', val)
            if strip:
                val = val.rstrip()
            yield (mkr, val)

    def close(self):
        """Close a previously opened standard format marker file or string."""
        self._file.close()
        try:
            del self.line_num
        except AttributeError:
            # line_num only exists once raw_fields() has been started.
            pass
class ToolboxData(StandardFormat):
    # A standard format marker file interpreted as a header plus records.

    def __init__(self):
        super(ToolboxData, self).__init__()

    def parse(self, *args, **kwargs):
        # Thin public wrapper; see _record_parse for details.
        return self._record_parse(*args, **kwargs)

    def _record_parse(self, key=None, **kwargs):
        r"""
        Returns an element tree structure corresponding to a toolbox data file with
        all markers at the same level.

        Thus the following Toolbox database::

            \_sh v3.0 400 Rotokas Dictionary
            \_DateStampHasFourDigitYear
            \lx kaa
            \ps V.A
            \ge gag
            \gp nek i pas
            \lx kaa
            \ps V.B
            \ge strangle
            \gp pasim nek

        after parsing will end up with the same structure (ignoring the extra
        whitespace) as the following XML fragment after being parsed by
        ElementTree::

            <toolbox_data>
                <header>
                    <_sh>v3.0 400 Rotokas Dictionary</_sh>
                    <_DateStampHasFourDigitYear/>
                </header>
                <record>
                    <lx>kaa</lx>
                    <ps>V.A</ps>
                    <ge>gag</ge>
                    <gp>nek i pas</gp>
                </record>
                <record>
                    <lx>kaa</lx>
                    <ps>V.B</ps>
                    <ge>strangle</ge>
                    <gp>pasim nek</gp>
                </record>
            </toolbox_data>

        @param key: Name of key marker at the start of each record. If set to
            None (the default value) the first marker that doesn't begin with
            an underscore is assumed to be the key.
        @type key: string
        @param kwargs: Keyword arguments passed to L{StandardFormat.fields()}
        @type kwargs: keyword arguments dictionary
        @rtype: ElementTree._ElementInterface
        @return: contents of toolbox data divided into header and records
        """
        builder = TreeBuilder()
        builder.start('toolbox_data', {})
        builder.start('header', {})
        in_records = False
        for mkr, value in self.fields(**kwargs):
            # The first marker not starting with '_' becomes the record key.
            if key is None and not in_records and mkr[0] != '_':
                key = mkr
            if mkr == key:
                # Start of a new record: close the previous record, or the
                # header if this is the first record.
                if in_records:
                    builder.end('record')
                else:
                    builder.end('header')
                    in_records = True
                builder.start('record', {})
            builder.start(mkr, {})
            builder.data(value)
            builder.end(mkr)
        # Close whatever element is still open at end of input.
        if in_records:
            builder.end('record')
        else:
            builder.end('header')
        builder.end('toolbox_data')
        return builder.close()
def parse_corpus(file_name, key=None, **kwargs):
    """
    Return an element tree resulting from parsing the toolbox datafile.

    Convenience wrapper: builds a C{ToolboxData} object, opens the named
    file in the ``toolbox`` subdirectory of the directory where NLTK looks
    for corpora (see L{corpora.get_basedir()}), and parses it.

    @param file_name: Name of file in toolbox corpus directory
    @type file_name: string
    @param key: marker at the start of each record
    @type key: string
    @param kwargs: Keyword arguments passed to L{ToolboxData.parse()}
    @type kwargs: keyword arguments dictionary
    @rtype: ElementTree._ElementInterface
    @return: contents of toolbox data divided into header and records
    """
    path = os.path.join(get_basedir(), 'toolbox', file_name)
    database = ToolboxData()
    database.open(path)
    return database.parse(key, **kwargs)
import re
# Matches any non-whitespace character, i.e. tests whether a string
# contains any real content.
_is_value = re.compile(r"\S")
def to_sfm_string(tree, encoding=None, errors='strict', unicode_fields=None):
    """Return a string with a standard format representation of the toolbox
    data in tree (tree can be a toolbox database or a single record).

    @param tree: flat representation of toolbox data (whole database or single record)
    @type tree: ElementTree._ElementInterface
    @param encoding: Name of an encoding to use.
    @type encoding: string
    @param errors: Error handling scheme for codec. Same as the C{encode}
        inbuilt string method.
    @type errors: string
    @param unicode_fields: marker names whose values are always UTF-8 encoded
    @type unicode_fields: sequence supporting the 'in' operator
    @rtype: string
    @return: string using standard format markup
    """
    if tree.tag == 'record':
        # Wrap a bare record so the loop below works uniformly.
        root = Element('toolbox_data')
        root.append(tree)
        tree = root
    if tree.tag != 'toolbox_data':
        raise ValueError("not a toolbox_data element structure")
    if encoding is None and unicode_fields is not None:
        raise ValueError(
            "if encoding is not specified then neither should unicode_fields")
    l = []
    for rec in tree:
        # Blank-line separator between records (dropped before the first).
        l.append('\n')
        for field in rec:
            mkr = field.tag
            # ElementTree stores an empty element's text as None; treat it
            # as an empty value rather than crashing below.
            value = field.text or ''
            # A space separates marker and value only when there is a value.
            if encoding is not None:
                if unicode_fields is not None and mkr in unicode_fields:
                    cur_encoding = 'utf8'
                else:
                    cur_encoding = encoding
                if value.strip():
                    l.append((u"\\%s %s\n" % (mkr, value)).encode(cur_encoding, errors))
                else:
                    l.append((u"\\%s%s\n" % (mkr, value)).encode(cur_encoding, errors))
            else:
                if value.strip():
                    l.append("\\%s %s\n" % (mkr, value))
                else:
                    l.append("\\%s%s\n" % (mkr, value))
    return ''.join(l[1:])
def _parse_record(s):
"""
Deprecated: use C{StandardFormat.fields()}
@param s: toolbox record as a string
@type s: L{string}
@rtype: iterator over L{list(string)}
"""
s = "\n" + s # Fields (even first) must start w/ a carriage return
if s.endswith("\n") : s = s[:-1] # Remove single extra carriage return
for field in split(s, sep="\n\\")[1:] : # Parse by carriage return followed by backslash
parsed_field = split(field, sep=" ", maxsplit=1) # Split properly delineated field
try :
yield (parsed_field[0], parsed_field[1])
except IndexError :
yield (parsed_field[0], '')
def raw(files='rotokas.dic', include_header=False, head_field_marker=None):
    """
    Iterate over toolbox entries, each a list of (marker, value) fields.

    Deprecated: use C{StandardFormat.fields()}

    @param files: One or more toolbox files to be processed
    @type files: L{string} or L{tuple(string)}
    @param include_header: flag that determines whether to treat header as record (default is no)
    @type include_header: boolean
    @param head_field_marker: option for explicitly setting which marker to use
        as the head field when parsing the file (default is automatically
        determining it from the first field of the first record)
    @type head_field_marker: string
    @rtype: iterator over L{list(string)}
    """
    # Just one file to process?  If so convert to a tuple so we can iterate.
    if isinstance(files, str):
        files = (files,)
    for filename in files:                      # don't shadow the builtin 'file'
        f = open(os.path.join(get_basedir(), "toolbox", filename), "U")
        fc = f.read()
        f.close()                               # don't leak the file handle
        if fc.strip().startswith(r"\_"):
            # A header of \_... fields is separated from the body by a blank line.
            (header, body) = fc.split("\n\n", 1)
            if include_header:
                yield list(_parse_record(header))
        else:
            body = fc
        # Determine the head field marker, with its leading backslash.
        if head_field_marker:
            # BUG FIX: the original referenced an undefined name 'hfm' here.
            hfm_with_backslash = "\\" + head_field_marker
        else:
            ff = body.split("\n", 1)[0]               # first field
            hfm_with_backslash = ff.split(" ", 1)[0]  # raw marker of first field
        recordsep = "\n\n" + hfm_with_backslash  # separates records from one another
        # Parse records
        for r in ("\n\n" + body).split(recordsep)[1:]:
            yield list(_parse_record(hfm_with_backslash + r))
# assumes headwords are unique
def dictionary(files='rotokas.dic', include_header=False):
    """
    Iterate over toolbox entries as dictionaries mapping each field marker
    to a single value (assumes headwords are unique).

    Deprecated: use C{ToolboxData.parse()}

    @param files: One or more toolbox files to be processed
    @type files: L{string} or L{tuple(string)}
    @param include_header: treat header as entry?
    @type include_header: boolean
    @rtype: iterator over L{dict}
    """
    return (dict(entry) for entry in raw(files, include_header))
def _dict_list_entry(entry):
d = {}
for field in entry:
if len(field) == 2:
name, value = field
if name not in d:
d[name] = []
d[name].append(value)
return d
# if two entries have the same headword this key maps to a list of entries
def dict_list(files='rotokas.dic', include_header=False):
    """
    Iterate over toolbox entries, mapping each field marker to the list of
    values it takes in that entry (so duplicate headwords are preserved).

    Deprecated: use C{ToolboxData.parse()}

    @param files: One or more toolbox files to be processed
    @type files: L{string} or L{tuple(string)}
    @param include_header: treat header as entry?
    @type include_header: boolean
    @rtype: iterator over L{dict}
    """
    # Just one file to process?  If so convert to a tuple so we can iterate.
    if type(files) is str:
        files = (files,)
    return imap(_dict_list_entry, raw(files, include_header))
def demo():
    """Exercise the deprecated raw/dictionary/dict_list readers on the
    Rotokas dictionary and on a small test file."""
    from en.parser.nltk_lite.corpora import toolbox
    from itertools import islice
    from pprint import pprint

    print 'Raw:'
    pprint(list(islice(toolbox.raw(), 3)))

    print 'Dictionary:'
    pprint(list(islice(toolbox.dictionary(), 3)))

    print 'Dictionary-List:'
    pprint(list(islice(toolbox.dict_list(), 3)))

    print 'Complex test cases, no header'
    pprint(list(toolbox.raw("test.dic")))

    print 'Complex test cases, no header, dictionary'
    pprint(list(toolbox.dictionary("test.dic")))

    print 'Complex test cases, no header, dictionary list'
    pprint(list(toolbox.dict_list("test.dic")))

    print 'Complex test cases, with header'
    pprint(list(toolbox.raw("test.dic", include_header=True)))

if __name__ == '__main__':
    demo()
| Python |
# Natural Language Toolkit: PP Attachment Corpus Reader
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Steven Bird <sb@ldc.upenn.edu>
# Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Read lines from the Prepositional Phrase Attachment Corpus.
The PP Attachment Corpus contains several files having the format:
sentence_id verb noun1 preposition noun2 attachment
E.g.:
42960 gives authority to administration V
46742 gives inventors of microchip N
The PP attachment is to the verb phrase (V) or noun phrase (N), i.e.:
(VP gives (NP authority) (PP to administration))
(VP gives (NP inventors (PP of microchip)))
The corpus contains the following files:
training: training set
devset: development test set, used for algorithm development.
test: test set, used to report results
bitstrings: word classes derived from Mutual Information
Clustering for the Wall Street Journal.
Ratnaparkhi, Adwait (1994). A Maximum Entropy Model for Prepositional
Phrase Attachment. Proceedings of the ARPA Human Language Technology
Conference. [http://www.cis.upenn.edu/~adwait/papers/hlt94.ps]
The PP Attachment Corpus is distributed with NLTK with the permission
of the author.
"""
from en.parser.nltk_lite.corpora import get_basedir
from en.parser.nltk_lite import tokenize
from en.parser.nltk_lite.tag import string2tags, string2words
import os
# Corpus items: one PP attachment data set per file.
items = ['training', 'devset', 'test']

# Human-readable description for each corpus item.
item_name = {
    'training': 'training set',
    'devset': 'development test set',
    'test': 'test set'
}
def raw(files=items):
    """
    Iterate over PP attachment records as tuples of the form
    (sentence_id, verb, noun1, preposition, noun2, attachment).

    @param files: One or more corpus items to process (default: all)
    @type files: L{string} or L{tuple(string)}
    @rtype: iterator over L{tuple}
    """
    # Just one file to process?  If so convert to a tuple so we can iterate.
    if isinstance(files, str):
        files = (files,)
    for filename in files:                      # don't shadow the builtin 'file'
        path = os.path.join(get_basedir(), "ppattach", filename)
        f = open(path)
        lines = f.readlines()
        f.close()                               # don't leak the file handle
        for line in lines:
            yield tuple(line.split())
def dictionary(files=items):
    """
    Iterate over PP attachment records as dictionaries keyed by field name:
    'sent', 'verb', 'noun1', 'prep', 'noun2', 'attachment'.

    @param files: One or more corpus items to process (default: all)
    @type files: L{string} or L{tuple(string)}
    @rtype: iterator over L{dict}
    """
    for record in raw(files):
        yield {'sent': record[0],
               'verb': record[1],
               'noun1': record[2],
               'prep': record[3],
               'noun2': record[4],
               'attachment': record[5]}
def demo():
    """Pretty-print the first five training records, as tuples and dicts."""
    from en.parser.nltk_lite.corpora import ppattach
    from itertools import islice
    from pprint import pprint

    pprint(list(islice(ppattach.raw('training'), 0, 5)))
    pprint(list(islice(ppattach.dictionary('training'), 0, 5)))

if __name__ == '__main__':
    demo()
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Natural Language Toolkit: Toolbox Reader
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Greg Aumann <greg_aumann@sil.org>
# Stuart Robinson <Stuart.Robinson@mpi.nl>
# Steven Bird <sb@csse.unimelb.edu.au>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Module for reading, writing and manipulating Toolbox databases.
"""
import os, re
from en.parser.nltk_lite.corpora import get_basedir
from string import split
from itertools import imap
from StringIO import StringIO
from en.parser.nltk_lite.etree.ElementTree import TreeBuilder, Element
class StandardFormat(object):
"""
Class for reading and processing standard format marker files and strings.
"""
def open(self, sfm_file):
"""Open a standard format marker file for sequential reading.
@param sfm_file: name of the standard format marker input file
@type sfm_file: string
"""
self._file = file(sfm_file, 'rU')
def open_string(self, s):
"""Open a standard format marker string for sequential reading.
@param s: string to parse as a standard format marker input file
@type s: string
"""
self._file = StringIO(s)
def raw_fields(self):
"""Return an iterator for the fields in the standard format marker
file.
@return: an iterator that returns the next field in a (marker, value)
tuple. Linebreaks and trailing white space are preserved except
for the final newline in each field.
@rtype: iterator over C{(marker, value)} tuples
"""
join_string = '\n'
line_pat = re.compile(r'^(?:\\(\S+)\s*)?(.*)$')
# need to get first line outside the loop for correct handling
# of the first marker if it spans multiple lines
file_iter = iter(self._file)
line = file_iter.next()
mobj = re.match(line_pat, line)
mkr, line_value = mobj.groups()
value_lines = [line_value,]
self.line_num = 0
for line in file_iter:
self.line_num += 1
mobj = re.match(line_pat, line)
line_mkr, line_value = mobj.groups()
if line_mkr:
yield (mkr, join_string.join(value_lines))
mkr = line_mkr
value_lines = [line_value,]
else:
value_lines.append(line_value)
self.line_num += 1
yield (mkr, join_string.join(value_lines))
def fields(self, strip=True, unwrap=True, encoding=None, errors='strict', unicode_fields=None):
    """Return an iterator for the fields in the standard format marker file.

    @param strip: strip trailing whitespace from the last line of each field
    @type strip: boolean
    @param unwrap: Convert newlines in a field to spaces.
    @type unwrap: boolean
    @param encoding: Name of an encoding to use. If it is specified then
        the C{fields} method returns unicode strings rather than non
        unicode strings.
    @type encoding: string or None
    @param errors: Error handling scheme for codec. Same as the C{decode}
        inbuilt string method.
    @type errors: string
    @param unicode_fields: Set of marker names whose values are UTF-8 encoded.
        Ignored if encoding is None. If the whole file is UTF-8 encoded set
        C{encoding='utf8'} and leave C{unicode_fields} with its default
        value of None.
    @type unicode_fields: set or dictionary (actually any sequence that
        supports the 'in' operator).
    @return: an iterator that returns the next field in a C{(marker, value)}
        tuple. C{marker} and C{value} are unicode strings if an C{encoding}
        was specified in the C{fields} method. Otherwise they are nonunicode
        strings.
    @rtype: iterator over C{(marker, value)} tuples
    """
    if encoding is None and unicode_fields is not None:
        raise ValueError, 'unicode_fields is set but not encoding.'
    unwrap_pat = re.compile(r'\n+')
    for mkr, val in self.raw_fields():
        if encoding:
            # Fields named in unicode_fields are always UTF-8, regardless
            # of the file's overall encoding.
            if unicode_fields is not None and mkr in unicode_fields:
                val = val.decode('utf8', errors)
            else:
                val = val.decode(encoding, errors)
            mkr = mkr.decode(encoding, errors)
        if unwrap:
            # Collapse line wrapping: any run of newlines becomes a space.
            val = unwrap_pat.sub(' ', val)
        if strip:
            val = val.rstrip()
        yield (mkr, val)
def close(self):
    """Close a previously opened standard format marker file or string."""
    self._file.close()
    try:
        # line_num only exists once raw_fields() has started iterating.
        del self.line_num
    except AttributeError:
        pass
class ToolboxData(StandardFormat):
    """Reader that presents a Toolbox database as an ElementTree structure."""

    def __init__(self):
        super(ToolboxData, self).__init__()

    def parse(self, *args, **kwargs):
        # Thin wrapper: all the work happens in _record_parse().
        return self._record_parse(*args, **kwargs)

    def _record_parse(self, key=None, **kwargs):
        """
        Returns an element tree structure corresponding to a toolbox data
        file with all markers at the same level.

        The tree is rooted at a C{toolbox_data} element which contains a
        C{header} element (any fields appearing before the first record)
        followed by one C{record} element per record. Each field becomes a
        child element named after its marker, whose text is the field value.
        For example, a database whose records start with C{\\lx} fields is
        parsed into::

            <toolbox_data>
              <header> ...pre-record fields... </header>
              <record> <lx>...</lx> ...other fields... </record>
              ...
            </toolbox_data>

        @param key: Name of key marker at the start of each record. If set to
            None (the default value) the first marker that doesn't begin with
            an underscore is assumed to be the key.
        @type key: string
        @param kwargs: Keyword arguments passed to L{StandardFormat.fields()}
        @type kwargs: keyword arguments dictionary
        @rtype: ElementTree._ElementInterface
        @return: contents of toolbox data divided into header and records
        """
        tb = TreeBuilder()
        tb.start('toolbox_data', {})
        tb.start('header', {})
        seen_record = False
        for marker, value in self.fields(**kwargs):
            if key is None and not seen_record and marker[0] != '_':
                # First non-underscore marker becomes the record key.
                key = marker
            if marker == key:
                # A key marker closes the current section and opens a
                # fresh record.
                if seen_record:
                    tb.end('record')
                else:
                    tb.end('header')
                seen_record = True
                tb.start('record', {})
            tb.start(marker, {})
            tb.data(value)
            tb.end(marker)
        # Close whichever section is still open, then the root element.
        if seen_record:
            tb.end('record')
        else:
            tb.end('header')
        tb.end('toolbox_data')
        return tb.close()
def parse_corpus(file_name, key=None, **kwargs):
    """
    Return an element tree resulting from parsing the toolbox datafile.

    A convenience function that creates a C{ToolboxData} object, opens and
    parses the toolbox data file. The data file is assumed to be in the
    toolbox subdirectory of the directory where NLTK looks for corpora,
    see L{corpora.get_basedir()}.

    @param file_name: Name of file in toolbox corpus directory
    @type file_name: string
    @param key: marker at the start of each record
    @type key: string
    @param kwargs: Keyword arguments passed to L{ToolboxData.parse()}
    @type kwargs: keyword arguments dictionary
    @rtype: ElementTree._ElementInterface
    @return: contents of toolbox data divided into header and records
    """
    data_path = os.path.join(get_basedir(), 'toolbox', file_name)
    database = ToolboxData()
    database.open(data_path)
    return database.parse(key, **kwargs)
import re

# Matches any non-whitespace character. Used by to_sfm_string() to decide
# whether a field has a value (a space follows the marker only if it does).
_is_value = re.compile(r"\S")
def to_sfm_string(tree, encoding=None, errors='strict', unicode_fields=None):
    """Return a string with a standard format representation of the toolbox
    data in tree (tree can be a toolbox database or a single record).

    @param tree: flat representation of toolbox data (whole database or single record)
    @type tree: ElementTree._ElementInterface
    @param encoding: Name of an encoding to use.
    @type encoding: string
    @param errors: Error handling scheme for codec. Same as the C{encode}
        inbuilt string method.
    @type errors: string
    @param unicode_fields: markers whose values are encoded as UTF-8 rather
        than C{encoding}
    @type unicode_fields: string
    @rtype: string
    @return: string using standard format markup
    """
    # Promote a single record to a one-record database.
    if tree.tag == 'record':
        root = Element('toolbox_data')
        root.append(tree)
        tree = root
    if tree.tag != 'toolbox_data':
        raise ValueError, "not a toolbox_data element structure"
    if encoding is None and unicode_fields is not None:
        raise ValueError, \
            "if encoding is not specified then neither should unicode_fields"
    l = []
    for rec in tree:
        # Blank line between records; the leading one is dropped by the
        # l[1:] slice on return.
        l.append('\n')
        for field in rec:
            mkr = field.tag
            value = field.text
            if encoding is not None:
                if unicode_fields is not None and mkr in unicode_fields:
                    cur_encoding = 'utf8'
                else:
                    cur_encoding = encoding
                # A space separates marker and value only when the value
                # contains non-whitespace (see _is_value).
                if re.search(_is_value, value):
                    l.append((u"\\%s %s\n" % (mkr, value)).encode(cur_encoding, errors))
                else:
                    l.append((u"\\%s%s\n" % (mkr, value)).encode(cur_encoding, errors))
            else:
                if re.search(_is_value, value):
                    l.append("\\%s %s\n" % (mkr, value))
                else:
                    l.append("\\%s%s\n" % (mkr, value))
    return ''.join(l[1:])
def _parse_record(s):
"""
Deprecated: use C{StandardFormat.fields()}
@param s: toolbox record as a string
@type s: L{string}
@rtype: iterator over L{list(string)}
"""
s = "\n" + s # Fields (even first) must start w/ a carriage return
if s.endswith("\n") : s = s[:-1] # Remove single extra carriage return
for field in split(s, sep="\n\\")[1:] : # Parse by carriage return followed by backslash
parsed_field = split(field, sep=" ", maxsplit=1) # Split properly delineated field
try :
yield (parsed_field[0], parsed_field[1])
except IndexError :
yield (parsed_field[0], '')
def raw(files='rotokas.dic', include_header=False, head_field_marker=None):
    """Iterate over the records of one or more toolbox files.

    Deprecated: use C{StandardFormat.fields()}

    @param files: One or more toolbox files to be processed
    @type files: L{string} or L{tuple(string)}
    @param include_header: flag that determines whether to treat header as record (default is no)
    @type include_header: boolean
    @param head_field_marker: option for explicitly setting which marker to use as the head field
        when parsing the file (default is automatically determining it from
        the first field of the first record)
    @type head_field_marker: string
    @rtype: iterator over L{list(string)}
    """
    # Just one file to process? If so convert to a tuple so we can iterate
    if type(files) is str:
        files = (files,)
    for file in files:
        path = os.path.join(get_basedir(), "toolbox", file)
        fc = open(path, "U").read()
        # A leading \_ marker indicates a header block, separated from the
        # body by a blank line.
        if fc.strip().startswith(r"\_"):
            (header, body) = split(fc, sep="\n\n", maxsplit=1)
            if include_header:
                yield list(_parse_record(header))
        else:
            body = fc
        # Deal with head field marker
        if head_field_marker:
            # BUG FIX: this previously read "\\" + hfm, referencing an
            # undefined name and raising NameError whenever an explicit
            # head_field_marker was supplied.
            hfm_with_backslash = "\\" + head_field_marker
        else:
            ff = split(body, sep="\n", maxsplit=1)[0]  # first field
            hfm_with_backslash = split(ff, sep=" ", maxsplit=1)[0]  # raw marker of first field
        recordsep = "\n\n" + hfm_with_backslash  # separates records from one another
        # Parse records
        for r in split("\n\n" + body, sep=recordsep)[1:]:
            yield list(_parse_record(hfm_with_backslash + r))
# assumes headwords are unique
def dictionary(files='rotokas.dic', include_header=False) :
    """
    Deprecated: use C{ToolboxData.parse()}

    @param files: One or more toolbox files to be processed
    @type files: L{string} or L{tuple(string)}
    @param include_header: treat header as entry?
    @type include_header: boolean
    @rtype: iterator over L{dict}
    """
    # imap (Python 2 itertools) lazily turns each record -- a list of
    # (marker, value) pairs -- into a dict; duplicate markers within a
    # record collapse to the last value seen.
    return imap(dict, raw(files, include_header))
def _dict_list_entry(entry):
d = {}
for field in entry:
if len(field) == 2:
name, value = field
if name not in d:
d[name] = []
d[name].append(value)
return d
# if two entries have the same headword this key maps to a list of entries
def dict_list(files='rotokas.dic', include_header=False) :
    """
    Deprecated: use C{ToolboxData.parse()}

    @param files: One or more toolbox files to be processed
    @type files: L{string} or L{tuple(string)}
    @param include_header: treat header as entry?
    @type include_header: boolean
    @rtype: iterator over L{dict}
    """
    # Normalise a bare filename to a one-element tuple so we can iterate.
    if type(files) is str:
        files = (files,)
    for record in raw(files, include_header):
        yield _dict_list_entry(record)
def demo():
    """Exercise the deprecated toolbox access functions on sample data."""
    from en.parser.nltk_lite.corpora import toolbox
    from itertools import islice
    from pprint import pprint
    print 'Raw:'
    pprint(list(islice(toolbox.raw(), 3)))
    print 'Dictionary:'
    pprint(list(islice(toolbox.dictionary(), 3)))
    print 'Dictionary-List:'
    pprint(list(islice(toolbox.dict_list(), 3)))
    # test.dic exercises edge cases (multi-line values, empty fields, ...)
    print 'Complex test cases, no header'
    pprint(list(toolbox.raw("test.dic")))
    print 'Complex test cases, no header, dictionary'
    pprint(list(toolbox.dictionary("test.dic")))
    print 'Complex test cases, no header, dictionary list'
    pprint(list(toolbox.dict_list("test.dic")))
    print 'Complex test cases, with header'
    pprint(list(toolbox.raw("test.dic", include_header=True)))

if __name__ == '__main__':
    demo()
| Python |
# Natural Language Toolkit: Carnegie Mellon Pronouncing Dictionary Corpus Reader
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Steven Bird <sb@ldc.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
The Carnegie Mellon Pronouncing Dictionary [cmudict.0.6]
ftp://ftp.cs.cmu.edu/project/speech/dict/
Copyright 1998 Carnegie Mellon University
File Format: Each line consists of an uppercased word, a counter
(for alternative pronunciations), and a transcription. Vowels are
marked for stress (1=primary, 2=secondary, 0=no stress). E.g.:
NATURAL 1 N AE1 CH ER0 AH0 L
The dictionary contains 127069 entries. Of these, 119400 words are assigned
a unique pronunciation, 6830 words have two pronunciations, and 839 words have
three or more pronunciations. Many of these are fast-speech variants.
Phonemes: There are 39 phonemes, as shown below:
Phoneme Example Translation Phoneme Example Translation
------- ------- ----------- ------- ------- -----------
AA odd AA D AE at AE T
AH hut HH AH T AO ought AO T
AW cow K AW AY hide HH AY D
B be B IY CH cheese CH IY Z
D dee D IY DH thee DH IY
EH Ed EH D ER hurt HH ER T
EY ate EY T F fee F IY
G green G R IY N HH he HH IY
IH it IH T IY eat IY T
JH gee JH IY K key K IY
L lee L IY M me M IY
N knee N IY NG ping P IH NG
OW oat OW T OY toy T OY
P pee P IY R read R IY D
S sea S IY SH she SH IY
T tea T IY TH theta TH EY T AH
UH hood HH UH D UW two T UW
V vee V IY W we W IY
Y yield Y IY L D Z zee Z IY
ZH seizure S IY ZH ER
"""
from en.parser.nltk_lite.corpora import get_basedir
import os
# Corpus items provided by this module.
items = [
    'cmudict']

# Human-readable description of each item.
item_name = {
    'cmudict': 'CMU Pronunciation Dictionary, Version 0.6, 1998',
}
def raw(files = 'cmudict'):
    """
    Yield one (word, counter, phonemes) triple per dictionary line.

    @param files: One or more cmudict files to be processed
    @type files: L{string} or L{tuple(string)}
    @rtype: iterator over L{tree}
    """
    # Normalise a bare filename to a one-element tuple so we can iterate.
    if type(files) is str:
        files = (files,)
    for file in files:
        path = os.path.join(get_basedir(), "cmudict", file)
        for line in open(path):
            parts = line.strip().split(' ')
            # word, alternative-pronunciation counter, phoneme sequence
            yield (parts[0], int(parts[1]), tuple(parts[2:]))
def dictionary(files='cmudict'):
    """Map each word to the tuple of all its pronunciations.

    Relies on the counter field: 1 starts a fresh tuple, higher values
    extend it, so alternative pronunciations must follow the first entry.
    """
    prons = {}
    for word, num, pron in raw(files):
        if num != 1:
            # Alternative pronunciation: extend the existing tuple.
            prons[word] += (pron,)
        else:
            # First pronunciation for this word.
            prons[word] = (pron,)
    return prons
def demo():
    """Show a slice of raw entries and a few dictionary lookups."""
    from en.parser.nltk_lite.corpora import cmudict
    from itertools import islice
    print "raw method:"
    for entry in islice(cmudict.raw(), 40000, 40025):
        print entry
    print
    print "dictionary method:"
    # NOTE: rebinding the name shadows the imported module from here on.
    cmudict = cmudict.dictionary()
    print 'NATURAL', cmudict['NATURAL']
    print 'LANGUAGE', cmudict['LANGUAGE']
    print 'TOOL', cmudict['TOOL']
    print 'KIT', cmudict['KIT']

if __name__ == '__main__':
    demo()
| Python |
# Natural Language Toolkit: IEER Corpus Reader
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Steven Bird <sb@csse.unimelb.edu.au>
# Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Corpus reader for the Information Extraction and Entity Recognition Corpus.
NIST 1999 Information Extraction: Entity Recognition Evaluation
http://www.itl.nist.gov/iad/894.01/tests/ie-er/er_99/er_99.htm
This corpus contains the NEWSWIRE development test data for the
NIST 1999 IE-ER Evaluation. The files were taken from the
subdirectory: /ie_er_99/english/devtest/newswire/*.ref.nwt
and filenames were shortened.
The corpus contains the following files: APW_19980314, APW_19980424,
APW_19980429, NYT_19980315, NYT_19980403, and NYT_19980407.
"""
from en.parser.nltk_lite.corpora import get_basedir, extract
from en.parser.nltk_lite.parse.tree import ieer_chunk
import os
# Newswire files included in the IEER corpus sample.
items = ['APW_19980314', 'APW_19980424', 'APW_19980429',
         'NYT_19980315', 'NYT_19980403', 'NYT_19980407']

# Human-readable description of each item.
item_name = {
    'APW_19980314': 'Associated Press Weekly, 14 March 1998',
    'APW_19980424': 'Associated Press Weekly, 24 April 1998',
    'APW_19980429': 'Associated Press Weekly, 29 April 1998',
    'NYT_19980315': 'New York Times, 15 March 1998',
    'NYT_19980403': 'New York Times, 3 April 1998',
    'NYT_19980407': 'New York Times, 7 April 1998',
}
def raw(files = items):
    """Yield each <DOC>...</DOC> document of the given IEER files as a string."""
    if type(files) is str:
        files = (files,)
    for file in files:
        path = os.path.join(get_basedir(), "ieer", file)
        text = open(path).read()
        for chunk in text.split('</DOC>'):
            halves = chunk.split('<DOC>')
            # Only chunks that contained an opening tag are real documents.
            if len(halves) == 2:
                yield "<DOC>" + halves[1] + "</DOC>\n"
def dictionary(files = items):
    """Yield each document parsed into a named-entity chunk structure."""
    for document in raw(files):
        yield ieer_chunk(document)
def demo():
    """Print the chunk structure of one IEER document."""
    from en.parser.nltk_lite.corpora import ieer
    from itertools import islice
    from pprint import pprint
    # pprint(extract(75, ieer.raw()))
    pprint(extract(75, ieer.dictionary()))

if __name__ == '__main__':
    demo()
| Python |
# Natural Language Toolkit: CONLL Corpus Reader
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Steven Bird <sb@ldc.upenn.edu>
# Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Read chunk structures from the CONLL-2000 Corpus
"""
from en.parser.nltk_lite.corpora import get_basedir
from en.parser.nltk_lite import tokenize
from en.parser.nltk_lite.parse import tree
import os
# The CONLL-2000 corpus ships as a training and a test file.
items = ['train', 'test']

# Human-readable description of each item.
item_name = {
    'train': 'training set',
    'test': 'test set'
}
def _list_sent(sent):
    # Split a blank-line-delimited sentence into one whitespace-tokenized
    # sequence per line; each line holds a (word, tag, chunk) triple.
    return [tokenize.whitespace(line) for line in tokenize.line(sent)]
def raw(files = items):
    """Yield the bare word sequence of each sentence."""
    if type(files) is str:
        files = (files,)
    for file in files:
        path = os.path.join(get_basedir(), "conll2000", file + ".txt")
        text = open(path).read()
        # Sentences are separated by blank lines.
        for sent in tokenize.blankline(text):
            yield [word for (word, tag, chunk) in _list_sent(sent)]
def tagged(files = items):
    """Yield each sentence as a list of (word, tag) pairs."""
    if type(files) is str:
        files = (files,)
    for file in files:
        path = os.path.join(get_basedir(), "conll2000", file + ".txt")
        text = open(path).read()
        # Sentences are separated by blank lines.
        for sent in tokenize.blankline(text):
            yield [(word, tag) for (word, tag, chunk) in _list_sent(sent)]
def chunked(files = items, chunk_types=('NP',)):
    """Yield each sentence as a chunk tree limited to chunk_types."""
    if type(files) is str:
        files = (files,)
    for file in files:
        path = os.path.join(get_basedir(), "conll2000", file + ".txt")
        text = open(path).read()
        # Sentences are separated by blank lines.
        for sent in tokenize.blankline(text):
            yield tree.conll_chunk(sent, chunk_types)
def demo():
    """Show raw, tagged and chunked views of the CONLL-2000 data."""
    from en.parser.nltk_lite.corpora import conll2000
    from itertools import islice
    print "CONLL Chunked data\n"
    print "Raw text:"
    for sent in islice(conll2000.raw(), 0, 5):
        print sent
    print
    print "Tagged text:"
    for sent in islice(conll2000.tagged(), 0, 5):
        print sent
    print
    print "Chunked text:"
    for tree in islice(conll2000.chunked(chunk_types=('NP', 'PP', 'VP')), 0, 5):
        print tree.pp()
    print

if __name__ == '__main__':
    demo()
| Python |
# Natural Language Toolkit: TIMIT Corpus Reader
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Haejoong Lee <haejoong@ldc.upenn.edu>
# Steven Bird <sb@ldc.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Read tokens, phonemes and audio data from the NLTK TIMIT Corpus.
This corpus contains selected portion of the TIMIT corpus.
* 16 speakers from 8 dialect regions
* 1 male and 1 female from each dialect region
* total 130 sentences (10 sentences per speaker. Note that some
sentences are shared among other speakers, especially sa1 and sa2
are spoken by all speakers.)
* total 160 recording of sentences (10 recordings per speaker)
* audio format: NIST Sphere, single channel, 16kHz sampling,
16 bit sample, PCM encoding
Module contents
---------------
The timit module provides 4 functions and 4 data items.
* items
List of items in the corpus. There are total 160 items, each of which
corresponds to a unique utterance of a speaker. Here's an example of an
item in the list:
dr1-fvmh0:sx206
- _---- _---
| | | | |
| | | | |
| | | | `--- sentence number
| | | `----- sentence type (a:all, i:shared, x:exclusive)
| | `--------- speaker ID
| `------------ sex (m:male, f:female)
`-------------- dialect region (1..8)
* speakers
List of speaker IDs. An example of speaker ID:
dr1-fvmh0
Note that if you split an item ID with colon and take the first element of
the result, you will get a speaker ID.
>>> itemid = dr1-fvmh0:sx206
>>> spkrid,sentid = itemid.split(':')
>>> spkrid
'dr1-fvmh0'
The second element of the result is a sentence ID.
* dictionary
Phonetic dictionary of words contained in this corpus. This is a Python
dictionary from words to phoneme lists.
* spkrinfo
Speaker information table. It's a Python dictionary from speaker IDs to
records of 10 fields. Speaker IDs the same as the ones in timie.speakers.
Each record is a dictionary from field names to values, and the fields are
as follows:
id speaker ID as defined in the original TIMIT speaker info table
sex speaker gender (M:male, F:female)
dr speaker dialect region (1:new england, 2:northern,
3:north midland, 4:south midland, 5:southern, 6:new york city,
7:western, 8:army brat (moved around))
use corpus type (TRN:training, TST:test)
in this sample corpus only TRN is available
recdate recording date
birthdate speaker birth date
ht speaker height
race speaker race (WHT:white, BLK:black, AMR:american indian,
SPN:spanish-american, ORN:oriental,???:unknown)
edu speaker education level (HS:high school, AS:associate degree,
BS:bachelor's degree (BS or BA), MS:master's degree (MS or MA),
PHD:doctorate degree (PhD,JD,MD), ??:unknown)
comments comments by the recorder
The 4 functions are as follows.
* raw(sentences=items, offset=False)
Given a list of items, returns an iterator of a list of word lists,
each of which corresponds to an item (sentence). If offset is set to True,
each element of the word list is a tuple of word(string), start offset and
end offset, where offset is represented as a number of 16kHz samples.
* phonetic(sentences=items, offset=False)
Given a list of items, returns an iterator of a list of phoneme lists,
each of which corresponds to an item (sentence). If offset is set to True,
each element of the phoneme list is a tuple of word(string), start offset
and end offset, where offset is represented as a number of 16kHz samples.
* audiodata(item, start=0, end=None)
Given an item, returns a chunk of audio samples formatted into a string.
When the fuction is called, if start and end are omitted, the entire
samples of the recording will be returned. If only end is omitted,
samples from the start offset to the end of the recording will be returned.
* play(data)
Play the given audio samples. The audio samples can be obtained from the
timit.audiodata function.
"""
from en.parser.nltk_lite.corpora import get_basedir
from en.parser.nltk_lite import tokenize
from itertools import islice
import ossaudiodev, time
import sys, os, re
# Audio playback uses OSS (ossaudiodev), which is only available on
# Linux/FreeBSD.
if sys.platform.startswith('linux') or sys.platform.startswith('freebsd'):
    PLAY_ENABLED = True
else:
    PLAY_ENABLED = False

__all__ = ["items", "raw", "phonetic", "speakers", "dictionary", "spkrinfo",
           "audiodata", "play"]

# Root directory of the TIMIT sample within the NLTK corpus directory.
PREFIX = os.path.join(get_basedir(),"timit")

speakers = []
items = []
dictionary = {}
spkrinfo = {}

# Scan the corpus: speaker directories look like "dr1-fvmh0"; each *.txt
# file inside names one utterance, giving item ids "speaker:sentence".
for f in os.listdir(PREFIX):
    if re.match("^dr[0-9]-[a-z]{4}[0-9]$", f):
        speakers.append(f)
        for g in os.listdir(os.path.join(PREFIX,f)):
            if g.endswith(".txt"):
                items.append(f+':'+g[:-4])
speakers.sort()
items.sort()

# read dictionary (lines beginning with ';' are comments)
for l in open(os.path.join(PREFIX,"timitdic.txt")):
    if l[0] == ';': continue
    a = l.strip().split(' ')
    # word -> list of phonemes (slashes around the transcription stripped)
    dictionary[a[0]] = a[1].strip('/').split()

# read spkrinfo
header = ['id','sex','dr','use','recdate','birthdate','ht','race','edu',
          'comments']
for l in open(os.path.join(PREFIX,"spkrinfo.txt")):
    if l[0] == ';': continue
    # Fixed-width record: first 54 chars hold 9 fields, the rest is the
    # free-text comments field.
    rec = l[:54].split() + [l[54:].strip()]
    # Key matches the speaker directory naming: dr<region>-<sex><id>.
    key = "dr%s-%s%s" % (rec[2],rec[1].lower(),rec[0].lower())
    spkrinfo[key] = dict([(header[i],rec[i]) for i in range(10)])
def _prim(ext, sentences=items, offset=False):
    """Read the annotation file with extension *ext* for each sentence.

    Each annotation line has the form: <start> <end> <token>, with offsets
    in 16kHz samples.
    """
    if isinstance(sentences, str):
        sentences = [sentences]
    for sent in sentences:
        fnam = os.path.sep.join([PREFIX] + sent.split(':')) + ext
        tokens = []
        for line in open(fnam):
            if not line.strip():
                continue
            parts = line.split()
            if offset:
                tokens.append((parts[2], int(parts[0]), int(parts[1])))
            else:
                tokens.append(parts[2])
        yield tokens
def raw(sentences=items, offset=False):
    """
    Given a list of items, returns an iterator of a list of word lists,
    each of which corresponds to an item (sentence). If offset is set to True,
    each element of the word list is a tuple of word(string), start offset and
    end offset, where offset is represented as a number of 16kHz samples.

    @param sentences: List of items (sentences) for which tokenized word list
        will be returned. In case there is only one item, it is possible to
        pass the item id as a string.
    @type sentences: list of strings or a string
    @param offset: If True, the start and end offsets are accompanied to each
        word in the returned list. Note that here, an offset is represented by
        the number of 16kHz samples.
    @type offset: bool
    @return: List of list of strings (words) if offset is False. List of list
        of tuples (word, start offset, end offset) if offset if True.
    """
    # .wrd files hold the word-level annotation.
    return _prim(".wrd", sentences, offset)
def phonetic(sentences=items, offset=False):
    """
    Given a list of items, returns an iterator of a list of phoneme lists,
    each of which corresponds to an item (sentence). If offset is set to True,
    each element of the phoneme list is a tuple of word(string), start offset
    and end offset, where offset is represented as a number of 16kHz samples.

    @param sentences: List of items (sentences) for which phoneme list
        will be returned. In case there is only one item, it is possible to
        pass the item id as a string.
    @type sentences: list of strings or a string
    @param offset: If True, the start and end offsets are accompanied to each
        phoneme in the returned list. Note that here, an offset is represented
        by the number of 16kHz samples.
    @type offset: bool
    @return: List of list of strings (phonemes) if offset is False. List of
        list of tuples (phoneme, start offset, end offset) if offset if True.
    """
    # .phn files hold the phoneme-level annotation.
    return _prim(".phn", sentences, offset)
def audiodata(item, start=0, end=None):
    """
    Given an item, returns a chunk of audio samples formatted into a string.
    When the function is called, if start and end are omitted, the entire
    samples of the recording will be returned. If only end is omitted,
    samples from the start offset to the end of the recording will be
    returned.

    @param start: start offset
    @type start: integer (number of 16kHz frames)
    @param end: end offset
    @type end: integer (number of 16kHz frames) or None to indicate
        the end of file
    @return: string of sequence of bytes of audio samples
    """
    assert(end is None or end > start)
    headersize = 44  # canonical RIFF/WAV header length in bytes
    fnam = os.path.join(PREFIX, item.replace(':', os.path.sep)) + '.wav'
    # BUG FIX: open in binary mode -- reading audio through a text-mode
    # handle corrupts the samples on platforms that translate newlines.
    # The handle is also closed explicitly now instead of being leaked.
    f = open(fnam, 'rb')
    try:
        if end is None:
            data = f.read()
        else:
            # Samples are 16-bit, so each frame occupies 2 bytes.
            data = f.read(headersize + end * 2)
    finally:
        f.close()
    return data[headersize + start * 2:]
def play(data):
    """
    Play the given audio samples.

    @param data: audio samples
    @type data: string of bytes of audio samples
    """
    if not PLAY_ENABLED:
        print >>sys.stderr, "sorry, currently we don't support audio playback on this platform:", sys.platform
        return
    try:
        dsp = ossaudiodev.open('w')
    except IOError, e:
        print >>sys.stderr, "can't acquire the audio device; please activate your audio device."
        print >>sys.stderr, "system error message:", str(e)
        return
    # 16-bit little-endian, mono, 16kHz: matches the corpus recordings.
    dsp.setfmt(ossaudiodev.AFMT_S16_LE)
    dsp.channels(1)
    dsp.speed(16000)
    dsp.write(data)
    dsp.close()
def demo():
    """Walk through the corpus API: speaker metadata, transcriptions,
    dictionary lookup and audio playback."""
    from en.parser.nltk_lite.corpora import timit
    print "6th item (timit.items[5])"
    print "-------------------------"
    itemid = timit.items[5]
    spkrid, sentid = itemid.split(':')
    print " item id: ", itemid
    print " speaker id: ", spkrid
    print " sentence id:", sentid
    print
    # Speaker metadata record (see the spkrinfo table above).
    record = timit.spkrinfo[spkrid]
    print " speaker information:"
    print " TIMIT speaker id: ", record['id']
    print " speaker sex: ", record['sex']
    print " dialect region: ", record['dr']
    print " data type: ", record['use']
    print " recording date: ", record['recdate']
    print " date of birth: ", record['birthdate']
    print " speaker height: ", record['ht']
    print " speaker race: ", record['race']
    print " speaker education:", record['edu']
    print " comments: ", record['comments']
    print
    print " words of the sentence:"
    print " ", timit.raw(sentences=itemid).next()
    print
    print " words of the sentence with offsets (first 3):"
    print " ", timit.raw(sentences=itemid, offset=True).next()[:3]
    print
    print " phonemes of the sentence (first 10):"
    print " ", timit.phonetic(sentences=itemid).next()[:10]
    print
    print " phonemes of the sentence with offsets (first 3):"
    print " ", timit.phonetic(sentences=itemid, offset=True).next()[:3]
    print
    print " looking up dictionary for words of the sentence..."
    words = timit.raw(sentences=itemid).next()
    for word in words:
        print " %-5s:" % word, timit.dictionary[word]
    print
    print "audio playback:"
    print "---------------"
    print " playing sentence", sentid, "by speaker", spkrid, "(a.k.a. %s)"%record["id"], "..."
    data = timit.audiodata(itemid)
    timit.play(data)
    print
    print " playing words:"
    # Offsets are in 16kHz samples; slice the recording per word.
    words = timit.raw(sentences=itemid, offset=True).next()
    for word, start, end in words:
        print " playing %-10s in 1.5 seconds ..." % `word`
        time.sleep(1.5)
        data = timit.audiodata(itemid, start, end)
        timit.play(data)
    print
    print " playing phonemes (first 10):"
    phones = timit.phonetic(sentences=itemid, offset=True).next()
    for phone, start, end in phones[:10]:
        print " playing %-10s in 1.5 seconds ..." % `phone`
        time.sleep(1.5)
        data = timit.audiodata(itemid, start, end)
        timit.play(data)
    print
    # play sentence sa1 of all female speakers
    sentid = 'sa1'
    for spkr in timit.speakers:
        if timit.spkrinfo[spkr]['sex'] == 'F':
            itemid = spkr + ':' + sentid
            print " playing sentence %s of speaker %s ..." % (sentid, spkr)
            data = timit.audiodata(itemid)
            timit.play(data)
    print

if __name__ == '__main__':
    demo()
| Python |
# Natural Language Toolkit: Corpus Readers
#
# Copyright (C) 2001-2005 University of Pennsylvania
# Author: Steven Bird <sb@ldc.upenn.edu>
# Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
import os, sys
def set_basedir(path):
    """
    Set the path to the directory where NLTK looks for corpora.

    @type path: C{string}
    @param path: The path to the directory where NLTK should look for corpora.
    """
    # Stored in a module-level global read back by get_basedir().
    global _BASEDIR
    _BASEDIR = path
def get_basedir():
    """
    @return: The path of the directory where NLTK looks for corpora.
    @rtype: C{string}
    """
    return _BASEDIR
# Find a default base directory: the NLTK_LITE_CORPORA environment
# variable wins; otherwise probe the conventional install locations.
# (Uses 'in' rather than the Python-2-only dict.has_key; also removes a
# duplicate, unreachable '/usr/share/nltk_lite' branch that appeared
# after '/usr/lib/nltk_lite'.)
if 'NLTK_LITE_CORPORA' in os.environ:
    set_basedir(os.environ['NLTK_LITE_CORPORA'])
elif sys.platform.startswith('win'):
    if os.path.isdir(os.path.join(sys.prefix, 'nltk_lite')):
        set_basedir(os.path.join(sys.prefix, 'nltk_lite'))
    elif os.path.isdir(os.path.join(sys.prefix, 'lib', 'nltk_lite')):
        set_basedir(os.path.join(sys.prefix, 'lib', 'nltk_lite'))
    else:
        # Fall back to the preferred Windows location even if absent.
        set_basedir(os.path.join(sys.prefix, 'nltk_lite'))
elif os.path.isdir('/usr/share/nltk_lite'):
    set_basedir('/usr/share/nltk_lite')
elif os.path.isdir('/usr/local/share/nltk_lite'):
    set_basedir('/usr/local/share/nltk_lite')
elif os.path.isdir('/usr/lib/nltk_lite'):
    set_basedir('/usr/lib/nltk_lite')
elif os.path.isdir('/usr/local/lib/nltk_lite'):
    set_basedir('/usr/local/lib/nltk_lite')
else:
    set_basedir('/usr/lib/nltk_lite')
# Access to individual corpus items

from itertools import islice

def extract(n, i):
    """Return the C{n}th item produced by iterator C{i}."""
    # islice yields a one-element window at position n; indexing it
    # raises IndexError if the iterator is too short.
    window = list(islice(i, n, n + 1))
    return window[0]
| Python |
# Natural Language Toolkit: Brown Corpus Reader
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Steven Bird <sb@ldc.upenn.edu>
# Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Read tokens from the Brown Corpus.
Brown Corpus: A Standard Corpus of Present-Day Edited American
English, for use with Digital Computers, by W. N. Francis and
H. Kucera (1964), Department of Linguistics, Brown University,
Providence, Rhode Island, USA. Revised 1971, Revised and
Amplified 1979. Distributed with NLTK with the permission of the
copyright holder. Source: http://www.hit.uib.no/icame/brown/bcm.html
The Brown Corpus is divided into the following files:
a. press: reportage
b. press: editorial
c. press: reviews
d. religion
e. skill and hobbies
f. popular lore
g. belles-lettres
h. miscellaneous: government & house organs
j. learned
k: fiction: general
l: fiction: mystery
m: fiction: science
n: fiction: adventure
p. fiction: romance
r. humor
"""
from en.parser.nltk_lite.corpora import get_basedir
from en.parser.nltk_lite import tokenize
from en.parser.nltk_lite.tag import string2tags, string2words
import os
# Section letters of the Brown Corpus (note: no 'i', 'o' or 'q').
items = list('abcdefghjklmnpr')

# Human-readable description of each section.
item_name = {
    'a': 'press: reportage',
    'b': 'press: editorial',
    'c': 'press: reviews',
    'd': 'religion',
    'e': 'skill and hobbies',
    'f': 'popular lore',
    'g': 'belles-lettres',
    'h': 'miscellaneous: government & house organs',
    'j': 'learned',
    'k': 'fiction: general',
    'l': 'fiction: mystery',
    'm': 'fiction: science',
    'n': 'fiction: adventure',
    'p': 'fiction: romance',
    'r': 'humor'
}
def _read(files, conversion_function):
    """Apply conversion_function to each blank-line-separated sentence."""
    if type(files) is str:
        files = (files,)
    for file in files:
        path = os.path.join(get_basedir(), "brown", file)
        text = open(path).read()
        for sent in tokenize.blankline(text):
            yield conversion_function(sent)
def raw(files = items):
    # Untagged word lists, one list per sentence.
    return _read(files, string2words)
def tagged(files = items):
    # (word, tag) pair lists, one list per sentence.
    return _read(files, string2tags)
def demo():
    """Print the first few raw and tagged sentences of section 'a'."""
    from en.parser.nltk_lite.corpora import brown
    from itertools import islice
    from pprint import pprint
    pprint(list(islice(brown.raw('a'), 0, 5)))
    pprint(list(islice(brown.tagged('a'), 0, 5)))

if __name__ == '__main__':
    demo()
| Python |
# Natural Language Toolkit: Gutenberg Corpus Reader
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Steven Bird <sb@ldc.upenn.edu>
# Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Read tokens from the NLTK Gutenberg Corpus.
Project Gutenberg -- http://gutenberg.net/
This corpus contains selected texts from Project Gutenberg:
* Jane Austen (3)
* William Blake (2)
* G. K. Chesterton (3)
* King James Bible
* John Milton
* William Shakespeare (3)
* Walt Whitman
"""
from en.parser.nltk_lite.corpora import get_basedir
from en.parser.nltk_lite import tokenize
import os, re
# Texts included in the NLTK Gutenberg sample.
items = [
    'austen-emma',
    'austen-persuasion',
    'austen-sense',
    'bible-kjv',
    'blake-poems',
    'blake-songs',
    'chesterton-ball',
    'chesterton-brown',
    'chesterton-thursday',
    'milton-paradise',
    'shakespeare-caesar',
    'shakespeare-hamlet',
    'shakespeare-macbeth',
    'whitman-leaves'
]

# Human-readable description of each item.
# (Fixed the 'Willian Blake' typo in the blake-songs entry.)
item_name = {
    'austen-emma': 'Jane Austen: Emma',
    'austen-persuasion': 'Jane Austen: Persuasion',
    'austen-sense': 'Jane Austen: Sense and Sensibility',
    'bible-kjv': 'King James Bible',
    'blake-poems': 'William Blake: Poems',
    'blake-songs': 'William Blake: Songs of Innocence and Experience',
    'chesterton-ball': 'G.K. Chesterton: The Ball and The Cross',
    'chesterton-brown': 'G.K. Chesterton: The Wisdom of Father Brown',
    'chesterton-thursday': 'G.K. Chesterton: The Man Who Was Thursday',
    'milton-paradise': 'John Milton: Paradise Lost',
    'shakespeare-caesar': 'William Shakespeare: Julius Caesar',
    'shakespeare-hamlet': 'William Shakespeare: Hamlet',
    'shakespeare-macbeth': 'William Shakespeare: Macbeth',
    'whitman-leaves': 'Walt Whitman: Leaves of Grass',
}
def raw(files = items):
    """Yield word tokens from the given Gutenberg texts.

    The Project Gutenberg preamble (everything up to and including the
    line starting with '*END*') is skipped.

    Fixes: iterate the file lazily instead of readlines() (which loaded
    the whole text into memory), and close the handle after use (it was
    previously left to the garbage collector).
    """
    if type(files) is str: files = (files,)
    for file in files:
        path = os.path.join(get_basedir(), "gutenberg", file + ".txt")
        f = open(path)
        preamble = True
        for line in f:
            if not preamble:
                for t in tokenize.wordpunct(line):
                    yield t
            if line[:5] == '*END*':
                preamble = False
        f.close()
def demo():
from en.parser.nltk_lite.corpora import gutenberg
from itertools import islice
for word in islice(gutenberg.raw('bible-kjv'), 0, 100):
print word,
if __name__ == '__main__':
demo()
| Python |
# Natural Language Toolkit: Wordlist Corpus Reader
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Steven Bird <sb@ldc.upenn.edu>
# Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Read tokens from the Wordlist Corpus.
"""
from en.parser.nltk_lite.corpora import get_basedir
import os
# Available wordlist files (under the "words" data directory).
items = ['en']
# Human-readable names for each wordlist.
item_name = {
  'en': 'English Wordlist',
}
def raw(files = items):
    """Yield one stripped word per line from each wordlist file.

    Fixes: iterate the file lazily instead of readlines(), and close the
    handle explicitly (it was previously never closed).
    """
    if type(files) is str: files = (files,)
    for file in files:
        path = os.path.join(get_basedir(), "words", file)
        f = open(path)
        for word in f:
            yield word.strip()
        f.close()
def demo():
    """Pretty-print the first 20 entries of the wordlist corpus."""
    from en.parser.nltk_lite.corpora import words
    from itertools import islice
    from pprint import pprint
    sample = list(islice(words.raw(), 0, 20))
    pprint(sample)
if __name__ == '__main__':
    demo()
| Python |
# Natural Language Toolkit: Presidential State of the Union Address Corpus Reader
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Steven Bird <sb@ldc.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
C-Span Inaugural Address Corpus
US presidential inaugural addresses 1789-2005
"""
from en.parser.nltk_lite.corpora import get_basedir
from en.parser.nltk_lite import tokenize
import os, re
# Inaugural addresses, identified as "<year>-<president>"; each maps to a
# "<id>.txt" file under the inaugural data directory (see raw() below).
items = [
    '1789-Washington',
    '1793-Washington',
    '1797-Adams',
    '1801-Jefferson',
    '1805-Jefferson',
    '1809-Madison',
    '1813-Madison',
    '1817-Monroe',
    '1821-Monroe',
    '1825-Adams',
    '1829-Jackson',
    '1833-Jackson',
    '1837-VanBuren',
    '1841-Harrison',
    '1845-Polk',
    '1849-Taylor',
    '1853-Pierce',
    '1857-Buchanan',
    '1861-Lincoln',
    '1865-Lincoln',
    '1869-Grant',
    '1873-Grant',
    '1877-Hayes',
    '1881-Garfield',
    '1885-Cleveland',
    '1889-Harrison',
    '1893-Cleveland',
    '1897-McKinley',
    '1901-McKinley',
    '1905-Roosevelt',
    '1909-Taft',
    '1913-Wilson',
    '1917-Wilson',
    '1921-Harding',
    '1925-Coolidge',
    '1929-Hoover',
    '1933-Roosevelt',
    '1937-Roosevelt',
    '1941-Roosevelt',
    '1945-Roosevelt',
    '1949-Truman',
    '1953-Eisenhower',
    '1957-Eisenhower',
    '1961-Kennedy',
    '1965-Johnson',
    '1969-Nixon',
    '1973-Nixon',
    '1977-Carter',
    '1981-Reagan',
    '1985-Reagan',
    '1989-Bush',
    '1993-Clinton',
    '1997-Clinton',
    '2001-Bush',
    '2005-Bush'
]
def raw(files = items):
    """Yield word tokens from each of the given inaugural addresses.

    Fixes: removed the unused 'preamble' flag (dead code copied from the
    Gutenberg reader -- these files have no preamble) and close the file
    handle after reading.
    """
    if type(files) is str: files = (files,)
    for file in files:
        path = os.path.join(get_basedir(), "inaugural", file + ".txt")
        f = open(path)
        text = f.read()
        f.close()
        for t in tokenize.wordpunct(text):
            yield t
def demo():
from en.parser.nltk_lite.corpora import inaugural
for speech in inaugural.items:
year = speech[:4]
freq = list(inaugural.raw(speech)).count('men')
print year, freq
if __name__ == '__main__':
demo()
| Python |
# Natural Language Toolkit: Word Finder
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Steven Bird <sb@csse.unimelb.edu.au>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
# Simplified from PHP version by Robert Klein <brathna@gmail.com>
# http://fswordfinder.sourceforge.net/
import random
from string import strip, join
# reverse a word with probability 0.5
def revword(word):
    """Return word reversed with probability 0.5, otherwise unchanged."""
    if random.randint(1, 2) == 2:
        return word
    return word[::-1]
# try to insert word at position x,y; direction encoded in xf,yf
def step(word, x, xf, y, yf, grid):
    """Write word into grid along the path (xf(i), yf(i)).

    First verifies every target cell is empty or already holds the same
    letter; on any conflict, returns False without touching the grid.
    """
    cells = [(xf(i), yf(i), letter) for i, letter in enumerate(word)]
    for r, c, letter in cells:
        if grid[r][c] not in ("", letter):
            return False
    for r, c, letter in cells:
        grid[r][c] = letter
    return True
def check(word, dir, x, y, grid, rows, cols):
    """Try to place word at (x, y) in direction dir.

    Directions: 1 = up-left diagonal, 2 = up, 3 = up-right diagonal,
    4 = left.  Returns True on success, False when the word does not fit
    (and None for an unrecognised direction, as before).
    """
    n = len(word)
    if dir == 1:
        if x - n < 0 or y - n < 0:
            return False
        return step(word, x, lambda i: x - i, y, lambda i: y - i, grid)
    if dir == 2:
        if x - n < 0:
            return False
        return step(word, x, lambda i: x - i, y, lambda i: y, grid)
    if dir == 3:
        if x - n < 0 or y + (n - 1) >= cols:
            return False
        return step(word, x, lambda i: x - i, y, lambda i: y + i, grid)
    if dir == 4:
        if y - n < 0:
            return False
        return step(word, x, lambda i: x, y, lambda i: y - i, grid)
def wordfinder(words, rows=20, cols=20, attempts=50, alph='ABCDEFGHIJKLMNOPQRSTUVWXYZ'):
    """
    Attempt to arrange words into a letter-grid with the specified number of
    rows and columns.  Try each word in several positions and directions, until
    it can be fitted into the grid, or the maximum number of allowable attempts
    is exceeded.  Returns a tuple consisting of the grid and the words that were
    successfully placed.

    @param words: the list of words to be put into the grid
    @type words: C(list)
    @param rows: the number of rows in the grid
    @type rows: C(int)
    @param cols: the number of columns in the grid
    @type cols: C(int)
    @param attempts: the number of times to attempt placing a word
    @type attempts: C(int)
    @param alph: the alphabet, to be used for filling blank cells
    @type alph: C(list)
    @rtype: C(tuple)
    """
    # place longer words first
    # NOTE(review): sorts the caller's list in place (cmp= is Python 2 only).
    words.sort(cmp=lambda x,y:cmp(len(x),len(y)), reverse=True)
    grid = []  # the letter grid
    used = []  # the words we used
    # initialize the grid
    for i in range(rows):
        grid.append([""] * cols)
    # try to place each word
    for word in words:
        word = strip(word).upper() # normalize
        save = word # keep a record of the word
        word = revword(word)      # maybe write it backwards
        for attempt in range(attempts):
            # r offsets the start along the word; x,y may land outside the
            # grid and are filtered by the bounds test below.
            r = random.randint(0, len(word))
            dir = random.choice([1,2,3,4])
            x = random.randint(0,rows)
            y = random.randint(0,cols)
            if dir==1: x+=r; y+=r
            elif dir==2: x+=r
            elif dir==3: x+=r; y-=r
            elif dir==4: y+=r
            if 0<=x<rows and 0<=y<cols:
                if check(word, dir, x, y, grid, rows, cols):
#                    used.append((save, dir, x, y, word))
                    used.append(save)
                    break
    # Fill up the remaining spaces with random letters
    for i in range(rows):
        for j in range(cols):
            if grid[i][j] == '':
                grid[i][j] = random.choice(alph)
    return grid, used
def demo():
from en.parser.nltk_lite.corpora import words
wordlist = list(words.raw())
random.shuffle(wordlist)
wordlist = wordlist[:200]
wordlist = [w for w in wordlist if 3 <= len(w) <= 12]
grid, used = wordfinder(wordlist)
print "Word Finder\n"
for i in range(len(grid)):
for j in range(len(grid[i])):
print grid[i][j],
print
print
for i in range(len(used)):
print "%d:" % (i+1), used[i]
if __name__ == '__main__':
demo()
| Python |
# Natural Language Toolkit: List Sorting
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Steven Bird <sb@csse.unimelb.edu.au>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
This module provides a variety of list sorting algorithms, to
illustrate the many different algorithms (recipes) for solving a
problem, and how to analyze algorithms experimentally.
"""
# These algorithms are taken from:
# Levitin (2004) The Design and Analysis of Algorithms
##################################################################
# Selection Sort
##################################################################
def selection(a):
    """
    Selection Sort: scan the list to find its smallest element, then
    swap it with the first element.  The remainder of the list is one
    element smaller; apply the same method to this list, and so on.

    Sorts a in place and returns the number of comparisons performed.
    """
    comparisons = 0
    n = len(a)
    for i in range(n - 1):
        smallest = i
        for j in range(i + 1, n):
            if a[j] < a[smallest]:
                smallest = j
            comparisons += 1
        a[smallest], a[i] = a[i], a[smallest]
    return comparisons
##################################################################
# Bubble Sort
##################################################################
def bubble(a):
    """
    Bubble Sort: compare adjacent elements of the list left-to-right,
    and swap them if they are out of order.  After one pass the largest
    item is in the rightmost position; repeat on the shrinking remainder.

    Sorts a in place and returns the number of swaps performed.
    """
    swaps = 0
    for limit in range(len(a) - 1, 0, -1):
        for j in range(limit):
            if a[j] > a[j + 1]:
                a[j], a[j + 1] = a[j + 1], a[j]
                swaps += 1
    return swaps
##################################################################
# Merge Sort
##################################################################
def _merge_lists(b, c):
    """Merge two sorted lists b and c; return (merged, comparison count)."""
    count = 0
    i = j = 0
    a = []
    while (i < len(b) and j < len(c)):
        count += 1
        if b[i] <= c[j]:
            a.append(b[i])
            i += 1
        else:
            a.append(c[j])
            j += 1
    # One side is exhausted; append the remainder of the other.
    if i == len(b):
        a += c[j:]
    else:
        a += b[i:]
    return a, count
def merge(a):
    """
    Merge Sort: split the list in half, sort each half recursively, then
    combine the sorted halves.

    Sorts a in place (like selection(), bubble() and quick()) and returns
    the number of comparisons performed.

    Bug fix: the merged result used to be bound to the local name 'a' and
    then discarded, so the caller's list was never actually sorted; we now
    write it back with a slice assignment.
    """
    count = 0
    if len(a) > 1:
        midpoint = len(a) // 2
        b = a[:midpoint]
        c = a[midpoint:]
        count_b = merge(b)
        count_c = merge(c)
        merged, count_a = _merge_lists(b, c)
        a[:] = merged          # write back in place so the caller sees it
        count = count_a + count_b + count_c
    return count
##################################################################
# Quick Sort
##################################################################
def _partition(a, l, r):
    """Partition a[l:r+1] around pivot a[l] (Hoare-style).

    Returns (pivot index, swap count).  The statement order is delicate:
    the loop deliberately over-swaps once when the scanning indices cross
    and then undoes that swap before placing the pivot.
    """
    p = a[l]; i = l; j = r+1
    count = 0
    while True:
        # advance i rightwards to an element >= pivot
        while i < r:
            i += 1
            if a[i] >= p: break
        # advance j leftwards to an element <= pivot
        while j > l:
            j -= 1
            if j < l or a[j] <= p: break
        a[i],a[j] = a[j],a[i] # swap
        count += 1
        if i >= j: break
    a[i],a[j] = a[j],a[i]  # undo last swap (indices had crossed)
    a[l],a[j] = a[j],a[l]  # move pivot into its final position
    return j, count
def _quick(a, l, r):
    """Quicksort a[l:r+1] in place; return the number of swaps made."""
    if l >= r:
        return 0
    pivot, swaps = _partition(a, l, r)
    return swaps + _quick(a, l, pivot - 1) + _quick(a, pivot + 1, r)
def quick(a):
    """Quicksort a in place and return the number of swaps performed."""
    return _quick(a, 0, len(a) - 1)
##################################################################
# Demonstration
##################################################################
def demo():
from random import shuffle
for size in (10, 20, 50, 100, 200, 500, 1000):
a = range(size)
# various sort methods
shuffle(a); count_selection = selection(a)
shuffle(a); count_bubble = bubble(a)
shuffle(a); count_merge = merge(a)
shuffle(a); count_quick = quick(a)
print "size=%5d: selection=%8d, bubble=%8d, merge=%6d, quick=%6d" %\
(size, count_selection, count_bubble, count_merge, count_quick)
if __name__ == '__main__':
demo()
| Python |
# Natural Language Toolkit: Miscellaneous modules
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Steven Bird <sb@csse.unimelb.edu.au>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
| Python |
# Natural Language Toolkit (NLTK-Lite)
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Authors: Steven Bird <sb@csse.unimelb.edu.au>
# Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
NLTK-Lite is a collection of lightweight NLP modules designed for
maximum simplicity and efficiency. NLTK-Lite only covers the simple
variants of standard data structures and tasks. It makes extensive
use of iterators so that large tasks generate output as early as
possible.
Key differences from NLTK are as follows:
- tokens are represented as strings, tuples, or trees
- all tokenizers are iterators
- less object orientation
NLTK-Lite is primarily intended to facilitate teaching NLP to students
having limited programming experience. The focus is on teaching
Python together with the help of NLP recipes, instead of teaching
students to use a large set of specialized classes.
@version: 0.7a2
"""
##//////////////////////////////////////////////////////
## Metadata
##//////////////////////////////////////////////////////
# Version. For each new release, the version number should be updated
# here and in the Epydoc comment (above).
__version__ = "0.7a2"
# Copyright notice
__copyright__ = """\
Copyright (C) 2001-2006 University of Pennsylvania.
Distributed and Licensed under provisions of the GNU Public
License, which is included by reference.
"""
__license__ = "GNU Public License"
# Description of the toolkit, keywords, and the project's primary URL.
# Fix: "Langauge" -> "Language" (typo in the published description string).
__longdescr__ = """\
The Natural Language Toolkit (NLTK-Lite) is a Python package for
processing natural language text.  It was developed as a simpler,
lightweight version of NLTK.  NLTK-Lite requires Python 2.4 or higher."""
__keywords__ = ['NLP', 'CL', 'natural language processing',
                'computational linguistics', 'parsing', 'tagging',
                'tokenizing', 'syntax', 'linguistics', 'language',
                'natural language']
__url__ = "http://nltk.sf.net/"
# Maintainer, contributors, etc.
__maintainer__ = "Steven Bird"
__maintainer_email__ = "sb@csse.unimelb.edu.au"
__author__ = __maintainer__
__author_email__ = __maintainer_email__
| Python |
# PARSER - last updated for NodeBox 1.9.2
# Author: Tom De Smedt <tomdesmedt@organisms.be>
# See LICENSE.txt for details.
# The part-of-speech-tagger was adopted by Jason Wiener from Mark Watson:
# http://jasonwiener.wordpress.com/category/nlp/
# Based on Brill's lexicon.
# The chunker relies on NLTK from the University of Pennsylvania:
# http://nltk.sourceforge.net/
# I changed the import statements in NLTK from
# "from nltk_lite." to "from en.parser.nltk_lite." for them to work.
# Additionally, two lines in ntlk_lite/probability.py
# were try/excepted (search source for "numpy").
# They use imported tools from NumPy but are not needed here,
# so NumPy (which is 7MB) is unnecessary.
import pickle
import re
### PART OF SPEECH TAGGER ############################################################################
class PartOfSpeechTagger:
    """
    Original Copyright (C) Mark Watson. All rights reserved.
    Python port by Jason Wiener (http://www.jasonwiener.com)

    THIS CODE AND INFORMATION ARE PROVIDED "AS IS" WITHOUT WARRANTY OF ANY
    KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A
    PARTICULAR PURPOSE.

    Lexicon-lookup part-of-speech tagger: tokens are looked up in a
    pickled Brill lexicon, then eight transformational rules are applied.
    """
    # Intended as a class-level cache of the Brill lexicon.
    lexHash = {}
    def __init__(self):
        # Load the pickled lexicon shipped next to this module.
        # NOTE(review): the assignment below binds an *instance* attribute,
        # so the class-level lexHash stays empty and every new instance
        # reloads the pickle -- confirm whether sharing was intended.
        if(len(self.lexHash) == 0):
            import os
            path = os.path.join(os.path.dirname(__file__), "Brill_lexicon")
            upkl = open(path, 'r')
            self.lexHash = pickle.load(upkl)
            upkl.close()
    def tokenize(self,s):
        """Split s on whitespace, detaching a leading '(' and any single
        trailing punctuation mark (; , ? ) : . !) into its own token."""
        v = []
        reg = re.compile('(\S+)\s')
        m = reg.findall(s+" ");
        for m2 in m:
            if len(m2) > 0:
                if m2.startswith("("):
                    v.append(m2[0])
                    m2 = m2[1:]
                if m2.endswith(";") \
                or m2.endswith(",") \
                or m2.endswith("?") \
                or m2.endswith(")") \
                or m2.endswith(":") \
                or m2.endswith(".") \
                or m2.endswith("!"):
                    v.append(m2[0:-1])
                    v.append(m2[-1])
                else:
                    v.append(m2)
        return v
    def tag(self,words):
        """Return a list of POS tags, one per token in words.

        Unknown tokens default to "NN"; lexicon lookup is tried with the
        original case first, then lowercased.
        """
        ret = []
        for i in range(len(words)):
            ret.append("NN") #the default entry
            if self.lexHash.has_key(words[i]):
                ret[i] = self.lexHash[words[i]]
            elif self.lexHash.has_key(words[i].lower()):
                ret[i] = self.lexHash[words[i].lower()]
        #apply transformational rules
        for i in range(len(words)):
            #rule 1 : DT, {VBD | VBP} --> DT, NN
            if i > 0 and ret[i-1] == "DT":
                if ret[i] == "VBD" or ret[i] == "VBP" or ret[i] == "VB":
                    ret[i] = "NN"
            #rule 2: convert a noun to a number (CD) if "." appears in the word
            if ret[i].startswith("N"):
                if words[i].find(".") > -1:
                    ret[i] = "CD"
            # rule 3: convert a noun to a past participle if words[i] ends with "ed"
            if ret[i].startswith("N") and words[i].endswith("ed"):
                ret[i] = "VBN"
            # rule 4: convert any type to adverb if it ends in "ly"
            if words[i].endswith("ly"):
                ret[i] = "RB"
            # rule 5: convert a common noun (NN or NNS) to an adjective if it ends with "al"
            if ret[i].startswith("NN") and words[i].endswith("al"):
                ret[i] = "JJ"
            # rule 6: convert a noun to a verb if the preceding word is "would"
            if i > 0 and ret[i].startswith("NN") and words[i - 1].lower() == "would":
                ret[i] = "VB"
            # rule 7: if a word has been categorized as a common noun and it ends with "s",
            #         then set its type to plural common noun (NNS)
            if ret[i] == "NN" and words[i].endswith("s"):
                ret[i] = "NNS"
            # rule 8: convert a common noun to a present participle verb (i.e., a gerund)
            if ret[i].startswith("NN") and words[i].endswith("ing"):
                ret[i] = "VBG"
        return ret
pos_tagger = PartOfSpeechTagger()
class TaggedSentence(list):
    """ A list of (token, tag) tuples representing a POS-tagged sentence.

    When printed or transformed with str(), the sentence is represented
    as space-separated token/tag pairs.  For example:
    [(the,DT), (cat,NN), (likes,VBZ), (fish,NN)] ->
    the/DT cat/NN likes/VBZ fish/NN
    """
    def __repr__(self):
        # Fix: the local accumulator used to be named 'str', shadowing the
        # builtin; renamed to 'pairs'.
        pairs = [token + "/" + tag for token, tag in self]
        return " ".join(pairs)
def sentence_tag(sentence):
    """ Returns a tagged sentence.

    Part-of-speech tagging assigns a marker to each word in the sentence,
    corresponding to a particular part of speech (nouns, verbs,
    adjectives, ...).  Tagging is done using Jason Wiener's parser and a
    Brill lexicon, and involves a lot of ambiguity; for example, fish has
    different meanings in "cats like fish" and "men like to fish".
    """
    tokens = pos_tagger.tokenize(sentence)
    tags = pos_tagger.tag(tokens)
    return TaggedSentence(zip(tokens, tags))
### CHUNKING #########################################################################################
from nltk_lite.parse import chunk as nltk_chunk
from nltk_lite.parse import tree as nltk_tree
# Simple regular expression rules for chunking.
# Each rule is a (chunk tag, tag-pattern regex, description) triple.
# The rules are applied in order, so later rules can group chunks that
# were built by earlier ones.
chunk_rules = [
    ("NP", r"<DT|CD|JJ.*|PRP.*|NN.*>+", "noun phrases with determiner, adjectives, nouns"),
    ("PP", r"<IN><NP>", "preposition (in, of, on) followed by noun phrase"),
    ("VP", r"<RB.*|RP|VB.*|MD|TO>+", "verb phrases"),
    ("VA", r"<VP><NP|PP|S>", "verbs and arguments/adjuncts"),
    ("S", r"<NP|PP|PRP><VP|VA>", "subject")
]
def sentence_chunk(sentence):
    """ Chunks a tagged sentence into syntactically correlated parts of words.

    The sentence is tagged, rendered as a "token/tag ..." string, parsed
    back into leaves, and then each rule in chunk_rules is applied in
    order.  Returns nested lists (see _traverse_chunktree).
    """
    tagged = sentence_tag(sentence)
    tagged = str(tagged)
    # NOTE(review): nltk_tree.chunk presumably parses the token/tag string
    # into a tree -- confirm against the bundled nltk_lite version.
    leaves = nltk_tree.chunk(tagged).leaves()
    tree = nltk_tree.Tree("", leaves)
    for tag, rule, desc in chunk_rules:
        r = nltk_chunk.ChunkRule(rule,"")
        chunker = nltk_chunk.RegexpChunk([r], chunk_node=tag)
        tree = chunker.parse(tree)
    return _traverse_chunktree(tree)
def _traverse_chunktree(tree):
    """ Converts the output of sentence_chunk() to a plain Python list.

    sentence_chunk() generates an NLTK Tree object; this flattens it into
    nested lists, each sublist starting with its chunk tag.  For example:
    we are going to school ->
    [['SP',
      ['NP', ('we', 'PRP')],
      ['AP',
       ['VP', ('are', 'VBP'), ('going', 'VBG'), ('to', 'TO')],
       ['NP', ('school', 'NN')]]]]
    """
    # Fix: the accumulator used to be named 'list', shadowing the builtin.
    result = []
    for child in tree:
        if isinstance(child, nltk_tree.Tree):
            result.append(_traverse_chunktree(child))
            result[-1].insert(0, child.node)
        elif isinstance(child, tuple):
            result.append(child)
    return result
def sentence_traverse(sentence, f):
    """ Chunks sentence and feeds its parts to function f.

    The sentence is chunked and traversed recursively.  Each part is
    passed to f(chunk, token, tag): for a chunk tag, token and tag are
    None; for a terminal, chunk is None and token/tag are strings.
    """
    chunk_tags = [tag for tag, rule, desc in chunk_rules]
    def _walk(node):
        for child in node:
            if isinstance(child, list):
                _walk(child)
            elif isinstance(child, tuple):
                f(None, child[0], child[1])
            elif isinstance(child, str) and child in chunk_tags:
                f(child, None, None)
    _walk(sentence_chunk(sentence))
### PATTERN MATCHING #################################################################################
# A powerful mechanism for searching tagged text.
# "Beautiful fresh flowers and plants are all around the lush garden."
# "(JJ) (JJ) NN" --> Beautiful fresh flowers, plants, lush garden.
# We can use it to compare stuff (NN is bigger than NN),
# to aggregate commonsense data (red NN | NN VB red), etc.
def combinations(items, n):
    """ Yields all length-n sequences drawn from items, with repetition
    (i.e. the n-fold Cartesian product, not mathematical combinations).
    For example, combinations([1, 2], 2) -> [1,1], [1,2], [2,1], [2,2].
    """
    if n == 0:
        yield []
    else:
        # range rather than xrange: identical iteration in Python 2 and
        # keeps the function portable to Python 3.
        for item in items:
            for tail in combinations(items, n - 1):
                yield [item] + tail
def is_optional(pattern):
    """ An optional pattern is enclosed in brackets, e.g. "(JJ)". """
    return pattern.startswith("(") and pattern.endswith(")")
def variations(pattern):
    """ Returns all possible variations of a pattern containing optional pieces.

    pattern is a list of pattern strings; optional pieces are enclosed in
    brackets, e.g. ["(JJ)", "(NN)", "NN"].  Every way of keeping/dropping
    the optional pieces is returned, longest variation first.
    """
    # Boolean pattern, True where pattern is optional.
    # (JJ) (NN) NN --> True True False
    o = [is_optional(p) for p in pattern]
    V = []
    # All the possible True/False combinations of optionals.
    # (JJ) (NN) NN --> True True, True False, False True, False False.
    for c in combinations([True, False], sum(o)):
        # If True in boolean pattern, replace by boolean in current combination.
        # (JJ) (NN) NN --> True True False, True False False, False True False, False False False.
        # NOTE: c.pop(0) deliberately consumes the combination left-to-right
        # as a side effect of the comprehension.
        v = [b and (b and c.pop(0)) for b in o]
        # Replace True by pattern at that index.
        # --> (JJ) (NN) NN, (JJ) NN, (NN) NN, NN.
        v = [pattern[i] for i in range(len(v)) if not v[i]]
        v = [p.strip("()") for p in v]
        if v not in V: V.append(v)
    # Longest-first, so greedy matching tries the fullest variation first.
    V.sort(lambda a, b: len(b) - len(a))
    return V
# 1) Pattern NN matches /NN as well as /NNS tokens.
# 2) Pattern "new" matches token "new".
# 3) Pattern "*" matches any token.
# 4) Pattern "new*" matches tokens "new", "news", "newest", ...
# 5) Pattern "*new" matches tokens "new", "renew", ...
# 6) Pattern "*new*" matches "new", "renewal", ...
# Ordered matching rules; see the numbered examples above.  Each rule is
# tried in turn by is_match() until one succeeds.
matching_rules = [
    lambda p, token, tag: tag.startswith(p),
    lambda p, token, tag: token == p,
    lambda p, token, tag: p == "*",
    lambda p, token, tag: p.endswith("*") and token.startswith(p[:-1]),
    lambda p, token, tag: p.startswith("*") and token.endswith(p[1:]),
    lambda p, token, tag: p.startswith("*") and p.endswith("*") and token.find(p[1:-1]) >= 0
]
def is_match(pattern, token, tag):
    """ Returns True if one of the rules matches pattern to token/tag.
    The comparison is case-insensitive.
    """
    pattern, token, tag = pattern.lower(), token.lower(), tag.lower()
    for rule in matching_rules:
        if rule(pattern, token, tag):
            return True
    return False
def matches(sentence, pattern, chunked=True):
    """ Find sequences of tokens that match the pattern.

    The pattern can include tokens, part-of-speech tags and wildcards.
    The algorithm is greedy: it will return the longest possible match.
    Example: "The new president was in the news" --> "new* (NN)" --> ["new president", "news"].
    With chunked=False, each match is joined back into a plain string.
    """
    t = sentence_tag(sentence)
    v = variations(pattern.split())
    m = []
    # Move from token to token in the sentence.
    i = 0
    while i < len(t):
        # Check each variation of the pattern (longest first, see variations()).
        for p in v:
            # If it is smaller than the remainder of the sentence,
            # see if it matches the next tokens in the sentence.
            # In this case is_match() will return True for each token (count them).
            n = len(p)
            if n <= len(t[i:]):
                b = sum( [is_match(p, token, tag)
                          for p, (token, tag) in zip(p, t[i:i+n])] )
                if b == len(t[i:i+n]):
                    # Found the longest possible pattern,
                    # greedily skip to the next part of the sentence.
                    m.append(t[i:i+n])
                    i += n
                    break
        i += 1
    if not chunked:
        for i in range(len(m)):
            m[i] = " ".join([token for token, tag in m[i]])
    return m
# Public alias for matches().
sentence_find = matches
### PART OF SPEECH TAGS ##############################################################################
# A description and an example for each part-of-speech
# used in tagging and chunking.
# See http://en.wikipedia.org/wiki/Brown_Corpus#Part-of-speech_tags_used.
# (description, example) for each part-of-speech tag used in tagging and
# chunking.  See http://en.wikipedia.org/wiki/Brown_Corpus#Part-of-speech_tags_used.
# Fixes: "auxillary" -> "auxiliary", "fixe" -> "five" (typos in the
# user-facing descriptions).
pos_tags = {
    "np"    : ("noun phrase", "the pink panther"),
    "vp"    : ("verb phrase", "die laughing madly"),
    "va"    : ("verb phrase and arguments", "telling a lie"),
    "s"     : ("subject phrase", "suzy [is telling [a lie]]"),
    "ax"    : ("", ""),
    "vb"    : ("verb, base form", "think"),
    "vbz"   : ("verb, 3rd person singular present", "she thinks"),
    "vbp"   : ("verb, non-3rd person singular present", "I think"),
    "vbd"   : ("verb, past tense", "they talked"),
    "vbn"   : ("verb, past participle", "a sunken ship"),
    "vbg"   : ("verb, gerund or present participle", "programming is fun"),
    "md"    : ("verb, modal auxiliary", "may, should, wouldn't"),
    "nn"    : ("noun, singular or mass", "tiger, chair, laughter"),
    "nns"   : ("noun, plural", "tigers, chairs, insects"),
    "nnp"   : ("noun, proper singular", "Germany, God, Alice"),
    "nnps"  : ("noun, proper plural", "we met two Christmases ago"),
    "jj"    : ("adjective", "nice, easy, boring"),
    "jjr"   : ("adjective, comparative", "nicer, easier, more boring"),
    "jjs"   : ("adjective, superlative", "nicest, easiest, most boring"),
    "rb"    : ("adverb", "extremely, loudly, hard"),
    "wrb"   : ("adverb, wh-", "where, when"),
    "rbr"   : ("adverb, comparative", "better"),
    "rbs"   : ("adverb, superlative", "best"),
    "rp"    : ("adverb, particle", "about, off, up"),
    "prp"   : ("pronoun, personal", "me, you, it"),
    "prp$"  : ("pronoun, possessive", "my, your, our"),
    "wp"    : ("pronoun, personal", "what, who, whom"),
    "wp$"   : ("pronoun, possessive", "whose, whosever"),
    "pdt"   : ("", ""),
    "wdt"   : ("determiner", "which, whatever, whichever"),
    "dt"    : ("determiner", "the, a, these"),
    "ex"    : ("existential there", "there were six boys"),
    "cc"    : ("conjunction, coordinating", "and, or, but"),
    "in"    : ("conjunction, subordinating or preposition", "of, on, before, unless"),
    "to"    : ("infinitival to", "what to do?"),
    "cd"    : ("cardinal number", "five, three, 13%"),
    "uh"    : ("interjection", "oh, oops, gosh"),
    "fw"    : ("foreign word", "mais"),
    "sym"   : ("", ""),
    "."     : ("punctuation mark, sentence closer", ".;?*"),
    ","     : ("punctuation mark, comma", ","),
    ":"     : ("punctuation mark, colon", ":"),
    "("     : ("contextual separator, left paren", "("),
    ")"     : ("contextual separator, right paren", ")"),
    "ls"    : ("", "")
}
def tag_description(postag):
    """Return the (description, example) tuple for a POS tag.

    Lookup is case-insensitive; raises KeyError for unknown tags.
    """
    return pos_tags[postag.lower()]
#s = "that cat looks like a hamster"
#s = "the sun is shining"
#s = "that has been plaguing john"
#s = "he is always trying to feed her with lies"
#s = "we are going to school"
#from pprint import pprint
#pprint( sentence_chunk(s) )
#def callback(chunk, token, tag):
# if chunk != None : print tag_description(chunk)[0].upper()
# if chunk == None : print token, "("+tag_description(tag)[0]+")"
#sentence_traverse(s, callback) | Python |
# QUANTIFY - last updated for NodeBox 1rc7
# Author: Tom De Smedt <tomdesmedt@organisms.be>
# See LICENSE.txt for details.
# Based on the Ruby Linguistics module by Michael Granger:
# http://www.deveiate.org/projects/Linguistics/wiki/English
from article import article
from numeral import spoken_number, thousands as numeral_thousands
from plural import plural
from math import log, pow
quantify_custom_plurals = {
"built-in function" : "built-in functions"
}
def quantify(word, number=0):
    """ Returns a phrase describing the number of given objects.

    Two objects are described as being a pair,
    smaller than eight is several,
    smaller than twenty is a number of,
    smaller than two hundred are dozens,
    anything bigger is described as being
    tens or hundreds of thousands or millions.
    For example:
    chicken, 100 -> dozens of chickens

    NOTE(review): negative numbers fall through every branch and return
    None -- confirm whether callers ever pass them.
    """
    def _plural(word):
        # Pluralize with the module's custom exceptions applied.
        return plural(word, custom=quantify_custom_plurals)
    if number == 0:
        return "no " + _plural(word)
    if number == 1:
        return article(word)
    if number == 2:
        return "a pair of " + _plural(word)
    if number in range(3,8):
        return "several " + _plural(word)
    if number in range(8,20):
        return "a number of " + _plural(word)
    if number in range(20,200):
        return "dozens of " + _plural(word)
    if number >= 200:
        # Decompose the magnitude: 'thousands' counts the groups of three
        # decimal digits, 'subthousands' the leftover digits within a group.
        thousands = int( log(number, 10) / 3 )
        subthousands = int( log(number, 10) % 3 )
        if subthousands == 2:
            stword = "hundreds of "
        elif subthousands == 1:
            stword = "tens of "
        else:
            stword = ""
        if thousands > 0:
            thword = _plural(numeral_thousands(thousands-1)) + " of "
        else:
            thword = ""
        return stword + thword + _plural(word)
def conjunction(words, generalize=False):
    """ Returns a phrase quantifying each distinct object in words,
    most frequent first, joined with commas and a final "and", e.g.
    ["goose", "goose", "duck"] -> "a pair of geese and a duck".

    With generalize=True, words is first mapped to the type names of its
    contents via _reflect().

    Fixes: returns "" for an empty sequence (the old code raised
    UnboundLocalError because the trailing-separator strip referenced a
    loop variable that was never bound), replaces the py2-only has_key()
    with dict.get(), and removes the suffix via slicing logic instead of
    rstrip(), which strips a character *set* rather than a suffix.
    """
    if generalize:
        words = _reflect(words)
    # Count each distinct object in the list of words.
    count = {}
    for word in words:
        count[word] = count.get(word, 0) + 1
    # Sort highest occurrence first (ties broken by word, descending,
    # exactly as the original (count, word) tuple sort did).
    sortable = [(n, word) for word, n in count.items()]
    sortable.sort()
    sortable.reverse()
    phrases = [quantify(word, n) for n, word in sortable]
    if not phrases:
        return ""
    if len(phrases) == 1:
        return phrases[0]
    return ", ".join(phrases[:-1]) + " and " + phrases[-1]
#print quantify("chicken", 0)
#print quantify("chicken", 1)
#print quantify("chicken", 2)
#print quantify("chicken", 3)
#print quantify("chicken", 10)
#print quantify("chicken", 100)
#print quantify("chicken", 1000)
#print quantify("chicken", 10000)
#print quantify("chicken", 100000)
#print quantify("chicken", 2000000)
#print conjunction(["goose", "goose", "duck", "chicken", "chicken", "chicken"])
#print conjunction(["penguin", "polar bear"])
#print conjunction(["whale"])
# Regex substitutions applied by _reflect() to turn raw type strings such
# as "<type 'int'>" into readable names.
# NOTE(review): these are applied in plain-dict iteration order, which is
# arbitrary in Python 2 -- overlapping patterns may fire in an
# unpredictable order.
reflect_readable_types = {
    "<type '"    : "",
    "<class '(.*)'\>" : "\\1 class",
    "'>"         : "",
    "objc.pyobjc" : "Python Objective-C",
    "objc_class" : "Objective-C class",
    "objc"       : "Objective-C",
    "<objective-c class (.*) at [0-9][0-9|a-z]*>" : "Objective-C \\1 class",
    "bool"       : "boolean",
    "int"        : "integer",
    "long"       : "long integer",
    "float"      : "float",
    "str"        : "string",
    "dict"       : "dictionary",
    "NoneType"   : "None type",
    "instancemethod" : "instance method",
    "builtin_function_or_method" : "built-in function",
    "classobj"   : "class object",
    "\."         : " ",
    "_"          : " "
}
def _reflect(object):
    """ Returns the type of each object in the given object.

    For modules, this means classes and functions etc.
    For lists and tuples, the type of each item in it.
    For unsubscriptable objects, the type of the object itself.

    NOTE(review): the parameter shadows the builtin 'object', and the bare
    excepts are deliberate best-effort dispatch (attribute access on
    arbitrary objects can raise almost anything).
    """
    types = []
    try:
        # Classes and modules have a __dict__ attribute
        # listing methods, functions etc.
        for a in object.__dict__:
            a = getattr(object, a)
            try:
                types.append(str(a.__class__))
            except:
                types.append(str(type(a)))
        # Possibly object is a function.
        if len(object.__dict__) == 0:
            types.append(str(type(object)))
    except:
        # Lists and tuples can consist
        # of several types of objects.
        if isinstance(object, list) \
        or isinstance(object, tuple):
            for item in object:
                types.append(str(type(item)))
        # Dictionaries have string keys
        # pointing to objects.
        elif isinstance(object, dict):
            for key in object:
                types.append("str key")
                types.append(str(type(object[key])))
        else:
            types.append(str(type(object)))
    # Clean up type strings with the substitutions table above.
    import re
    for i in range(len(types)):
        for p in reflect_readable_types:
            types[i] = re.sub(p, reflect_readable_types[p], types[i])
    return types
#print conjunction("hello", generalize=True)
#print conjunction(["hello", "goobye"], generalize=True)
#print conjunction((1,2,3,4,5), generalize=True)
#print conjunction({"name": "linguistics", "version": 1.0}, generalize=True)
#print conjunction(conjunction, generalize=True)
#print conjunction(__dict__, generalize=True)
#import Foundation; print conjunction(Foundation, generalize=True)
#import Numeric; print conjunction(Numeric, generalize=True) | Python |
#!/usr/bin/env python
#
# Copyright 2007 John Wiseman <jjwiseman@yahoo.com>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import sys
import StringIO
import getopt
class RegressiveImageryDictionary:
"""
To use:
1. Load a dictionary.
2. Load an exclusion list (optional).
3. Call analyze.
4. Call display_results with the value returned by analyze.
"""
    def __init__(self):
        # Hierarchical category structure built while parsing the dictionary.
        self.category_tree = CategoryRoot()
        # Raw exclusion regexes, and the single combined pattern compiled
        # from them by load_exclusion_list().
        self.exclusion_patterns = []
        self.exclusion_pattern = None
        # Maps word patterns to their categories for lookup during analysis.
        self.pattern_tree = DiscriminationTree('root', None)
def load_dictionary_from_file(self, path):
rid_in = open(path, "r")
try:
self.load_dictionary(rid_in)
finally:
rid_in.close()
def load_dictionary_from_string(self, string):
rid_in = StringIO.StringIO(string)
self.load_dictionary(rid_in)
def load_dictionary(self, stream):
primary_category = None
secondary_category = None
tertiary_category = None
for line in stream:
num_tabs = count_leading_tabs(line)
# The dictionary is in kind of a weird format.
if num_tabs == 0:
primary_category = line.strip()
secondary_category = None
tertiary_category = None
elif num_tabs == 1:
secondary_category = line.strip()
tertiary_category = None
elif num_tabs == 2 and not '(' in line:
tertiary_category = line.strip()
else:
# We have a word pattern.
pattern = line.strip().split(' ')[0].lower()
category = self.ensure_category(primary_category, secondary_category, tertiary_category)
category.add_word_pattern(pattern)
self.pattern_tree.put(pattern, category)
def load_exclusion_list_from_file(self, path):
exc_in = open(path, "r")
try:
self.load_exclusion_list(exc_in)
finally:
exc_in.close()
def load_exclusion_list_from_string(self, string):
exc_in = StringIO.StringIO(string)
self.load_exclusion_list(exc_in)
def load_exclusion_list(self, stream):
for line in stream:
pattern = line.strip().lower()
pattern = pattern.replace("*", ".*")
self.exclusion_patterns.append(pattern)
# One megapattern to exclude them all
self.exclusion_pattern = re.compile('^(' + '|'.join(self.exclusion_patterns) + ')$')
def token_is_excluded(self, token):
return self.exclusion_pattern.match(token)
def get_category(self, word):
categories = self.pattern_tree.retrieve(word)
if categories:
return categories[0]
def analyze(self, text):
results = RIDResults()
def increment_category(category, token):
if not category in results.category_count:
results.category_count[category] = 0
results.category_words[category] = []
results.category_count[category] += 1
results.category_words[category].append(token)
tokens = tokenize(text)
results.word_count = len(tokens)
for token in tokens:
if not self.token_is_excluded(token):
category = self.get_category(token)
if category != None:
increment_category(category, token)
return results
def display_results(self, results):
# Detailed category breakout
total_count = 0
for (category, count) in sorted(results.category_count.items(), key=lambda x: x[1], reverse=True):
print "%-60s %5s" % (category.full_name(), count)
print " " + " ".join(results.category_words[category])
total_count += count
# Summary for each top-level category
top_categories = self.category_tree.children.values()
def get_top_category(cat):
for top_cat in top_categories:
if cat.isa(top_cat):
return top_cat
print "Category %s doesn't exist in %s" % (category, top_categories)
top_category_counts = {}
for top_category in top_categories:
top_category_counts[top_category] = 0
for category in results.category_count:
top_category = get_top_category(category)
if top_category:
top_category_counts[top_category] += results.category_count[category]
print ""
def percent(x, y):
if y == 0:
return 0
else:
return (100.0 * x) / y
for top_category in top_categories:
count = top_category_counts[top_category]
print "%-20s: %f %%" % (top_category.full_name(), percent(count, total_count))
# Word count
print "\n%d words total" % (results.word_count,)
def display_results_html(self, results, title):
# Detailed category breakout
total_count = 0
print "<html><head>"
print "<meta http-equiv='content-type' content='text/html; charset=UTF-8'>"
print """
<style type="text/css">
.word-count { vertical-align: super; font-size: 50%; }
.twisty { color: blue; font-family: monospace; }
a.twisty { text-decoration: none; }
</style>
"""
print "<title>%s</title>" % (title,)
print """
<script>
var TWISTY_EXPANDED = ' ▾ ';
var TWISTY_COLLAPSED = ' ▸ ';
function allWordNodes() {
var nodes = document.getElementsByTagName("tr");
var results = new Array();
var numResults = 0;
for (i = 0; i < nodes.length; i++) {
var node = nodes.item(i);
if (node.className == 'words') {
results[numResults] = node;
numResults++;
}
}
return results;
}
function hideAll() {
allNodes = allWordNodes();
for (var i = 0; i < allNodes.length; i++) {
hide(allNodes[i]);
}
}
function showAll() {
allNodes = allWordNodes();
for (var i = 0; i < allNodes.length; i++) {
show(allNodes[i]);
}
}
function get_twisty_node(category) {
var cell = document.getElementById(category + "-cat");
return cell.childNodes[0];
}
function hide(element) {
element.style.display = "none";
var twisty = get_twisty_node(element.id);
twisty.innerHTML = TWISTY_COLLAPSED;
}
function show(element) {
element.style.display = "";
var twisty = get_twisty_node(element.id);
twisty.innerHTML = TWISTY_EXPANDED;
}
function toggle(cat) {
var node = document.getElementById(cat)
if (node.style.display == "none") {
show(node);
} else {
hide(node);
}
}
</script>
"""
print "</head><body>"
print "<h1>%s</h1>" % (title,)
print "<p><a href='javascript:hideAll()'>- collapse all</a> <a href='javascript:showAll()'>+ expand all</a></p>"
print "<table width='100%'>"
for (category, count) in sorted(results.category_count.items(), key=lambda x: x[1], reverse=True):
sys.stdout.write("<tr>")
sys.stdout.write("<td class='%s' id='%s'>" % ("category", category.full_name() + "-cat"))
sys.stdout.write("""<a class='twisty' href="javascript:toggle('%s')"><span class='twisty'> ▾ </span></a>""" % (category.full_name(),))
sys.stdout.write("%s</td><td width='*' align='right'>%s</td></tr>""" % (category.full_name(), count))
print "<tr class='%s' id='%s'>" % ("words", category.full_name())
print "<td style='padding-left: 1cm;' colspan='2'>"
words = uniq_c(results.category_words[category])
for word in words:
sys.stdout.write("%s<span class='word-count'>%s</span> " % (word))
print "\n</td></tr>"
total_count += count
print "</table>"
# Summary for each top-level category
top_categories = self.category_tree.children.values()
def get_top_category(cat):
for top_cat in top_categories:
if cat.isa(top_cat):
return top_cat
print "Category %s doesn't exist in %s" % (category, top_categories)
top_category_counts = {}
for top_category in top_categories:
top_category_counts[top_category] = 0
for category in results.category_count:
top_category = get_top_category(category)
if top_category:
top_category_counts[top_category] += results.category_count[category]
def percent(x, y):
if y == 0:
return 0
else:
return (100.0 * x) / y
print "<table>"
for top_category in top_categories:
count = top_category_counts[top_category]
print "<tr><td>%s:</td><td>%f %%</td></tr>" % (top_category.full_name(), percent(count, total_count))
print "<table>"
# Word count
print "<p>%d words total</p>" % (results.word_count,)
print "</body></html>"
def ensure_category(self, *args):
def ensure_cat_aux(category, category_path):
if len(category_path) == 0 or category_path[0] == None:
return category
else:
cat = category_path.pop(0)
if not cat in category.children:
category.children[cat] = Category(cat, category)
return ensure_cat_aux(category.children[cat], category_path)
return ensure_cat_aux(self.category_tree, list(args))
class RIDResults:
    """Container for the outcome of RegressiveImageryDictionary.analyze.

    Attributes:
        category_count: dict mapping Category -> number of matching tokens.
        category_words: dict mapping Category -> list of the tokens matched.
        word_count:     total number of tokens in the analyzed text.
    """
    def __init__(self):
        self.category_count, self.category_words = {}, {}
        self.word_count = 0
WORD_REGEX = re.compile(r'[^a-zA-Z]+')

def tokenize(string):
    """Split *string* into lowercase alphabetic tokens.

    Runs of non-letter characters act as separators; the empty
    fragments re.split produces at the edges are dropped.

    Uses a list comprehension instead of filter(lambda ...): clearer,
    and it guarantees a list (not a lazy filter object) on Python 3 as
    well — analyze() takes len() of the result.
    """
    return [token for token in WORD_REGEX.split(string.lower()) if token]
def count_leading_tabs(string):
    """Return the number of leading tab characters in *string*.

    The original scan fell off the end of the loop and returned None
    for an empty string or a string consisting entirely of tabs; this
    version returns the tab count in every case.
    """
    return len(string) - len(string.lstrip('\t'))
class DiscriminationTree:
    """
    This is the discrimination tree we use for mapping words to
    categories. The put method is used to insert category nodes in the
    tree, associated with some word pattern. The retrieve method finds
    the category for a given word, if one exists.
    """
    def __init__(self, index, parent):
        self.index = index          # the character this node discriminates on
        self.parent = parent        # parent node (None for the root)
        self.leaves = []            # payloads (e.g. categories) stored here
        self.interiors = []         # child DiscriminationTree nodes
        self.is_wildcard = False    # True once a '*'-terminated pattern ends here

    def __str__(self):
        return "<DiscriminationTree %s>" % (self.index,)

    def child_matching_index(self, index):
        """Return the child node whose index is *index*, or None."""
        for child in self.interiors:
            if child.index == index:
                return child
        return None

    def retrieve(self, path):
        """Return the list of leaves matching the word *path*, or None.

        A wildcard node matches any remaining suffix of the word.
        """
        if len(path) == 0 or self.is_wildcard:
            return self.leaves
        else:
            next_index = path[0]
            next_disc_tree = self.child_matching_index(next_index)
            # 'is None' rather than '== None' (PEP 8 identity comparison).
            if next_disc_tree is None:
                return None
            else:
                return next_disc_tree.retrieve(path[1:])

    def put(self, path, leaf):
        """Insert *leaf* under the word pattern *path*.

        A trailing '*' in the pattern makes the node reached so far a
        wildcard that matches any suffix.
        """
        if len(path) == 0:
            if isinstance(leaf, DiscriminationTree):
                self.interiors.append(leaf)
            else:
                self.leaves.append(leaf)
            return True
        else:
            next_index = path[0]
            if next_index == '*':
                # Got a '*' so this is a wildcard node that will match
                # anything that reaches it.
                self.is_wildcard = True
                self.leaves.append(leaf)
            else:
                next_disc_tree = self.child_matching_index(next_index)
                if next_disc_tree is None:
                    next_disc_tree = DiscriminationTree(next_index, self)
                    self.interiors.append(next_disc_tree)
                next_disc_tree.put(path[1:], leaf)

    def dump(self, stream=sys.stdout, indent=0):
        """Write an indented dump of this subtree to *stream* (debugging)."""
        stream.write("\n" + " "*indent + str(self))
        for child in self.leaves:
            stream.write("\n" + " "*(indent + 3) + str(child))
        for child in self.interiors:
            child.dump(stream=stream, indent=indent + 3)
class Category:
    """A node in the RID category hierarchy (primary/secondary/tertiary)."""
    def __init__(self, name, parent):
        self.name = name
        self.parent = parent     # enclosing Category, CategoryRoot, or None
        self.children = {}       # child name -> child Category
        self.leaves = []         # word patterns assigned directly to this node

    def __str__(self):
        return "<Category %s>" % (self.full_name(),)

    def add_word_pattern(self, pattern):
        """Record a word pattern that belongs to this category."""
        self.leaves.append(pattern)

    def full_name(self):
        """Return the colon-joined path, e.g. 'PRIMARY:NEED:ORALITY'."""
        # 'is None' rather than '== None' (PEP 8 identity comparison).
        if self.parent is None or isinstance(self.parent, CategoryRoot):
            return self.name
        else:
            return self.parent.full_name() + ":" + self.name

    def isa(self, parent):
        """True if *parent* is this category or one of its ancestors."""
        return parent == self or (self.parent and self.parent.isa(parent))
class CategoryRoot(Category):
    """Sentinel root of the category hierarchy.

    Its full_name is the empty string so that top-level categories
    report their own name without a leading separator.
    """
    def __init__(self):
        Category.__init__(self, name='root', parent=None)

    def full_name(self):
        # The root never contributes to a printed category path.
        return ""
def uniq_c(words):
    """Return (word, count) pairs, most frequent first, ties alphabetical.

    Equivalent to `sort | uniq -c | sort -rn`.  Unlike the original
    implementation this does not mutate *words* in place (the caller
    passes its own results lists) and handles an empty list, which
    previously raised IndexError on words[0].
    """
    from collections import Counter  # stdlib; available since Python 2.7
    # The (-count, word) key reproduces the original ordering exactly:
    # count descending, with ties alphabetical (the original's stable
    # sort over an already-sorted word list).
    return sorted(Counter(words).items(), key=lambda kv: (-kv[1], kv[0]))
# This dictionary is the one from
# http://www.provalisresearch.com/Download/RID.ZIP with misspellings
# fixed.
DEFAULT_RID_DICTIONARY = """
PRIMARY
NEED
ORALITY
ABSINTH* (1)
ALE (1)
ALES (1)
ALIMENTARY (1)
AMBROSIA* (1)
AMBROSIAL* (1)
APPETIT* (1)
APPLE* (1)
ARTICHOK* (1)
ASPARAGU* (1)
BACON* (1)
BANANA* (1)
BEAN* (1)
BEEF* (1)
BEER* (1)
BELCH* (1)
BELLIES (1)
BELLY (1)
BERRI* (1)
BERRY* (1)
BEVERAG* (1)
BISCUIT* (1)
BITE* (1)
BITE (1)
BITES (1)
BITING (1)
BITTEN* (1)
BONBON* (1)
BRANDY* (1)
BREAD* (1)
BREAKFAST* (1)
BREAST* (1)
BREW* (1)
BROTH (1)
BURP* (1)
BUTTER* (1)
BUTTERMILK* (1)
CAFE (1)
CAFES (1)
CAKE (1)
CAKES (1)
CAFETARIA (1)
CANDY* (1)
CANNIBAL* (1)
CAVIAR* (1)
CHAMPAGN* (1)
CHEES* (1)
CHERRI* (1)
CHERRY* (1)
CHESTNUT* (1)
CHEW* (1)
CHOK* (1)
CIDER* (1)
CLARET* (1)
COB (1)
COBS (1)
COCOA* (1)
COCOANUT* (1)
COCONUT* (1)
COFFE* (1)
CONSUM* (1)
COOK* (1)
CORN* (1)
COUGH* (1)
CRANBERRY* (1)
CREAM* (1)
DELICACI* (1)
DELICACY* (1)
DESSERT* (1)
DEVOUR* (1)
DIET* (1)
DIGEST* (1)
DINE (1)
DINES (1)
DINING (1)
DINNER* (1)
DISH (1)
DISHES (1)
DRANK* (1)
DRINK* (1)
DRUNK* (1)
DRUNKEN* (1)
EAT* (1)
EATEN* (1)
EGG* (1)
ENTRAIL* (1)
FAMIN* (1)
FAMISH* (1)
FAST (1)
FASTS (1)
FAT (1)
FATTEN* (1)
FEAST* (1)
FED (1)
FEED (1)
FEEDS (1)
FIG (1)
FIGS (1)
FLOUR* (1)
FOOD* (1)
FOODSTUFF* (1)
FORK* (1)
FRUIT* (1)
GARLIC* (1)
GIN (1)
GINGER* (1)
GINS (1)
GLUTTON* (1)
GLUTTONOU* (1)
GNAW* (1)
GOBBL* (1)
GRAIN* (1)
GRAP* (1)
GROG* (1)
GRUEL* (1)
GULP* (1)
GUM (1)
GUMS (1)
GUT (1)
GUTS (1)
HAM (1)
HAMS (1)
HERB* (1)
HONEY* (1)
HUNGER* (1)
HUNGRY* (1)
IMBIB* (1)
INEDIBL* (1)
INTESTIN* (1)
JAW* (1)
JUIC* (1)
LAP (1)
LAPS (1)
LEMON* (1)
LICK* (1)
LIME (1)
LIMES (1)
LIP (1)
LIPS (1)
LIQUEUR* (1)
LIQUOR* (1)
LUNCH* (1)
MAIZ* (1)
MEAL* (1)
MEAT* (1)
MELON* (1)
MENU* (1)
MILK* (1)
MINT* (1)
MORSEL* (1)
MOUTH* (1)
MOUTHFUL* (1)
MUSHROOM* (1)
MUTTON* (1)
NAUS* (1)
NECTAR* (1)
NIBBL* (1)
NOURISH* (1)
NOURISHMENT* (1)
NURTUR* (1)
NUT (1)
NUTS (1)
OLIV* (1)
ORAL* (1)
PALAT* (1)
PARTAK* (1)
PASTRI* (1)
PASTRY* (1)
PEA (1)
PEANUT* (1)
PEAR* (1)
PEAS (1)
PEPPER* (1)
PHILTR* (1)
PINEAPPL* (1)
POISON* (1)
PORK* (1)
PORRIDG* (1)
POT (1)
POTATO* (1)
POTBEL* (1)
POTS (1)
PUCKER* (1)
PUMPKIN* (1)
QUENCH* (1)
RASPBERRY* (1)
RAW (1)
RAWLY (1)
REPAST* (1)
RESTAURANT* (1)
RESTAURENT* (1)
RICE (1)
RICES (1)
RIPENES* (1)
ROAST* (1)
RUM (1)
RUMS (1)
SALAD* (1)
SALIVA* (1)
SALIVAT* (1)
SALT* (1)
SAUC* (1)
SAUERKRAUT* (1)
SESAM* (1)
SHERBERT* (1)
SHERRY* (1)
SOUP* (1)
SPAT* (1)
SPIT* (1)
SPITTL* (1)
SPOON* (1)
STARV* (1)
STARVAT* (1)
STOMACH* (1)
STRAWBERRI* (1)
STRAWBERRY* (1)
SUCK* (1)
SUCKL* (1)
SUGAR* (1)
SUPPER* (1)
SWALLOW* (1)
TEA (1)
TEAS (1)
TEAT* (1)
TEETH* (1)
THIRST* (1)
THIRSTY* (1)
THROAT* (1)
TIT (1)
TITS (1)
TOMATO* (1)
TONGU* (1)
TOOTH* (1)
UNCOOK* (1)
VEAL* (1)
VEGETABL* (1)
VENISON* (1)
VODKA* (1)
VOMIT* (1)
WHEAT* (1)
WHISKEY* (1)
WHISKY* (1)
YAM (1)
YAMS* (1)
YEAST* (1)
ANALITY
ANAL (1)
ANUS (1)
ANUSES (1)
ARSE (1)
ARSEHOL* (1)
ASSES (1)
ASS-HOL* (1)
ASSHOL* (1)
BESHAT* (1)
BESHIT* (1)
BESMEAR* (1)
BILE* (1)
BOWEL* (1)
BUTTOCK* (1)
CACK* (1)
CESSPOOL* (1)
CLOACA* (1)
CLOT (1)
CLOTS* (1)
CONSTIPAT* (1)
DANK* (1)
DAUB* (1)
DEFECAT* (1)
DEFIL* (1)
DELOUS* (1)
DIARRHOEA* (1)
DIRT* (1)
DIRTY* (1)
DISGUST* (1)
DUNG* (1)
DUNGHILL* (1)
EFFLUVIA* (1)
EFFLUVIUM* (1)
ENEMA* (1)
EXCRET* (1)
FART (1)
FARTS (1)
FECAL* (1)
FECES (1)
FETID* (1)
FETOR* (1)
FILTH* (1)
FILTHY* (1)
IMPUR* (1)
LATRIN* (1)
LICE (1)
LOATHSOM* (1)
LOUS* (1)
MAGGOT* (1)
MAGGOTY* (1)
MALODOROU* (1)
MALODOUROU* (1)
MANUR* (1)
MESS (1)
MESSES (1)
MESSING (1)
MIASMA* (1)
MUD (1)
MUDDY* (1)
MUDS (1)
OFFAL* (1)
OOZ* (1)
OOZY* (1)
OUTHOUS* (1)
PISS* (1)
POLLUT* (1)
PUTRESCENC* (1)
PUTRESCENT* (1)
PUTRID* (1)
RANCID* (1)
RECTUM* (1)
REEK* (1)
ROT (1)
ROTS (1)
ROTTEN* (1)
ROTTING (1)
RUMP* (1)
SCUM* (1)
SEWER* (1)
SHAT* (1)
SHIT* (1)
SLIMY* (1)
SMEAR* (1)
SODOMIST* (1)
SODOMY* (1)
SOIL* (1)
STAL* (1)
STENCH* (1)
STINK* (1)
SWEAT* (1)
UNCLEAN* (1)
UNWASH* (1)
URIN* (1)
SEX
VENEREAL* (1)
ADULTERER* (1)
ADULTERY* (1)
ALLUR* (1)
BAWD* (1)
BAWDY* (1)
BITCH* (1)
BROTHEL* (1)
CARESS* (1)
CARNAL* (1)
CIRCUMCIS* (1)
CLITORI* (1)
COHABIT* (1)
COITU* (1)
CONCUBIN* (1)
COPULAT* (1)
COQUETT* (1)
COQUETTISH* (1)
COURTESAN* (1)
CUCKOLD* (1)
CUNT* (1)
CUPID* (1)
DEBAUCH* (1)
DEFLOWER* (1)
EJACULAT* (1)
EROTIC* (1)
FONDL* (1)
FORNICAT* (1)
FUCK* (1)
GENITAL* (1)
GENITALIA* (1)
GIRDL* (1)
GROIN* (1)
HAREM* (1)
HARLOT* (1)
HOMOSEXUAL* (1)
HOMOSEXUALITY* (1)
IMMODEST* (1)
INCEST* (1)
INCESTUOU* (1)
INDECENT* (1)
INDISCRET* (1)
INFATUAT* (1)
KISS* (1)
LASCIVIOU* (1)
LECHER* (1)
LECHEROU* (1)
LECHERY* (1)
LEER (1)
LEERS (1)
LEWD* (1)
LIBERTIN* (1)
LICENTIOU* (1)
LOVER* (1)
LUST* (1)
LUSTFUL* (1)
LUSTY* (1)
MASTURBAT* (1)
MENSTRUAL* (1)
MENSTRUAT* (1)
MISTRES* (1)
NAKED (1)
NUDE (1)
NUDES (1)
OBSCEN* (1)
OBSCENITY* (1)
ORGASM* (1)
ORGI* (1)
ORGY* (1)
PANDER* (1)
PARAMOUR* (1)
PENI* (1)
PERVERS* (1)
PERVERT* (1)
PHALLIC* (1)
PHALLU* (1)
PREGNANCY* (1)
PREGNANT* (1)
PROCREAT* (1)
PROSTITUT* (1)
PRURIENT* (1)
PUBERTY* (1)
PUBI* (1)
PUBIC* (1)
RAPE* (1)
RAPING* (1)
RIBALD* (1)
SATYR* (1)
SEDUC* (1)
SENSUAL* (1)
SENSUOU* (1)
SEX (1)
SEXED (1)
SEXES (1)
SEX-LINKED (1)
SEXUAL* (1)
SEXY* (1)
SHAMELES* (1)
SLATTERN* (1)
SLUT* (1)
SLUTTY* (1)
TESTI* (1)
TESTICL* (1)
THIGH* (1)
TROLLOP* (1)
UNBLUSH* (1)
UNDRES* (1)
VAGINA* (1)
VENU* (1)
VOLUPTUOU* (1)
VULVA* (1)
WAIST* (1)
WANTON* (1)
WHOR* (1)
WOMB* (1)
SENSATION (1)
SENSATION
TOUCH
BRUSH* (1)
COARS* (1)
CONTACT* (1)
CUDD* (1)
CUDDL* (1)
HANDL* (1)
ITCH* (1)
ITCHY* (1)
MASSAG* (1)
PRICKL* (1)
ROUGH* (1)
RUB (1)
RUBB* (1)
RUBS (1)
SCALY (1)
SCRATCH* (1)
SHARP* (1)
SLICK* (1)
SLIPPERY* (1)
SMOOTH* (1)
SNUGGL* (1)
STING (1)
STINGS (1)
STROK* (1)
TEXTUR* (1)
THICK* (1)
TICKL* (1)
TINGL* (1)
TOUCH* (1)
WAXY* (1)
TASTE
AFTERTAST* (1)
BITTER* (1)
DELECTABL* (1)
DELICIOU* (1)
FLAVOR* (1)
GALL (1)
HONI* (1)
LUSCIOU* (1)
PIQUANT* (1)
SAVOR* (1)
SAVORY* (1)
SAVOUR* (1)
SAVOURY* (1)
SOUR* (1)
SPIC* (1)
SPICY* (1)
SUGARY* (1)
SWEET* (1)
SWEETNES* (1)
TANG* (1)
TANGY* (1)
TART* (1)
TAST* (1)
TASTY* (1)
TOOTHSOM* (1)
UNPALATABL* (1)
UNSAVORY* (1)
VINEGAR* (1)
VINEGARY* (1)
ODOR
AROMA* (1)
AROMATIC* (1)
BREATH* (1)
COLOGN* (1)
FRAGRANC* (1)
FRAGRANT* (1)
FUME* (1)
FUMING* (1)
INCENS* (1)
INHAL* (1)
MUSK* (1)
MUSKY* (1)
MUSTY* (1)
NOSE* (1)
NOSTRIL* (1)
ODOR* (1)
ODOUR* (1)
PERFUM* (1)
PUNGENC* (1)
PUNGENT* (1)
SCENT* (1)
SMEL* (1)
SMELL* (1)
SNIF* (1)
SNIFF* (1)
GENERAL-SENSATION
APPERCEIVE (1)
APPERCEPTIVE (1)
ATTENT* (1)
AWAR* (1)
AWARENES* (1)
BALMY* (1)
BASK* (1)
BEAUTIFUL* (1)
BEAUTY* (1)
CHARM* (1)
COMFORT* (1)
COMFORTABL* (1)
CREAMY* (1)
FAIR* (1)
IMPRESS* (1)
LOVELINES* (1)
LUSH* (1)
LUXURIOU* (1)
LUXURY* (1)
MILKY* (1)
OVERSENSITIV* (1)
PERCEIV* (1)
PERCEPT* (1)
PERCEPTUAL (1)
PHYSICAL* (1)
PLEASANT* (1)
PRETTY* (1)
REFRESH* (1)
RELISH* (1)
REVEL* (1)
SENSAT* (1)
SENSITIV* (1)
STIMULAT* (1)
SUMPTUOU* (1)
SOUND
AUDITORILLY (1)
ALOUD* (1)
AUDIBL* (1)
AUDITION (1)
AUDITORY* (1)
AURAL (1)
BANG* (1)
BELL* (1)
BINAURAL (1)
BLAR* (1)
BOOM* (1)
BUZZ* (1)
CHORD* (1)
CHORU* (1)
CLACK* (1)
CLAMOR* (1)
CLAMOUR* (1)
CLANG* (1)
CRACKL* (1)
CROAK* (1)
DEAF* (1)
DRON* (1)
DRUM* (1)
EAR (1)
EARS (1)
ECHO* (1)
HARK* (1)
HEAR* (1)
HEARD* (1)
HISS* (1)
HUM (1)
HUMM* (1)
HUMS (1)
LISTEN* (1)
LOUD* (1)
LOUDER* (1)
MELODI* (1)
MELODIOU* (1)
MELODY* (1)
MUFFL* (1)
MUSIC* (1)
MUSICAL* (1)
NOIS* (1)
NOISY* (1)
PEAL* (1)
PURR* (1)
RACKET* (1)
RASP* (1)
RATTL* (1)
RAUCOU* (1)
RESONANT* (1)
RESOUND* (1)
RHYTHM* (1)
RING* (1)
RUMBL* (1)
RUSTL* (1)
SERENAD* (1)
SHRILL* (1)
SNAP* (1)
SONOROU* (1)
SOUND* (1)
STRIDANT* (1)
STRIDENT* (1)
SWISH* (1)
SYMPHONY* (1)
TEMPO* (1)
THUD* (1)
TIMBR* (1)
TINKL* (1)
TONAL (1)
TONE (1)
TONED (1)
TONES (1)
TRILL* (1)
TUNE (1)
TUNED (1)
TUNES (1)
TUNING* (1)
UNHEARD* (1)
VOCAL* (1)
VOIC* (1)
WHIR* (1)
WHIRR* (1)
WHISTL* (1)
AFTER-IMAGE* (1)
VISION
BLINK* (1)
ILLUMINANT (1)
INVISIBILITY (1)
MONOCULAR (1)
AMBER* (1)
APPEAR* (1)
APPEARANC* (1)
AURORA* (1)
AZUR* (1)
BEAM* (1)
BEHOLD* (1)
BINOCULAR (1)
BLUE* (1)
BLUISH* (1)
BRIGHT* (1)
BROWN* (1)
BRUNETT* (1)
CHROMATIC* (1)
COLOR* (1)
COLOUR* (1)
COMPLEX* (1)
CRIMSON* (1)
DISCERN* (1)
DYE* (1)
EMERALD* (1)
FILM* (1)
FLASH* (1)
FLICKER* (1)
FLOURESCENT* (1)
GAZE* (1)
GAZING* (1)
GLANC* (1)
GLAR* (1)
GLEAM* (1)
GLIMPS* (1)
GLINT* (1)
GLISTEN* (1)
GLITTER* (1)
GLOSSY* (1)
GLOW* (1)
GRAY* (1)
GREEN* (1)
GREY* (1)
HALO* (1)
HUE* (1)
ILLUMINAT* (1)
IMAG* (1)
INVISIBL* (1)
LAMP* (1)
LANTERN* (1)
LAVENDER* (1)
LIGHT* (1)
LIGHTEN* (1)
LIGHTN* (1)
LIMPID* (1)
LOOK* (1)
LUCID* (1)
LUMINANCE (1)
LUMINOU* (1)
LUSTER* (1)
LUSTROU* (1)
MOONBEAM* (1)
MOONLIGHT* (1)
NOTIC* (1)
OBSERV* (1)
OPAQU* (1)
PAINT* (1)
PEEK* (1)
PEER* (1)
PICTUR* (1)
PINK* (1)
RADIANC* (1)
RADIANT* (1)
RAY (1)
RAYS (1)
REGARD* (1)
ROSY* (1)
ROUG* (1)
RUBY* (1)
RUDDY* (1)
SAPPHIR* (1)
SAW (1)
SCAN (1)
SCANN* (1)
SCANS (1)
SCARLET* (1)
SCEN* (1)
SCENIC* (1)
SEE (1)
SEEING* (1)
SEEN* (1)
SEES (1)
SHEEN* (1)
SHIMMER* (1)
SHIN* (1)
SHON* (1)
SIGHT* (1)
SPARKL* (1)
SPIED (1)
SPIES (1)
SPY (1)
SPYING* (1)
STAR (1)
STARLIGHT* (1)
STARS (1)
SUNLIGHT* (1)
SUNSHIN* (1)
SURVEY* (1)
TAN (1)
TANNED (1)
TANNING* (1)
TANS (1)
TINT* (1)
TRANSLUCENT* (1)
TRANSPARENT* (1)
TWINKL* (1)
UNSEEN* (1)
VIEW* (1)
VIOLET* (1)
VISIBL* (1)
VISION* (1)
VISUAL* (1)
WATCH* (1)
WITNES* (1)
YELLOW* (1)
COLD
ALASKA* (1)
ARCTIC* (1)
BENUMB* (1)
CHIL* (1)
CHILL* (1)
COLD* (1)
COLDER* (1)
COOL* (1)
FREEZ* (1)
FRIGID* (1)
FROST* (1)
FROSTBIT* (1)
FROZ* (1)
FROZEN* (1)
GLACIER* (1)
HOAR* (1)
ICE* (1)
ICINES* (1)
ICING* (1)
ICY (1)
NORTH* (1)
NORTHERN* (1)
NUMB (1)
NUMBNESS* (1)
POLAR* (1)
SHIVER* (1)
SIBERIA* (1)
SLEET* (1)
SNOW* (1)
SNOWSTORM* (1)
SNOWY* (1)
THUL* (1)
WINTER* (1)
WINTRY* (1)
HARD
ALABASTER* (1)
BRAS* (1)
BRASSY* (1)
BRAZEN* (1)
BRITTL* (1)
BRONZ* (1)
COPPER* (1)
CRISP* (1)
CRISPY* (1)
GLAS* (1)
GLASSY* (1)
GRANIT* (1)
GRAVEL* (1)
HARD* (1)
IRON* (1)
MARBL* (1)
METAL* (1)
METALLIC* (1)
NAIL* (1)
PEBB* (1)
PORCELAIN* (1)
RIGID* (1)
ROCK* (1)
SOLID* (1)
SPLINTER* (1)
STEEL* (1)
STIFF* (1)
STON* (1)
STONY* (1)
ZINC* (1)
SOFT
DAMASK* (1)
DELICAT* (1)
DOWNY* (1)
FEATHER* (1)
FLEEC* (1)
FLEECY* (1)
FLUFFY* (1)
GENTL* (1)
GENTLENES* (1)
GOSSAMER* (1)
LACE (1)
LACES (1)
LACING* (1)
LACY* (1)
MELLOW* (1)
MILD* (1)
MURMUR* (1)
PLIANT* (1)
POWDERY* (1)
SATIN* (1)
SATINY* (1)
SILK* (1)
SOFT* (1)
TENDER* (1)
TING* (1)
VELVET* (1)
VELVETY* (1)
WHISPER* (1)
DEFENSIVE SYMBOLIZATION
PASSIVITY
STAGNANT (1)
APATHETIC* (1)
APATHY* (1)
BED (1)
BEDD* (1)
BEDS (1)
BOREDOM* (1)
CALM* (1)
CONTENTED* (1)
CONTENTMENT* (1)
COUCH* (1)
COZY* (1)
DEAD* (1)
DEATH* (1)
DECAY* (1)
DIE (1)
DIED* (1)
DIES (1)
DORMANT* (1)
DRIFT* (1)
DYING* (1)
EASE* (1)
EASED (1)
EASES (1)
HUSH* (1)
IDL* (1)
IMMOBIL* (1)
INACTIV* (1)
INACTIVITY* (1)
INDIFFERENC* (1)
INDIFFERENT* (1)
INDOLENT* (1)
INERT* (1)
INERTIA* (1)
INNERT* (1)
LAID* (1)
LAIN* (1)
LANGOROU* (1)
LANGUID* (1)
LANGUISH* (1)
LANGUOR* (1)
LASSITUD* (1)
LAY (1)
LAYING* (1)
LAYS (1)
LAZY* (1)
LEADEN* (1)
LEISUR* (1)
LETHARGIC* (1)
LETHARGY* (1)
LIE (1)
LIES (1)
LINGER* (1)
LISTLES* (1)
LUL* (1)
LULL* (1)
MOTIONLES* (1)
NESTL* (1)
NONCHALANC* (1)
NONCHALANT* (1)
PASSIV* (1)
PASSIVITY* (1)
PEACEFUL* (1)
PERISH* (1)
PHLEGMATIC* (1)
PLACID* (1)
PROCRASTINAT* (1)
QUIET* (1)
RELAX* (1)
RELAXAT* (1)
REPOS* (1)
REST* (1)
RESTFUL* (1)
RETIR* (1)
SAFE (1)
SAFELY (1)
SAFETY* (1)
SECUR* (1)
SECURITY* (1)
SEDENTARY* (1)
SEREN* (1)
SERENITY* (1)
SILENC* (1)
SILENT* (1)
SLACK* (1)
SLOTHFUL* (1)
SLOW* (1)
SLUGGISH* (1)
SOLAC* (1)
SOOTH* (1)
STAGNAT* (1)
STATIC* (1)
STILLNES* (1)
SUBMISS* (1)
SUBMISSIV* (1)
SUBMIT* (1)
SUCCUMB* (1)
TRANQ* (1)
UNHURRI* (1)
VAGRANT* (1)
VELLEITY* (1)
WEARISOM* (1)
WEARY* (1)
YIELD* (1)
VOYAGE
CARAVAN* (1)
CHAS* (1)
CRUIS* (1)
DESERT* (1)
DRIV* (1)
EMBARK* (1)
EMIGRAT* (1)
EXPLOR* (1)
IMMIGRAT* (1)
IMMIGRANT* (1)
JOURNEY* (1)
MIGRAT* (1)
NAVIGAT* (1)
NOMAD* (1)
NOMADIC* (1)
OSCILLAT* (1)
PILGRIM* (1)
PILGRIMAG* (1)
RIDE (1)
RIDES (1)
RIDING* (1)
ROAM* (1)
RODE (1)
ROV* (1)
SAIL* (1)
SAILOR* (1)
SEAFAR* (1)
SEARCH* (1)
SHIP* (1)
STRAY* (1)
TOUR* (1)
TOURIST* (1)
TRAVEL* (1)
TREK* (1)
TRIP* (1)
VAGABOND* (1)
VOYAG* (1)
WANDER* (1)
WANDERLUST* (1)
WAYFARER* (1)
WILDERNES* (1)
YONDER* (1)
RANDOM MOVEMENT
ACTIVITI* (1)
ACTIVITY* (1)
AGITAT* (1)
CHURN* (1)
COMMOT* (1)
CONVULS* (1)
EXPAND* (1)
EXPANS* (1)
FIDGET* (1)
FLOUNDER* (1)
FLURRI* (1)
FLURRY* (1)
JERK* (1)
LURCH* (1)
ORBIT* (1)
PITCH* (1)
PIVOT* (1)
PULS* (1)
PULSAT* (1)
QUAK* (1)
QUIVER* (1)
REEL* (1)
REVOLV* (1)
ROL* (1)
ROLL* (1)
ROTAT* (1)
SEETH* (1)
SHAK* (1)
SHOOK* (1)
SPASM* (1)
SPIN* (1)
SPREAD* (1)
STAGGER* (1)
STIR* (1)
SWAY* (1)
SWEL* (1)
SWELL* (1)
SWIVEL* (1)
SWOLLEN* (1)
THROB* (1)
TOTTER* (1)
TWICH* (1)
TWIST* (1)
TWITCH* (1)
UNDULAT* (1)
VIBRAT* (1)
WAVE (1)
WAVED (1)
WAVES (1)
WAVING* (1)
WHIRL* (1)
WOBBL* (1)
DIFFUSION
BLUR* (1)
CLOUD* (1)
CLOUDY* (1)
CURTAIN* (1)
DARKEN* (1)
DIFFUS* (1)
DIM (1)
DIMM* (1)
DIMS (1)
EQUIVOCAL* (1)
FADE (1)
FADED (1)
FADES* (1)
FADING* (1)
FOG (1)
FOGG* (1)
FOGS (1)
HAZE* (1)
HAZING* (1)
HAZY* (1)
INDEFINIT* (1)
INDISTINCT* (1)
MIST* (1)
MISTY* (1)
MURKINES* (1)
MURKY* (1)
NEBULA* (1)
NEBULOU* (1)
OBSCUR* (1)
OVERCAST* (1)
SCREEN* (1)
SHAD* (1)
SHADOW* (1)
SHADOWY* (1)
SHADY* (1)
TWILIGHT* (1)
UNCERTAIN* (1)
UNCERTAINT* (1)
UNCLEAR* (1)
VAGU* (1)
VAPOR* (1)
VAPOUR* (1)
VEIL* (1)
CHAOS
AIMLES* (1)
AMBIGUIT* (1)
AMBIGUOU* (1)
ANARCHY* (1)
CHANC* (1)
CHAO* (1)
CHAR (1)
CHARS (1)
CATASTROPHE (1)
CONFUS* (1)
CROWD* (1)
DISCORD* (1)
DISCORDANT* (1)
DISHEVEL* (1)
DISORDER* (1)
ENTANGL* (1)
GORDIAN* (1)
HAPHAZARD* (1)
IRREGULAR* (1)
JUMBL* (1)
JUNGL* (1)
LABYRINTH* (1)
LAWLES* (1)
LITTER* (1)
MOB (1)
MOBB* (1)
MOBS (1)
OVERGROWN* (1)
OVERRUN* (1)
PERPLEX* (1)
RANDOM* (1)
RUIN* (1)
UNRU* (1)
WILD* (1)
REGRESSIVE COGNITION
UNKNOWN
BIZZAR* (1)
BODILES* (1)
BOUNDLES* (1)
CRYPTIC* (1)
ENIGMA* (1)
ESOTERIC* (1)
EXOTIC* (1)
FANTASTIC* (1)
FORMLES* (1)
IMMEASURABL* (1)
INCONCEIVABL* (1)
INCREDIBL* (1)
INDESCRIBABL* (1)
INEFFABL* (1)
INFINITY* (1)
INSCRUTABL* (1)
LIMITLES* (1)
MAGI* (1)
MAGIC* (1)
MAGU* (1)
MARVEL* (1)
MYST* (1)
NAMELES* (1)
NOTHINGNES* (1)
NUMBERLES* (1)
OCCULT* (1)
ODD* (1)
SECRECY* (1)
SECRET* (1)
SHAPELES* (1)
SORCERER* (1)
SORCERES* (1)
STRANG* (1)
TRANSCEND* (1)
UNBELIEVABL* (1)
UNBOUND* (1)
UNIMAGINABL* (1)
UNKNOWN* (1)
UNLIMIT* (1)
UNSPEAKABL* (1)
UNTOLD* (1)
VOID* (1)
TIMELESSNESS
AEON* (1)
CEASELES* (1)
CENTURI* (1)
CENTURY* (1)
CONTINUAL* (1)
CONTINUOU* (1)
ENDLES* (1)
ENDUR* (1)
EON* (1)
ETERNAL* (1)
ETERNITY* (1)
EVERLAST* (1)
FOREVER* (1)
IMMORTAL* (1)
INCESSANT* (1)
LIFETIM* (1)
OUTLIV* (1)
PERMANENC* (1)
PERMANENT* (1)
PERPETUAL* (1)
TIMELESSNES* (1)
UNCEAS* (1)
UNDY* (1)
UNEND* (1)
TEST5
CONSCIOUSNESS ALTERATION
AMUCK* (1)
ASLEEP* (1)
AWAK* (1)
AWAKEN* (1)
BEDLAM* (1)
COMA* (1)
CRAZ* (1)
CRAZY* (1)
DELIRIOU* (1)
DELIRIUM* (1)
DELPHIC* (1)
DEMENT* (1)
DOZE (1)
DOZED (1)
DOZES (1)
DOZING (1)
DREAM* (1)
DREAMY* (1)
DROWSY* (1)
ECSTACY* (1)
ECSTASY* (1)
ECSTATIC* (1)
ENCHANT* (1)
EPILEPSY* (1)
EPILEPTIC* (1)
EXSTASY* (1)
FAINT* (1)
FANTASI* (1)
FANTASY* (1)
FEBRIL* (1)
FEVER* (1)
FEVERISH* (1)
FRENZY* (1)
HALLUCINAT* (1)
HASHISH* (1)
HIBERNAT* (1)
HYPNO* (1)
HYSTERIA* (1)
HYSTERIC* (1)
IMAGIN* (1)
IMAGINAT* (1)
INSAN* (1)
INSANITY* (1)
INTUIT* (1)
IRRATIONAL* (1)
LAUDANUM* (1)
LUNACY* (1)
LUNATIC* (1)
MAD (1)
MADLY (1)
MADMAN* (1)
MADMEN* (1)
MADNES* (1)
MADWOMAN* (1)
MADWOMEN* (1)
MANIA* (1)
MANIAC* (1)
MEDITAT* (1)
MESMERIZ* (1)
MONOMANIA* (1)
NAP (1)
NAPP* (1)
NAPS (1)
NEUROSI* (1)
NEUROTIC* (1)
NIGHTMAR* (1)
NIGHTMARISH* (1)
OPIUM* (1)
OPIATES (1)
ORACL* (1)
PARANO* (1)
PREMONIT* (1)
PSYCHIC* (1)
PSYCHOSI* (1)
PSYCHOTIC* (1)
RAPTUR* (1)
RAPTUROU* (1)
REVERI* (1)
REVERY* (1)
REVIV* (1)
SEER* (1)
SLEEP* (1)
SLEEPY* (1)
SLUMBER* (1)
STUPOR* (1)
SWOON* (1)
TELEPATHY* (1)
TRANC* (1)
UNREASON* (1)
VERTIGO* (1)
VISIONARY* (1)
WAK* (1)
WOKE (1)
BRINK-PASSAGE
ACCES* (1)
AISL* (1)
AQUEDUCT* (1)
ARTERI* (1)
ARTERY* (1)
AVENU* (1)
BARRIER* (1)
BORDER* (1)
BOUNDARI* (1)
BOUNDARY* (1)
BRIDG* (1)
BRIM* (1)
BRINK* (1)
CANAL* (1)
CHANNEL* (1)
COAST* (1)
CONDUIT* (1)
CORRIDOR* (1)
CURB* (1)
DOOR* (1)
DOORSTEP* (1)
DOORWAY* (1)
EDG* (1)
ENTRANC* (1)
ENTRY* (1)
FENC* (1)
FERRI* (1)
FERRY* (1)
FLOOR* (1)
FOOTPATH* (1)
FOYER* (1)
FRAM* (1)
FRING* (1)
FRONTIER* (1)
GATE* (1)
GATING* (1)
HALL* (1)
HALLWAY* (1)
HIGHWAY* (1)
HORIZON* (1)
LANE (1)
LANES (1)
LEDG* (1)
LINE (1)
LINED (1)
LINES (1)
LINING* (1)
MARGIN* (1)
PASSAG* (1)
PASSAGEWAY* (1)
PATH* (1)
PERIMET* (1)
PERIPHER* (1)
PORT* (1)
RAILROAD* (1)
RAILWAY* (1)
RIM (1)
RIMM* (1)
RIMS (1)
ROAD* (1)
ROUT* (1)
SIDEWALK* (1)
SKYLIN* (1)
STAIR* (1)
STEP* (1)
STREET* (1)
THRESHOLD* (1)
TRAIL* (1)
VERG* (1)
VIADUCT* (1)
VISTA* (1)
WALL* (1)
WINDOW* (1)
NARCISSISM
ARM (1)
ARMS (1)
BEARD* (1)
BLOOD* (1)
BODI* (1)
BODY* (1)
BONE (1)
BONES (1)
BRAIN* (1)
BROW (1)
BROWS (1)
CHEEK* (1)
CHEST* (1)
CHIN* (1)
CORPS* (1)
EYE* (1)
FACE (1)
FACES (1)
FACIES (1)
FEET* (1)
FLESH* (1)
FOOT* (1)
FOREHEAD* (1)
HAIR* (1)
HAND* (1)
HEAD* (1)
HEART* (1)
HEEL* (1)
HIP (1)
HIPS (1)
KIDNEY* (1)
KNEE (1)
KNEES (1)
LEG (1)
LEGS (1)
LIMB* (1)
LIVER* (1)
MUSCL* (1)
NAVEL* (1)
NECK* (1)
ORGAN* (1)
PALM* (1)
RIB (1)
RIBS (1)
SHOULDER* (1)
SKIN* (1)
SKULL* (1)
THUMB* (1)
TOE (1)
TOES (1)
VEIN* (1)
WRIST* (1)
CONCRETENESS
ACROS* (1)
AFAR* (1)
AFIELD* (1)
AHEAD* (1)
ALONG* (1)
AMONG* (1)
APART* (1)
ASID* (1)
AT (1)
AWAY* (1)
BACK* (1)
BEHIND* (1)
BESID* (1)
BETWEEN* (1)
CENTER* (1)
CENTR* (1)
CIRCL* (1)
CLOS* (1)
CLOSER* (1)
CORNER* (1)
CURV* (1)
DISTANC* (1)
DISTANT* (1)
EAST* (1)
EASTERN* (1)
EVERYWHER* (1)
EXTEND* (1)
EXTENSIV* (1)
EXTENT* (1)
FAR (1)
FARTHER* (1)
FLAT* (1)
FORWARD* (1)
FRONT* (1)
FURTHER* (1)
HERE (1)
HITHER* (1)
INSID* (1)
INTERIOR* (1)
LAYER* (1)
LENGTH* (1)
LEVEL* (1)
LONG* (1)
MIDDL* (1)
MIDST* (1)
NARROW* (1)
NEAR* (1)
NEARBY* (1)
NEARER* (1)
NEAREST* (1)
OFF (1)
OPEN* (1)
OUT (1)
OUTING* (1)
OUTS (1)
OUTSID* (1)
OUTWARD* (1)
OVER* (1)
PLAC* (1)
POINT* (1)
POSIT* (1)
REAR* (1)
REGION* (1)
ROUND* (1)
SEPARAT* (1)
SIDE (1)
SIDED (1)
SIDES (1)
SIDING* (1)
SITUAT* (1)
SOMEWHER* (1)
SOUTH* (1)
SPAC* (1)
SPACIOU* (1)
SPATIAL (1)
SQUAR* (1)
STRAIGHT* (1)
SURFAC* (1)
SURROUND* (1)
THENC* (1)
THITHER* (1)
TIP (1)
TIPP* (1)
TIPS (1)
TOWARD* (1)
WEST* (1)
WESTERN* (1)
WHER* (1)
WHEREVER* (1)
WIDE* (1)
WIDTH* (1)
WITHIN* (1)
ICARIAN IMAGERY
ASCENT
ALOFT* (1)
ARIS* (1)
ARISEN* (1)
AROS* (1)
ASCEND* (1)
ASCENS* (1)
BOUNC* (1)
CLIMB* (1)
DANGL* (1)
DAWN* (1)
FLAP* (1)
FLED (1)
FLEW* (1)
FLIER* (1)
FLIGHT* (1)
FLING* (1)
FLOAT* (1)
FLOWN* (1)
FLUNG* (1)
FLUTTER* (1)
FLY* (1)
HANG* (1)
HOVER* (1)
HURL* (1)
ICARIAN* (1)
ICARU* (1)
JUMP* (1)
LEAP* (1)
LEPT* (1)
LIFT* (1)
MOUNT* (1)
MOUNTAINSID* (1)
RISE (1)
RISEN* (1)
RISES (1)
RISING* (1)
SOAR* (1)
SPRANG* (1)
SPRING* (1)
SPRUNG* (1)
SUNRIS* (1)
SWING* (1)
THREW* (1)
THROW* (1)
THROWN* (1)
TOSS* (1)
UPHILL* (1)
UPWARD* (1)
WING* (1)
HEIGHT
ABOV* (1)
AERIAL* (1)
AIRPLAN* (1)
ARCH (1)
ATMOSPHER* (1)
BALCONY* (1)
BATTLEMENT* (1)
BIRD* (1)
BRANCH* (1)
CEIL* (1)
CLIFF* (1)
CRAG* (1)
CRAGGY* (1)
DOME (1)
DOMES (1)
DOMING (1)
ELEVAT* (1)
ERECT* (1)
GREW* (1)
GROW* (1)
GROWN* (1)
HEAP* (1)
HEAVEN* (1)
HEIGHT* (1)
HIGH* (1)
HIGHER* (1)
HILL* (1)
HILLSID* (1)
HILLTOP* (1)
HUNG* (1)
LADDER* (1)
LOFT* (1)
LOFTY* (1)
MOUND* (1)
MOUNTAIN* (1)
OBELISK* (1)
OVERHEAD* (1)
PEAK* (1)
PILE* (1)
PILING* (1)
PLANET* (1)
PRECIPIC* (1)
PYRAMID* (1)
RAFTER* (1)
RAINBOW* (1)
RAMPART* (1)
RIDG* (1)
ROOF* (1)
SKY (1)
SLOP* (1)
SPIR* (1)
STEEP* (1)
SUMMIT* (1)
TALL* (1)
TALLER* (1)
TALLEST* (1)
TOP (1)
TOPP* (1)
TOPS (1)
TOWER* (1)
TREE* (1)
TRELLI* (1)
UPPER* (1)
UPPERMOST* (1)
ZENITH* (1)
DESCENT
BASE (1)
BASES (1)
BURI* (1)
BURROW* (1)
BURY* (1)
DESCEND* (1)
DESCENT* (1)
DIG (1)
DIGG* (1)
DIGS (1)
DIP (1)
DIPP* (1)
DIPS (1)
DIVE* (1)
DOWNHILL* (1)
DOWNSTREAM* (1)
DROOP* (1)
DROP (1)
DROPS (1)
DUG (1)
FALL* (1)
FALLEN* (1)
FELL* (1)
HEADLONG* (1)
LEAN* (1)
PLUNG* (1)
RECED* (1)
RECLIN* (1)
SANK* (1)
SINK* (1)
SLID* (1)
SLIP* (1)
STOOP* (1)
SUNDOWN* (1)
SUNK* (1)
SUNKEN* (1)
SUNSET* (1)
SWOOP* (1)
TOPPL* (1)
TUMBL* (1)
DEPTH
BELOW* (1)
BENEATH* (1)
BOTTOM* (1)
CANYON* (1)
CAVE* (1)
CAVING* (1)
CELLAR* (1)
CHASM* (1)
CREVAS* (1)
DEEP* (1)
DEEPER* (1)
DEPTH* (1)
DITCH* (1)
DOWNWARD* (1)
GUTTER* (1)
HOLE (1)
HOLES (1)
LOW* (1)
PIT (1)
PITS (1)
PITT* (1)
PRECIPITOU* (1)
RAVIN* (1)
ROOT* (1)
SUBMARIN* (1)
TRENCH* (1)
TUNNEL* (1)
UNDER (1)
UNDERGROUND* (1)
UNDERNEATH* (1)
UNDERWORLD* (1)
VALLEY* (1)
FIRE
SOLAR (1)
ABLAZ* (1)
AFIR* (1)
ASH (1)
ASHES (1)
BLAST* (1)
BLAZ* (1)
BOIL* (1)
BROIL* (1)
BURN* (1)
BURNT* (1)
CANDL* (1)
CHARCOAL* (1)
COAL* (1)
COMBUST* (1)
EMBER* (1)
FIERY* (1)
FIRE* (1)
FLAM* (1)
HEARTH* (1)
HEAT* (1)
HOT (1)
IGNIT* (1)
INFERNO* (1)
INFLAM* (1)
KINDL* (1)
LIT (1)
MELT* (1)
SCORCH* (1)
SEAR* (1)
SIZZL* (1)
SMOK* (1)
SMOLDER* (1)
SMOULDER* (1)
SPARK* (1)
SULTRY* (1)
SUN (1)
SUNN* (1)
SUNS (1)
SUNSTROK* (1)
TROPIC* (1)
TROPICAL* (1)
WARM* (1)
WARMTH* (1)
WATER
BATH* (1)
BEACH* (1)
BROOK* (1)
BUBBL* (1)
BUCKET* (1)
CREEK* (1)
DAM (1)
DAMM* (1)
DAMP* (1)
DAMS (1)
DEW (1)
DEWS (1)
DEWY (1)
DIKE* (1)
DOWNPOUR* (1)
DRENCH* (1)
SHORING (1)
SURF (1)
SURFING (1)
DRIP* (1)
FEN (1)
FLOOD* (1)
FLUID* (1)
FOAM* (1)
FOUNTAIN* (1)
GURGL* (1)
HUMID* (1)
LAKE (1)
LAKES (1)
LIQUID* (1)
MOAT* (1)
MOIST* (1)
MOISTUR* (1)
MOSS (1)
MOSSES (1)
OCEAN* (1)
OVERFLOW* (1)
PERSPIR* (1)
PERSPIRAT* (1)
POND* (1)
POOL* (1)
POUR* (1)
RAIN* (1)
RAINFALL* (1)
RIVER* (1)
SATURAT* (1)
SEA (1)
SEAS (1)
SHORE (1)
SHORES (1)
SHOWER* (1)
SOAK* (1)
SPLASH* (1)
SPRINKL* (1)
STEAM* (1)
STEAMY* (1)
STREAM* (1)
SWAM* (1)
SWAMP* (1)
SWAMPY* (1)
SWIM* (1)
SWUM* (1)
TIDE (1)
TIDES (1)
TIDING (1)
TRICKL* (1)
WADE* (1)
WADING (1)
WASH* (1)
WATER* (1)
WATERFALL* (1)
WET* (1)
SECONDARY
ABSTRACTION
DIVERSE (1)
DIVERSIFICATION (1)
DIVERSIFIED (1)
DIVERSITY (1)
EVIDENT (1)
EVIDENTIAL (1)
GUESS* (1)
LOGISTIC (1)
ABSTRACT* (1)
ALMOST* (1)
ALTERNATIV* (1)
ANALY* (1)
ATTRIBUT* (1)
AXIOM* (1)
BASIC* (1)
BELIEF* (1)
BELIEV* (1)
CALCULAT* (1)
CAUS* (1)
CERTAIN* (1)
CHARACTERIZ* (1)
CHOIC* (1)
CHOOS* (1)
CHOS* (1)
CIRCUMSTANC* (1)
COMPREHEND* (1)
COMPAR* (1)
COMPREHENS* (1)
CONDITIONAL* (1)
CONCENTRAT* (1)
CONCEPT* (1)
CONCLUD* (1)
CONJECTUR* (1)
CONSEQUENC* (1)
CONSEQUENT* (1)
CONSIDER* (1)
CONTRIV* (1)
CRITER* (1)
CRITERIA* (1)
DECID* (1)
DEEM* (1)
DEFIN* (1)
DELIBERAT* (1)
DETERMIN* (1)
DIFFERENC* (1)
DIFFERENT* (1)
DISTINCT* (1)
DISTINGUISH* (1)
DOCTRIN* (1)
EFFECT* (1)
ESTABLISH* (1)
ESTIMAT* (1)
EVALUAT* (1)
EVIDENC* (1)
EXAMIN* (1)
EXAMPL* (1)
EXCEPT* (1)
FACT (1)
FACTS (1)
FEATUR* (1)
FIGUR* (1)
FORETHOUGHT* (1)
FORMULAT* (1)
GUES* (1)
HISTORY* (1)
IDEA* (1)
IMPORTANC* (1)
IMPORTANT* (1)
INFORMAT* (1)
INTERPRET* (1)
INTERPRETAT* (1)
JUDG* (1)
JUDGMENT* (1)
KNEW* (1)
KNOW* (1)
LEARN* (1)
LOGIC* (1)
MAY (1)
MEANT* (1)
MISTAK* (1)
MISTAKEN* (1)
MISTOOK* (1)
MODEL* (1)
OPIN* (1)
OTHERWIS* (1)
PERHAP* (1)
PLAN* (1)
POSSI* (1)
PREDICAT* (1)
PREDICT* (1)
PROBAB* (1)
PROBABL* (1)
PROBLEM* (1)
PROOF* (1)
PROV* (1)
PURPOS* (1)
QUALI* (1)
QUANT* (1)
RE-ANALY* (1)
RE-EXAMIN* (1)
RATIONAL* (1)
REAL (1)
REALITY* (1)
REASON* (1)
REASONABL* (1)
RECONSIDER* (1)
REEXAMIN* (1)
REFORMULAT* (1)
REINTERPRETAT* (1)
RELEARN* (1)
RELEVANC* (1)
RELEVANT* (1)
RESEARCH* (1)
RESOLV* (1)
SCHEM* (1)
SCIENC* (1)
SCIENTIFIC* (1)
SELECT* (1)
SIGNIFICANC* (1)
SOLUT* (1)
SOMETH* (1)
SOMEWHAT* (1)
SOURC* (1)
SUBJECT* (1)
SUPPOS* (1)
SURE (1)
SURELY (1)
TEND* (1)
THEM* (1)
THEOR* (1)
THINK* (1)
THINKER* (1)
THOUGHT* (1)
TOPIC* (1)
TRUE (1)
TRULY (1)
TRUTH* (1)
TTT1 (1)
UNDERSTAND* (1)
UNDERSTOOD* (1)
WEIGH (1)
WEIGHED* (1)
WEIGHING* (1)
WEIGHS (1)
WHY (1)
SOCIAL BEHAVIOR
GUEST* (1)
QUOTA (1)
QUOTA-* (1)
QUOTAS (1)
ACQUIESCENCE (1)
APPROBATION (1)
CONSENSUS* (1)
CONSULT (1)
PROSOCIAL (1)
SOCIABLE (1)
ABLE* (1)
ACCEPT* (1)
ACCEPTANC* (1)
ADDRES* (1)
ADMIT* (1)
ADVIC* (1)
ADVIS* (1)
AGRE* (1)
AID* (1)
ALLOW* (1)
ANNOUNC* (1)
ANSWER* (1)
APOLOGIS* (1)
APOLOGIZ* (1)
APPEAL* (1)
APPROV* (1)
APPROVAL* (1)
ASK (1)
ASKED (1)
ASKING (1)
ASKS (1)
ASSIST* (1)
ASSUR* (1)
BARGAIN* (1)
BECKON* (1)
BESEECH* (1)
BORROW* (1)
CALL* (1)
COMMENT* (1)
COMMIT* (1)
COMMUNICAT* (1)
CONDUCT* (1)
CONFER* (1)
CONFES* (1)
CONFID* (1)
CONFIRM* (1)
CONGRATULAT* (1)
CONSENT* (1)
CONSOL* (1)
CONSOLAT* (1)
CONVERS* (1)
CONVERSAT* (1)
CONVINC* (1)
COOPERAT* (1)
COUNSEL* (1)
DECLAR* (1)
DEPEND* (1)
DEPENDENT* (1)
DESCRIB* (1)
DIALOGU* (1)
DISCOURS* (1)
DISCUS* (1)
DISCUSS* (1)
DONAT* (1)
EDUCAT* (1)
ELECT* (1)
ENCOURAG* (1)
ENCOURAGEMENT* (1)
ENGAG* (1)
ESCORT* (1)
EXCUS* (1)
EXPLAIN* (1)
FOLLOW* (1)
FORGAV* (1)
FORGIV* (1)
FORGIVEN* (1)
GENEROSITY* (1)
GENEROU* (1)
GIFT* (1)
GRANT* (1)
GREET* (1)
GUID* (1)
GUIDANC* (1)
HELP* (1)
IMITAT* (1)
IMPLOR* (1)
INFLUENC* (1)
INFORM* (1)
INQUIR* (1)
INSTRUCT* (1)
INTERVIEW* (1)
INTRODUC* (1)
INVIT* (1)
KNEEL* (1)
LEND* (1)
LENT* (1)
MEET* (1)
MENT* (1)
MESSAG* (1)
MET* (1)
MUTUAL* (1)
OFFER* (1)
PARDON* (1)
PARTICIPAT* (1)
PERSUAD* (1)
PERSUA* (1)
PLEAD* (1)
PLEAS* (1)
PREACH* (1)
PROCLAIM* (1)
PROMIS* (1)
PROPOS* (1)
PROTECT* (1)
PROVID* (1)
QUOT* (1)
RECIT* (1)
REEDUCATION (1)
REMARK* (1)
REMIND* (1)
REPLI* (1)
REPLY (1)
REPRESENT* (1)
REQUEST* (1)
RESCU* (1)
RESPOND* (1)
RESPONS* (1)
SAID* (1)
SALE (1)
SALES (1)
SAY* (1)
SERVIC* (1)
SHAR* (1)
SHELTER* (1)
SIGNAL* (1)
SOCIAL* (1)
SOLICIT* (1)
SPEAK* (1)
SPEAKER* (1)
SPEECH* (1)
SPOK* (1)
SPOKEN* (1)
SUGGEST* (1)
SWORN* (1)
TALK* (1)
TAUGHT* (1)
TEACH* (1)
TELL* (1)
THANK* (1)
TOLD* (1)
TREAT* (1)
UTTER* (1)
VISIT* (1)
INSTRUMENTAL BEHAVIOR
AVAIL (1)
CAVEAT* (1)
DIVESTMENT* (1)
DIVIDEND* (1)
FOUNDR* (1)
LABORATOR* (1)
SPIN-OFF* (1)
AVAILABILITY (1)
COMPONENT* (1)
INGREDIENT (1)
LOGISTICS (1)
MERCHANDISE (1)
PROVISION* (1)
ACHIEV* (1)
ACHIEVEMENT* (1)
ACQUIR* (1)
ACQUISIT* (1)
AFFORD* (1)
AIM* (1)
APPLIC* (1)
APPLIE* (1)
APPLY (1)
ARCHITECT* (1)
ASSEMBL* (1)
ATTAIN* (1)
ATTEMPT* (1)
AVAILABL* (1)
BELONG* (1)
BID* (1)
BOUGHT* (1)
BUILD* (1)
BUILT* (1)
BURDEN* (1)
BUSINES* (1)
BUY* (1)
CAPABL* (1)
CARRI* (1)
CARRY* (1)
CLAIM* (1)
COLLECT* (1)
CONSTRUCT* (1)
COPI* (1)
COPY* (1)
COST* (1)
COUNT* (1)
CRAFT* (1)
CRAFTSMAN* (1)
CULTIVAT* (1)
CURE* (1)
CURING* (1)
DELIVER* (1)
EARN* (1)
EFFORT* (1)
EMPLOY* (1)
ENDEAVOR* (1)
FACTORI* (1)
FACTORY* (1)
FEAT (1)
FEATS (1)
FIND* (1)
FINISH* (1)
FORGE (1)
FORGES (1)
FOUND* (1)
GAIN* (1)
GOAL* (1)
GRASP* (1)
HARVEST* (1)
HIRE (1)
HIRED (1)
HIRES (1)
HIRING* (1)
IMPROV* (1)
INDUSTRI* (1)
INDUSTRY* (1)
JOB (1)
JOBS (1)
LABOR* (1)
LABORIOU* (1)
LABOUR* (1)
LABOURIOU* (1)
LESSON* (1)
MACHIN* (1)
MACHINERY* (1)
MAK* (1)
MANIPULAT* (1)
MANUFACTUR* (1)
MARKET* (1)
MEND* (1)
MERCHANT* (1)
MONEY* (1)
OBTAIN* (1)
OCCUPAT* (1)
OCCUPY* (1)
OWNERSHIP* (1)
PAID* (1)
PAY (1)
PAYING* (1)
PAYS (1)
PERFORM* (1)
PICK* (1)
PLOUGH* (1)
PLOW* (1)
POSSES* (1)
POSSESS* (1)
PRACTIC* (1)
PREPAR* (1)
PRIC* (1)
PRIVATION* (1)
PRODUC* (1)
PROFIT* (1)
PROFITABL* (1)
PROPERTY* (1)
PURCHAS* (1)
PURSU* (1)
REACH* (1)
RECONSTRUCT (1)
RECORD* (1)
RECOVER* (1)
REPAIR* (1)
REPRODUCE (1)
RESTOR* (1)
RESULT* (1)
RISK* (1)
SEL* (1)
SELL* (1)
SKIL* (1)
SKILL* (1)
SKILLFUL* (1)
SOLD* (1)
SOW* (1)
SPEND* (1)
SPENT* (1)
STUDENT* (1)
STUDI* (1)
STUDIOU* (1)
STUDY* (1)
SUCCE* (1)
SWEEP* (1)
SWEPT* (1)
TASK* (1)
TEST* (1)
TOIL (1)
TOILED (1)
TOILS* (1)
TRAD* (1)
TRIED (1)
TRY (1)
TRYING* (1)
TRYS (1)
USE (1)
USED (1)
USES (1)
USING (1)
WIN (1)
WINNING* (1)
WINS (1)
WON (1)
WORK* (1)
RESTRAINT
COMPTROLLER* (1)
DISCIPLINE (1)
MAGIST* (1)
PENALIZ* (1)
PENITENTIARY (1)
ARREST* (1)
ASSIGN* (1)
AUTHORIZ* (1)
BAR (1)
BARRED (1)
BARRING (1)
BARS (1)
BIND* (1)
BLOCK* (1)
BLOCKAD* (1)
BOUND* (1)
CAG* (1)
CAPTIV* (1)
CAPTIVITY* (1)
CAPTUR* (1)
CATCH* (1)
CAUGHT* (1)
CENSUR* (1)
CHASTIS* (1)
CHASTIZ* (1)
COERC* (1)
COMPEL* (1)
CONFIN* (1)
CONFORM* (1)
CONFORMITY* (1)
CONSTRAIN* (1)
CONSTRAINT* (1)
CONSTRICT* (1)
CONTROL* (1)
DECREE* (1)
DETAIN* (1)
DETER* (1)
DUNGEON* (1)
ENCLOS* (1)
FORBAD* (1)
FORBID* (1)
FORBIDDEN* (1)
GUARD* (1)
GUARDIAN* (1)
HALT* (1)
HAMPER* (1)
HINDER* (1)
HINDRANC* (1)
IMPERATIV* (1)
IMPRISON* (1)
INHIBIT* (1)
INSIST* (1)
INTERFER* (1)
INTERRUPT* (1)
JAIL* (1)
LEASH* (1)
LIMIT* (1)
LOCK* (1)
MANAG* (1)
MUST* (1)
NECESSARY* (1)
NECESSITY* (1)
OBEDIENC* (1)
OBEY* (1)
OBLIG* (1)
OBLIGAT* (1)
OBSTACL* (1)
OBSTRUCT* (1)
PENALTI* (1)
PENALTY* (1)
PERMISS* (1)
PERMIT* (1)
POLIC* (1)
POLICEMAN* (1)
POLICEMEN* (1)
PRESCRIB* (1)
PREVAIL* (1)
PREVENT* (1)
PRISON* (1)
PROHIBIT* (1)
PUNISH* (1)
PUNISHMENT* (1)
REFUS* (1)
REGULAT* (1)
REIGN* (1)
REQUIR* (1)
REQUIREMENT* (1)
RESIST* (1)
RESTRAIN* (1)
RESTRAINT* (1)
RESTRICT* (1)
SCOLD* (1)
SHUT* (1)
STOP* (1)
STRICT* (1)
SUMMON* (1)
SUPPRES* (1)
TABOO* (1)
TAX* (1)
THWART* (1)
ORDER
ORDINAL (1)
ACCURAT* (1)
ARRANG* (1)
ARRAY* (1)
BALANC* (1)
CATALOG* (1)
CLASS* (1)
CONSISTENC* (1)
CONSISTENT* (1)
CONSTANC* (1)
CONSTANT* (1)
DIVID* (1)
FORM* (1)
FORMULA* (1)
GRAD* (1)
INDEX* (1)
LIST* (1)
MEASUR* (1)
METHOD* (1)
MODERAT* (1)
NEAT* (1)
NORM* (1)
NORMAL* (1)
ORGANI* (1)
ORDER (1)
PATTERN* (1)
PRECIS* (1)
RANK* (1)
REGULAR* (1)
REORGANIZ* (1)
ROUTIN* (1)
SERIAL (1)
SERIES* (1)
SIMPL* (1)
SIMPLICITY* (1)
STABILITY* (1)
STANDARD* (1)
SYMMETR* (1)
SYSTEM* (1)
UNIFORM* (1)
UNIVERSAL* (1)
TEMPORAL REFERENCES
FULL-TIME (1)
LONG-TERM (1)
LONGEVIT* (1)
PART-TIME (1)
SHORT-TERM (1)
ABRUPT* (1)
AGAIN (1)
AGO (1)
ALREADY* (1)
ANCIENT (1)
BREVITY* (1)
BRIEF* (1)
CLOCK* (1)
DAILY* (1)
DATE (1)
DATED (1)
DATES (1)
DATING (1)
DECAD* (1)
DUR* (1)
DURAT* (1)
EARLIER* (1)
EARLY* (1)
EPHEMERAL* (1)
EVER* (1)
FORMER* (1)
FREQUENT* (1)
HAST* (1)
HENCEFORTH* (1)
HOUR* (1)
IMMEDIAT* (1)
IMMEDIATE* (1)
INSTANT* (1)
INTERLUD* (1)
MEANTIM* (1)
MEANWHIL* (1)
MINUT* (1)
MOMENT* (1)
MOMENTARY* (1)
MONTH* (1)
NOW (1)
OCCAS* (1)
OCCASIONAL* (1)
OFTEN* (1)
OLD (1)
OLDER* (1)
ONCE (1)
PAST* (1)
PREMATUR* (1)
PRESENT* (1)
PREVIOU* (1)
PRIOR* (1)
QUICK* (1)
SEASON* (1)
SELDOM* (1)
SOMETIM* (1)
SOON* (1)
SOONER* (1)
SUDDEN* (1)
TEMPORARY* (1)
THEN* (1)
TILL* (1)
TIME* (1)
TIMING* (1)
TODAY* (1)
TONIGHT* (1)
WEEK* (1)
WHEN* (1)
WHENEVER* (1)
WHIL* (1)
YEAR* (1)
YESTERDAY* (1)
MORAL IMPERATIVE
LEGITIMACY (1)
RESPECT (1)
BIRTHRIGHT* (1)
COMMANDMENT* (1)
CONSCIENC* (1)
CONSCIENTIOU* (1)
CORRECT* (1)
CUSTOM (1)
CUSTOMER* (1)
CUSTOMIZ* (1)
DUTI* (1)
DUTY* (1)
ETHIC* (1)
HONEST* (1)
HONESTY* (1)
HONOR* (1)
HONORABL* (1)
HONOUR* (1)
HONOURABL* (1)
JUSTIC* (1)
LAW (1)
LAWFUL* (1)
LAWS (1)
LEGAL* (1)
LEGITIMAT* (1)
MORAL* (1)
MORALITY* (1)
OUGHT* (1)
PREROGATIV* (1)
PRINCIPL* (1)
PRIVILEG* (1)
PROPER* (1)
RECTITUD* (1)
RESPECTFUL* (1)
RESPONSIBILITY* (1)
RESPONSIBL* (1)
RIGHT* (1)
RIGHTEOU* (1)
RIGHTFUL* (1)
SANCT* (1)
SHOULD* (1)
TRUSTWORTHY* (1)
UNJUST* (1)
UPRIGHT* (1)
VIRTU* (1)
EMOTIONS
POSITIVE AFFECT
AMUS* (1)
AMUSEMENT* (1)
BLITH* (1)
CAREFRE* (1)
CELEBRAT* (1)
CHEER* (1)
CHEERFUL* (1)
CHEERY* (1)
CHUCKL* (1)
DELIGHT* (1)
DELIGHTFUL* (1)
ELAT* (1)
ENJOY* (1)
ENJOYABL* (1)
ENJOYMENT* (1)
ENTERTAIN* (1)
ENTERTAINMENT* (1)
ENTHUSIASM* (1)
ENTHUSIASTIC* (1)
EXCIT* (1)
EXHILERAT* (1)
EXULT* (1)
EXULTANT* (1)
FUN (1)
FUNNY* (1)
GAIETY* (1)
GAY* (1)
GLAD* (1)
GLADNES* (1)
GLEE (1)
GLEEFUL* (1)
GLEELY (1)
GRATIFI* (1)
GRATIFY* (1)
GRIN* (1)
HAPPINES* (1)
HAPPY* (1)
HILARIOU* (1)
HUMOR* (1)
HUMOROU* (1)
HUMOUR* (1)
HUMOUROU* (1)
JOCUND* (1)
JOK* (1)
JOLLY (1)
JOVIAL* (1)
JOY* (1)
JOYFUL* (1)
JOYOU* (1)
LAUGH* (1)
LAUGHTER* (1)
MERRIMENT* (1)
MERRY* (1)
MIRTH* (1)
MIRTHFUL* (1)
OVERJOY* (1)
PLAYFUL* (1)
PLEASANTRY* (1)
PLEASUR* (1)
PLEASURABL* (1)
REJOIC* (1)
RELIEF* (1)
RELIEV* (1)
ROLLICK* (1)
SATISF* (1)
SMIL* (1)
THRIL* (1)
THRILL* (1)
VIVACIOU* (1)
VIVACITY* (1)
ANXIETY
TREMOR (1)
AFRAID* (1)
AGHAST* (1)
ALARM* (1)
ANGUISH* (1)
ANXI* (1)
AVOID* (1)
BLUSH* (1)
CARES (1)
COWARD* (1)
COWER* (1)
CRISI* (1)
DANGEROU* (1)
DESPERAT* (1)
DISTRES* (1)
DREAD* (1)
DREADFUL* (1)
FEAR* (1)
FEARFUL* (1)
FRANTIC* (1)
FRET* (1)
FRIGHT* (1)
HORRIFI* (1)
HORRIFY* (1)
HORROR* (1)
NERVOU* (1)
NERVOUSNES* (1)
PANIC* (1)
PHOBIA* (1)
PHOBIC* (1)
SCARE (1)
SCARED (1)
SCARES (1)
SCARY (1)
SHRIEK* (1)
SHUDDER* (1)
SHY* (1)
TERRIFI* (1)
TERRIFY* (1)
TERROR* (1)
TIMID* (1)
TRAUMA* (1)
TREMBL* (1)
TREMULOU* (1)
TROUBL* (1)
UNEASINES* (1)
UNEASY* (1)
WORRI* (1)
WORRY* (1)
SADNESS
AGGRIEVED (1)
ALAS (1)
DEJECT* (1)
DEPRES* (1)
DEPRESS* (1)
DESPAIR* (1)
DESPONDANT* (1)
DESPONDENT* (1)
DIRG* (1)
DISAPPOINT* (1)
DISAPPOINTMENT* (1)
DISCONSOLAT* (1)
DISCOURAG* (1)
DISHEARTEN* (1)
DISMAL* (1)
DISSATISFI* (1)
DISSATISFY* (1)
DISTRAUGHT* (1)
DOLDRUM* (1)
DOWNCAST* (1)
DREARY* (1)
ELEGY* (1)
FORLORN* (1)
FROWN* (1)
FUNEREAL* (1)
GRIE* (1)
GROAN* (1)
HOPELES* (1)
HUMILIAT* (1)
LAMENT* (1)
LAMENTAT* (1)
LONE* (1)
LONELINES* (1)
MELANC* (1)
MISERABL* (1)
MISERI* (1)
MISERY* (1)
MOAN* (1)
MOURN* (1)
MOURNFUL* (1)
ORPHAN* (1)
PAIN* (1)
PITIFUL* (1)
PLAINT* (1)
REGRET* (1)
REGRETFUL* (1)
REMORS* (1)
REPENT* (1)
REPENTANC* (1)
REPENTENC* (1)
RUE (1)
SAD (1)
SADDEN* (1)
SADLY (1)
SADNES* (1)
SOB (1)
SOBB* (1)
SOBS (1)
SOMBER* (1)
SOMBR* (1)
SORROW* (1)
SORROWFUL* (1)
SORRY* (1)
SUFFER* (1)
TEARFUL* (1)
TRAGEDY* (1)
TRAGIC* (1)
UNHAPPINES* (1)
UNHAPPY* (1)
WAIL* (1)
WEEP* (1)
WEPT* (1)
WHIN* (1)
WOE (1)
WOES (1)
AFFECTION
AFFECT* (1)
AFFECTIONAT* (1)
AMOROU* (1)
AMOUROU* (1)
APPRECIAT* (1)
ATTRACTIV* (1)
BEFRIEND* (1)
BELOV* (1)
BOSOM* (1)
BRIDAL* (1)
BRIDE* (1)
CHERISH* (1)
CONGENIAL* (1)
CORDIAL* (1)
COURTSHIP* (1)
DARL* (1)
DEAR* (1)
DEVOT* (1)
EMBRAC* (1)
ENAMOR* (1)
ENAMOUR* (1)
ENDEAR* (1)
FAMILIAR* (1)
FONDER (1)
FAREWELL* (1)
FAVOR* (1)
FAVOUR* (1)
FIANC* (1)
FLIRT* (1)
FOND (1)
FONDNES* (1)
FRATERNITY* (1)
FRIEND* (1)
FRIENDSHIP* (1)
GOODBY* (1)
GRATEFUL* (1)
INTIMACY* (1)
INTIMAT* (1)
KIND* (1)
KINDNES* (1)
LIKE* (1)
LIKING* (1)
LOV* (1)
MARRI* (1)
MARRIAG* (1)
MARRY* (1)
MATE (1)
MATED (1)
MATES (1)
MATING* (1)
MERCY* (1)
PAT (1)
PATS (1)
PATT* (1)
PITI* (1)
PITY* (1)
ROMANC* (1)
SWEETHEART* (1)
SYMPAT* (1)
UNSELFISH* (1)
WARMHEART* (1)
WELCOM* (1)
WOOED* (1)
WOOING* (1)
WOOS (1)
AGGRESSION
ABHOR* (1)
ABUS* (1)
ABUSIV* (1)
ACCUS* (1)
AFFLICT* (1)
AGGRESS* (1)
AGGRESSIV* (1)
AMBUSH* (1)
ANGER* (1)
ANGRI* (1)
ANGRIER* (1)
ANGRY* (1)
ANNIHILAT* (1)
ANNOY* (1)
ANNOYANC* (1)
ANTAGONIZ* (1)
ARGU* (1)
ARGUMENT* (1)
ARMY* (1)
ARROW* (1)
ASSAULT* (1)
ATTACK* (1)
AVENG* (1)
AX (1)
AXE (1)
AXES (1)
BATTL* (1)
BEAK* (1)
BEAT* (1)
BEATEN* (1)
BETRAY* (1)
BLADE* (1)
BLAM* (1)
BLOODY* (1)
BOTHER* (1)
BRAWL* (1)
BREAK* (1)
BROK* (1)
BROKEN* (1)
BRUTAL* (1)
CANNON* (1)
CHID* (1)
COMBAT* (1)
COMPLAIN* (1)
CONFLICT* (1)
CONDEMN* (1)
CONTROVERSY* (1)
CRITIC* (1)
CRUEL* (1)
CRUSH* (1)
CUT (1)
CUTS (1)
CUTT* (1)
DAMAG* (1)
DECEI* (1)
DEFEAT* (1)
DEGRAD* (1)
DEMOLISH* (1)
DEPRIV* (1)
DERID* (1)
DESPIS* (1)
DESTROY* (1)
DESTRUCT* (1)
DESTRUCTIV* (1)
DETEST* (1)
DISAGRE* (1)
DISAGREEMENT* (1)
DISAPPROV* (1)
DISCONTENT* (1)
DISLIK* (1)
DISPUT* (1)
DISTURB* (1)
DOUBT* (1)
ENEMI* (1)
ENEMY* (1)
ENRAG* (1)
EXASPERAT* (1)
CONTROVERSIAL* (1)
CRITIQUE (1)
DISPARAG* (1)
IRRITABLE (1)
EXPLOIT* (1)
EXTERMINAT* (1)
FEUD* (1)
FIERC* (1)
FIGHT* (1)
FOUGHT* (1)
FURIOU* (1)
FURY* (1)
GASH* (1)
GRAPPL* (1)
GROWL* (1)
GRUDG* (1)
GUN (1)
GUNN* (1)
GUNS (1)
HARM* (1)
HARSH* (1)
HATE* (1)
HATR* (1)
HIT (1)
HITS (1)
HITT* (1)
HOMICID* (1)
HOSTIL* (1)
HURT* (1)
INGRAT* (1)
INJUR* (1)
INJURY* (1)
INSULT* (1)
INVAD* (1)
INVAS* (1)
IRAT* (1)
IRK* (1)
IRRITAT* (1)
JEALOU* (1)
JEALOUSY* (1)
JEER* (1)
KICK* (1)
KIL* (1)
KILL* (1)
KNIF* (1)
KNIV* (1)
LOATH* (1)
MAIM* (1)
MISTREAT* (1)
MOCK* (1)
MURDER* (1)
OBLITERAT* (1)
OFFEND* (1)
OPPOS* (1)
PREDATORY* (1)
PROTEST* (1)
QUARREL* (1)
RAGE (1)
RAGES (1)
RAGING (1)
RAPIN* (1)
REBEL* (1)
REBELL* (1)
REBUK* (1)
RELENTLES* (1)
REPROACH* (1)
RESENT* (1)
RESENTMENT* (1)
RETRIBUT* (1)
REVENG* (1)
REVOLT* (1)
RIDICUL* (1)
RIP (1)
RIPP* (1)
RIPS (1)
ROB (1)
ROBB* (1)
ROBS (1)
SARCASM* (1)
SARCASTIC* (1)
SCALP* (1)
SCOF* (1)
SCOFF* (1)
SCOURG* (1)
SEIZ* (1)
SEVER* (1)
SEVERITY* (1)
SHATTER* (1)
SHOOT* (1)
SHOT* (1)
SHOV* (1)
SLAIN* (1)
SLANDER* (1)
SLAP* (1)
SLAUGHTER* (1)
SLAY* (1)
SLEW* (1)
SMASH* (1)
SNARL* (1)
SNEER* (1)
SPEAR* (1)
SPITEFUL* (1)
SPURN* (1)
STAB* (1)
STEAL* (1)
STOL* (1)
STOLEN* (1)
STRANGL* (1)
STRIF* (1)
STRIK* (1)
STRUCK* (1)
STRUGGL* (1)
STUBBORN* (1)
SWORD* (1)
TAUNT* (1)
TEMPER* (1)
THREAT* (1)
THREATEN* (1)
TORE (1)
TORMENT* (1)
TORN* (1)
TORTUR* (1)
TRAITOR* (1)
TRAMPL* (1)
TREACHEROU* (1)
TREACHERY* (1)
TYRANT* (1)
UNKIND* (1)
VENGEANC* (1)
VENGEFUL* (1)
VEX (1)
VEXING (1)
VIOLAT* (1)
VIOLENC* (1)
VIOLENT* (1)
WAR (1)
WARRING (1)
WARRIOR* (1)
WARS (1)
WEAPON* (1)
WHIP* (1)
WOUND* (1)
WRATH* (1)
FOOTBALL* (1)
WRECK* (1)
EXPRESSIVE BEHAVIOR
ART (1)
ARTS* (1)
BARD* (1)
BARK* (1)
BAWL* (1)
BELLOW* (1)
BLEAT* (1)
CAROL* (1)
CHANT* (1)
CLOWN* (1)
CRIE* (1)
CRIING (1)
CRY (1)
DANC* (1)
EXCLAIM* (1)
EXPRESSIV* (1)
FRISK* (1)
FROLIC* (1)
GAME* (1)
GUITAR* (1)
HARP* (1)
HORN* (1)
HURRAH* (1)
HURRAY* (1)
LULLABY* (1)
LUTE (1)
LUTES (1)
LYRE (1)
MINSTREL* (1)
NEIGH (1)
NEIGHS (1)
PAINTER* (1)
PLAY* (1)
POEM* (1)
POET* (1)
POETIC* (1)
POETRY* (1)
ROAR* (1)
SANG* (1)
SCREAM* (1)
SHOUT* (1)
SIGH* (1)
SING (1)
SINGS* (1)
SPORT* (1)
SUNG* (1)
TROUBADOR* (1)
TROUBADOUR* (1)
VIOLIN* (1)
WARBL* (1)
YEL* (1)
YELL* (1)
GLORY
ADMIR* (1)
ADMIRABL* (1)
ADVENTUR* (1)
APPLAUD* (1)
APPLAUS* (1)
ARROGANC* (1)
ARROGANT* (1)
AUDACITY* (1)
AWE* (1)
BOAST* (1)
BOASTFUL* (1)
BRILLIANC* (1)
BRILLIANT* (1)
CAESAR* (1)
CASTL* (1)
CONQUE* (1)
CROWN* (1)
DAZZL* (1)
EAGL* (1)
ELIT* (1)
EMPEROR* (1)
EMPIR* (1)
EXALT* (1)
EXHIBIT* (1)
EXQUISIT* (1)
EXTRAORDINARY* (1)
EXTREM* (1)
FAME (1)
FAMED (1)
FAMOU* (1)
FOREMOST* (1)
GENIU* (1)
GLOR* (1)
GOLD* (1)
GOLDEN* (1)
GRANDEUR* (1)
GREAT* (1)
HAUGHTY* (1)
HERO* (1)
HOMAG* (1)
ILLUSTRIOU* (1)
KINGDOM* (1)
MAGESTIC* (1)
MAGNIFICENT* (1)
MAJESTIC* (1)
MAJESTY* (1)
NOBL* (1)
OUTSTAND* (1)
PALAC* (1)
POMP* (1)
PRESTIG* (1)
PRID* (1)
PRINC* (1)
PROUD* (1)
RENOWN* (1)
RESPLENDENT* (1)
RICH* (1)
ROYAL* (1)
ROYALTY* (1)
SCEPTR* (1)
SCORN* (1)
SPLENDID* (1)
SPLENDOR* (1)
STRUT* (1)
SUBLIM* (1)
SUPERIOR* (1)
SUPERIORITY* (1)
SUPREM* (1)
THRON* (1)
TRIUMP* (1)
VICTOR* (1)
VICTORIOU* (1)
VICTORY* (1)
WEALTH* (1)
WONDER* (1)
WONDERFUL* (1)
"""
# Default exclusion list: one wildcard pattern per line naming words that
# would otherwise spuriously match a category in the default dictionary
# (e.g. TAXI* would otherwise match the TAX* restraint pattern).
DEFAULT_RID_EXCLUSION_LIST = """
PROVINC*
MIDDLE-*
DIVERSION*
DIETHYL*
COUNTY*
SHARK*
PRICK*
PASTE*
HANDICAP*
HANDBOOK*
GRAPH*
FACTORIAL*
BUTTERFL*
BLANKET*
FASTI*
ENERGUMEN*
RELIGHT
REVERSE
DISPOSSESS*
NEEDL*
EXTREMITY
EXTREMENESS
EMPIRI*
TEMPERAT*
TEMPERAN*
TEMPERAMENT
SEVERAL
HARMO*
PATTERN
DECADENT
TAXO*
TAXI*
DETERS*
DETERR*
DETERIO*
DETERG*
TRADU*
TRADI*
TESTAM*
SOWBUG*
SELF*
FACTORIAL
COUNTE*
COUNTR*
TREATMENT*
TELLU*
QUOTI*
QUOTH*
PROVIDE*
PROMISC*
METTL*
METR*
METO*
METI*
METH*
METE*
META*
MENTO*
MENTH*
MENTA*
INFORMAL
ELECTR*
CONSOLIDAT*
SOLUTE
PROVER*
MAY-1
POOLED-1
POOL-1
HEATH*
COALESC*
COALI*
UNDERGRADUAT*
UNDERC*
FALLAC*
DIVERGEN*
ARIST*
REGU*
REGR*
REGIS*
REGE*
POSITIV*
PLACEN*
PLACEB*
OVERV*
OVERL*
LIMBIC*
CHINE*
PORTU*
PORTR*
PORTM*
PORTL*
PORTI*
PORTF*
PORTE*
PORTA*
PATHO*
PATHE*
SECRETI*
SECRETO*
SECRETA*
ROLE
QUAKER
TRIPT*
TRIPLE
TRIPE
RESTR*
RESTO*
RESTI*
RESTA*
REPOSSESS*
CONTENTS*
CONTENT-*
HOARY
HOARS
HOARD*
BASKET
AWARD*
TESTIN*
TESTIF*
TESTIMON*
STALK
SPATI*
GRAPHIC*
BREADTH
"""
class RIDApp:
def usage(self, args):
print "usage: %s [-h [-t TITLE] | -d FILE | -e FILE | --add-dict=FILE | --add-exc=FILE]" % (args[0],)
print "%s reads from standard input and writes to standard output." % (args[0],)
print "options:"
print " -h Generate HTML output."
print " -t TITLE Use TITLE as the report heading."
print " -d FILE Replaces the built-in dictionary with FILE."
print " -e FILE Replaces the built-in exclusion list with FILE."
print " --add-dict=FILE Processes FILE as a category dictionary."
print " --add-exc=FILE Processes FILE as an exlusion list."
def run(self, args):
rid = RegressiveImageryDictionary()
load_default_dict = True
load_default_exc = True
html_output = False
title = "RID Analysis"
try:
optlist, args = getopt.getopt(sys.argv[1:], 'd:e:ht:',
['add-dict=', 'add-exc='])
for (o, v) in optlist:
if o == '-d':
rid.load_dictionary_from_file(v)
load_default_dict = False
elif o == '-e':
rid.load_exclusion_list_from_file(v)
load_default_exc = False
elif o == '--add-dict':
rid.load_dictionary_from_file(v)
elif o == '--add-exc':
rid.load_exclusion_list_from_file(v)
elif o == '-h':
html_output = True
elif o == '-t':
title = v
else:
sys.stderr.write("%s: illegal option '%s'\n" % (args[0], o))
self.usage(args)
except getopt.GetoptError, e:
sys.stderr.write("%s: %s\n" % (args[0], e.msg))
self.usage(args)
sys.exit(1)
if load_default_dict:
rid.load_dictionary_from_string(DEFAULT_RID_DICTIONARY)
if load_default_exc:
rid.load_exclusion_list_from_string(DEFAULT_RID_EXCLUSION_LIST)
results = rid.analyze(sys.stdin.read())
if html_output:
rid.display_results_html(results, title)
else:
rid.display_results(results)
if __name__ == '__main__':
    # Script entry point: run the RID analyzer over standard input.
    app = RIDApp()
    app.run(sys.argv)
#######################################################################################################
# From trac.util.compat.py
# Implementation for sorted() for Python versions prior to 2.4
try:
    reversed = reversed  # builtin exists on Python 2.4+
except NameError:
    def reversed(x):
        """Fallback for Python < 2.4: yield the items of a sequence from
        last to first. Mappings are rejected, matching the builtin."""
        if hasattr(x, 'keys'):
            raise ValueError('mappings do not support reverse iteration')
        i = len(x)
        while i > 0:
            i -= 1
            yield x[i]
try:
    sorted = sorted  # builtin exists on Python 2.4+
except NameError:
    def sorted(iterable, cmp=None, key=None, reverse=False):
        """Partial implementation of the "sorted" function from Python 2.4.

        Unlike the previous version, the cmp argument is honoured instead
        of being silently ignored.
        """
        if key is None:
            lst = list(iterable)
        else:
            # Decorate with (key, index, value): the index keeps ordering
            # stable and prevents ever comparing the values themselves.
            lst = [(key(val), idx, val) for idx, val in enumerate(iterable)]
        if cmp is None:
            lst.sort()
        elif key is None:
            lst.sort(cmp)
        else:
            # Apply the comparison function to the computed keys only.
            lst.sort(lambda a, b: cmp(a[0], b[0]))
        if reverse:
            lst.reverse()
        if key is None:
            return lst
        return [item[-1] for item in lst]
#######################################################################################################
# Module-level RID instance preloaded with the default dictionary and
# exclusion list.
rid = RegressiveImageryDictionary()
rid.load_dictionary_from_string(DEFAULT_RID_DICTIONARY)
rid.load_exclusion_list_from_string(DEFAULT_RID_EXCLUSION_LIST)
# Subcategory names (lowercased) under each top-level category, e.g.
# emotions -> ['anxiety', 'glory', 'positive affect', 'sadness',
#              'expressive behavior', 'affection', 'aggression']
_roots = rid.category_tree.children
primary = [name.lower() for name in _roots["PRIMARY"].children.keys()]
secondary = [name.lower() for name in _roots["SECONDARY"].children.keys()]
emotions = [name.lower() for name in _roots["EMOTIONS"].children.keys()]
class RIDScoreItem:
    """One scored RID category: its name, hit count, the matched words, and
    the (lower-cased) name of its parent category."""
    def __init__(self, name, count, words, type):
        self.name, self.count, self.words, self.type = name, count, words, type
    def __str__(self):
        # An item prints as its category name.
        return self.name
class RIDScore(list):
    """A list of RIDScoreItem objects for one analyzed text, sorted most
    frequent first, plus the relative weight of each top-level category
    (primary / secondary / emotions)."""
    def __init__(self, rid, results):
        self.primary = 0
        self.secondary = 0
        self.emotions = 0
        self.count(rid, results)
        self.populate(results)
    def count(self, rid, results):
        """Compute the fraction of category hits falling under each
        top-level category of the RID tree."""
        roots = rid.category_tree.children
        # One bucket per top-level category (PRIMARY, SECONDARY, EMOTIONS).
        score = dict.fromkeys(roots, 0)
        total = 0
        for category, n in results.category_count.items():
            total += n
            for key, root in roots.items():
                if category.isa(root):
                    score[key] += n
        # Normalise the buckets to fractions of the total hit count.
        if total > 0:
            for key in score:
                score[key] = score[key] / float(total)
        self.primary = score["PRIMARY"]
        self.secondary = score["SECONDARY"]
        self.emotions = score["EMOTIONS"]
    def populate(self, results):
        """Fill self with one RIDScoreItem per matched category, most
        frequent category first, carrying the words found in the text."""
        ranked = sorted(results.category_count.items(),
                        key=lambda pair: pair[1], reverse=True)
        for category, n in ranked:
            self.append(RIDScoreItem(
                name=category.name.lower(),
                count=n,
                words=results.category_words[category],
                type=category.parent.name.lower()))
    def __str__(self):
        names = [str(entry) for entry in self]
        return str(names)
def categorise(txt):
    """Run the module-level RID over txt and wrap the result in a RIDScore."""
    global rid
    analysis = rid.analyze(txt)
    return RIDScore(rid, analysis)
# PLURAL - last updated for NodeBox 1rc7
# Author: Tom De Smedt <tomdesmedt@organisms.be>
# See LICENSE.txt for details.
# Based on "An Algorithmic Approach to English Pluralization" by Damian Conway:
# http://www.csse.monash.edu.au/~damian/papers/HTML/Plurals.html
# Prepositions are used to solve compound nouns like
# "mother-in-law" or "man at arms": when the second word is a preposition,
# the word before it is the one that inflects.
plural_prepositions = ["about", "above", "across", "after", "among", "around", "at", "athwart", "before", "behind", "below", "beneath", "beside", "besides", "between", "betwixt", "beyond", "but", "by", "during", "except", "for", "from", "in", "into", "near", "of", "off", "on", "onto", "out", "over", "since", "till", "to", "under", "until", "unto", "upon", "with"]
# Inflection rules that are either general,
# or apply to a certain category of words,
# or apply to a certain category of words only in classical mode,
# or apply only in classical mode.
# Each rule consists of:
# suffix (a regex), inflection (its replacement), category and classic flag.
plural_rules = [
    # 0/ Indefinite articles and demonstratives.
    [
        ["^a$|^an$", "some", None, False],
        ["^this$", "these", None, False],
        ["^that$", "those", None, False],
        ["^any$", "all", None, False]
    ],
    # 1/ Possessive adjectives.
    # Overlaps with 2/ for "his" and "its".
    # Overlaps with 3/ for "her".
    [
        ["^my$", "our", None, False],
        ["^your$|^thy$", "your", None, False],
        ["^her$|^his$|^its$|^their$", "their", None, False]
    ],
    # 2/
    # Possessive pronouns.
    [
        ["^mine$", "ours", None, False],
        ["^yours$|^thine$", "yours", None, False],
        ["^hers$|^his$|^its$|^theirs$", "theirs", None, False]
    ],
    # 3/
    # Personal pronouns.
    [
        ["^I$", "we", None, False],
        ["^me$", "us", None, False],
        ["^myself$", "ourselves", None, False],
        ["^you$", "you", None, False],
        ["^thou$|^thee$", "ye", None, False],
        # NOTE(review): "yourself" -> "yourself" looks like it should be
        # "yourselves" — kept as-is; confirm against upstream.
        ["^yourself$|^thyself$", "yourself", None, False],
        ["^she$|^he$|^it$|^they$", "they", None, False],
        ["^her$|^him$|^it$|^them$", "them", None, False],
        ["^herself$|^himself$|^itself$|^themself$", "themselves", None, False],
        ["^oneself$", "oneselves", None, False]
    ],
    # 4/
    # Words that do not inflect.
    [
        ["$", "", "uninflected", False],
        ["$", "", "uncountable", False],
        ["s$", "s", "s-singular", False],
        ["fish$", "fish", None, False],
        ["([- ])bass$", "\\1bass", None, False],
        ["ois$", "ois", None, False],
        ["sheep$", "sheep", None, False],
        ["deer$", "deer", None, False],
        ["pox$", "pox", None, False],
        ["([A-Z].*)ese$", "\\1ese", None, False],
        ["itis$", "itis", None, False],
        ["(fruct|gluc|galact|lact|ket|malt|rib|sacchar|cellul)ose$", "\\1ose", None, False]
    ],
    # 5/
    # Irregular plurals.
    # (mongoose, oxen).
    [
        ["atlas$", "atlantes", None, True],
        ["atlas$", "atlases", None, False],
        ["beef$", "beeves", None, True],
        ["brother$", "brethren", None, True],
        ["child$", "children", None, False],
        ["corpus$", "corpora", None, True],
        ["corpus$", "corpuses", None, False],
        ["^cow$", "kine", None, True],
        ["ephemeris$", "ephemerides", None, False],
        ["ganglion$", "ganglia", None, True],
        ["genie$", "genii", None, True],
        ["genus$", "genera", None, False],
        ["graffito$", "graffiti", None, False],
        ["loaf$", "loaves", None, False],
        ["money$", "monies", None, True],
        ["mongoose$", "mongooses", None, False],
        ["mythos$", "mythoi", None, False],
        ["octopus$", "octopodes", None, True],
        ["opus$", "opera", None, True],
        ["opus$", "opuses", None, False],
        ["^ox$", "oxen", None, False],
        ["penis$", "penes", None, True],
        ["penis$", "penises", None, False],
        ["soliloquy$", "soliloquies", None, False],
        ["testis$", "testes", None, False],
        ["trilby$", "trilbys", None, False],
        ["turf$", "turves", None, True],
        ["numen$", "numena", None, False],
        ["occiput$", "occipita", None, True],
    ],
    # 6/
    # Irregular inflections for common suffixes
    # (synopses, mice, men).
    [
        ["man$", "men", None, False],
        ["person$", "people", None, False],
        ["([lm])ouse$", "\\1ice", None, False],
        ["tooth$", "teeth", None, False],
        ["goose$", "geese", None, False],
        ["foot$", "feet", None, False],
        ["zoon$", "zoa", None, False],
        ["([csx])is$", "\\1es", None, False]
    ],
    # 7/
    # Fully assimilated classical inflections
    # (vertebrae, codices).
    [
        ["ex$", "ices", "ex-ices", False],
        ["ex$", "ices", "ex-ices-classical", True],
        ["um$", "a", "um-a", False],
        ["um$", "a", "um-a-classical", True],
        ["on$", "a", "on-a", False],
        ["a$", "ae", "a-ae", False],
        ["a$", "ae", "a-ae-classical", True]
    ],
    # 8/
    # Classical variants of modern inflections
    # (stigmata, soprani).
    [
        ["trix$", "trices", None, True],
        ["eau$", "eaux", None, True],
        # NOTE(review): "ieu" -> "ieu" may have been meant as "ieux" (cf.
        # "eau" -> "eaux" above) — kept as-is; confirm against upstream.
        ["ieu$", "ieu", None, True],
        ["([iay])nx$", "\\1nges", None, True],
        ["en$", "ina", "en-ina-classical", True],
        ["a$", "ata", "a-ata-classical", True],
        ["is$", "ides", "is-ides-classical", True],
        ["us$", "i", "us-i-classical", True],
        ["us$", "us", "us-us-classical", True],
        ["o$", "i", "o-i-classical", True],
        ["$", "i", "-i-classical", True],
        ["$", "im", "-im-classical", True]
    ],
    # 9/
    # -ch, -sh and -ss take -es in the plural
    # (churches, classes).
    [
        ["([cs])h$", "\\1hes", None, False],
        ["ss$", "sses", None, False],
        ["x$", "xes", None, False]
    ],
    # 10/
    # Certain words ending in -f or -fe take -ves in the plural
    # (lives, wolves).
    [
        ["([aeo]l)f$", "\\1ves", None, False],
        ["([^d]ea)f$", "\\1ves", None, False],
        ["arf$", "arves", None, False],
        ["([nlw]i)fe$", "\\1ves", None, False],
    ],
    # 11/
    # -y takes -ys if preceded by a vowel,
    # or when a proper noun,
    # but -ies if preceded by a consonant
    # (storeys, Marys, stories).
    [
        ["([aeiou])y$", "\\1ys", None, False],
        ["([A-Z].*)y$", "\\1ys", None, False],
        ["y$", "ies", None, False]
    ],
    # 12/
    # Some words ending in -o take -os,
    # the rest take -oes.
    # Words in which the -o is preceded by a vowel always take -os
    # (lassos, potatoes, bamboos).
    [
        ["o$", "os", "o-os", False],
        ["([aeiou])o$", "\\1os", None, False],
        ["o$", "oes", None, False]
    ],
    # 13/
    # Military stuff (Major Generals).
    [
        ["l$", "ls", "general-generals", False]
    ],
    # 14/
    # Otherwise, assume that the plural just adds -s
    # (cats, programmes).
    [
        ["$", "s", None, False]
    ],
]
# Suffix categories: word lists referenced by name from plural_rules.
# NOTE(review): "enconium" and "millenium" look like misspellings of
# "encomium"/"millennium" — kept as-is to preserve matching behavior;
# confirm against upstream.
plural_categories = {
    "uninflected" : ["bison", "bream", "breeches", "britches", "carp", "chassis", "clippers", "cod", "contretemps", "corps", "debris", "diabetes", "djinn", "eland", "elk", "flounder", "gallows", "graffiti", "headquarters", "herpes", "high-jinks", "homework", "innings", "jackanapes", "mackerel", "measles", "mews", "mumps", "news", "pincers", "pliers", "proceedings", "rabies", "salmon", "scissors", "series", "shears", "species", "swine", "trout", "tuna", "whiting", "wildebeest"],
    "uncountable" : ["advice", "bread", "butter", "cheese", "electricity", "equipment", "fruit", "furniture", "garbage", "gravel", "happiness", "information", "ketchup", "knowledge", "love", "luggage", "mathematics", "mayonnaise", "meat", "mustard", "news", "progress", "research", "rice", "sand", "software", "understanding", "water"],
    # "glottis" was listed twice; a single entry suffices for the
    # membership tests these lists are used for.
    "s-singular" : ["acropolis", "aegis", "alias", "asbestos", "bathos", "bias", "caddis", "cannabis", "canvas", "chaos", "cosmos", "dais", "digitalis", "epidermis", "ethos", "gas", "glottis", "ibis", "lens", "mantis", "marquis", "metropolis", "pathos", "pelvis", "polis", "rhinoceros", "sassafras", "trellis"],
    "ex-ices" : ["codex", "murex", "silex"],
    "ex-ices-classical" : ["apex", "cortex", "index", "latex", "pontifex", "simplex", "vertex", "vortex"],
    "um-a" : ["agendum", "bacterium", "candelabrum", "datum", "desideratum", "erratum", "extremum", "ovum", "stratum"],
    "um-a-classical" : ["aquarium", "compendium", "consortium", "cranium", "curriculum", "dictum", "emporium", "enconium", "gymnasium", "honorarium", "interregnum", "lustrum", "maximum", "medium", "memorandum", "millenium", "minimum", "momentum", "optimum", "phylum", "quantum", "rostrum", "spectrum", "speculum", "stadium", "trapezium", "ultimatum", "vacuum", "velum"],
    "on-a" : ["aphelion", "asyndeton", "criterion", "hyperbaton", "noumenon", "organon", "perihelion", "phenomenon", "prolegomenon"],
    "a-ae" : ["alga", "alumna", "vertebra"],
    "a-ae-classical" : ["abscissa", "amoeba", "antenna", "aurora", "formula", "hydra", "hyperbola", "lacuna", "medusa", "nebula", "nova", "parabola"],
    "en-ina-classical" : ["foramen", "lumen", "stamen"],
    "a-ata-classical" : ["anathema", "bema", "carcinoma", "charisma", "diploma", "dogma", "drama", "edema", "enema", "enigma", "gumma", "lemma", "lymphoma", "magma", "melisma", "miasma", "oedema", "sarcoma", "schema", "soma", "stigma", "stoma", "trauma"],
    "is-ides-classical" : ["clitoris", "iris"],
    "us-i-classical" : ["focus", "fungus", "genius", "incubus", "nimbus", "nucleolus", "radius", "stylus", "succubus", "torus", "umbilicus", "uterus"],
    "us-us-classical" : ["apparatus", "cantus", "coitus", "hiatus", "impetus", "nexus", "plexus", "prospectus", "sinus", "status"],
    "o-i-classical" : ["alto", "basso", "canto", "contralto", "crescendo", "solo", "soprano", "tempo"],
    "-i-classical" : ["afreet", "afrit", "efreet"],
    "-im-classical" : ["cherub", "goy", "seraph"],
    "o-os" : ["albino", "archipelago", "armadillo", "commando", "ditto", "dynamo", "embryo", "fiasco", "generalissimo", "ghetto", "guano", "inferno", "jumbo", "lingo", "lumbago", "magneto", "manifesto", "medico", "octavo", "photo", "pro", "quarto", "rhino", "stylo"],
    "general-generals" : ["Adjutant", "Brigadier", "Lieutenant", "Major", "Quartermaster",
                          "adjutant", "brigadier", "lieutenant", "major", "quartermaster"],
}
# Part-of-speech constants accepted by plural()'s pos argument.
NOUN = "noun"
ADJECTIVE = "adjective"
def plural(word, pos=NOUN, classical=True, custom={}):
    """ Returns the plural of a given word, e.g. child -> children.

    Handles nouns and adjectives, using classical inflection by default
    (e.g. "matrix" pluralizes to "matrices" instead of "matrixes").
    The custom dictionary maps words to user-defined replacements; it is
    never mutated here, so the shared default {} is safe.
    """
    if word in custom:
        return custom[word]
    # Recursion of genitives:
    # remove the apostrophe and any trailing -s, form the plural of the
    # resultant noun, and then append an apostrophe (dog's -> dogs').
    # (The test used to check for a trailing "," instead of "'".)
    if (len(word) > 0 and word[-1] == "'") or \
       (len(word) > 1 and word[-2:] == "'s"):
        # Slice off exactly the genitive marker: rstrip("'s") would also
        # strip genuine trailing s-characters (e.g. "boss's" -> "bos").
        if word[-2:] == "'s":
            owner = word[:-2]
        else:
            owner = word[:-1]
        # Propagate all arguments positionally; the old code passed
        # classical into the pos slot and custom into classical.
        owners = plural(owner, pos, classical, custom)
        if owners[-1] == "s":
            return owners + "'"
        else:
            return owners + "'s"
    # Recursion of compound words
    # (Postmasters General, mothers-in-law, Roman deities).
    words = word.replace("-", " ").split(" ")
    if len(words) > 1:
        # "Attorney General" -> "Attorneys General", but ranks listed in
        # plural_categories["general-generals"] fall through to the
        # last-word rule ("Major General" -> "Major Generals").
        if words[1] in ("general", "General") and \
           words[0] not in plural_categories["general-generals"]:
            return word.replace(words[0], plural(words[0], pos, classical, custom))
        elif words[1] in plural_prepositions:
            return word.replace(words[0], plural(words[0], pos, classical, custom))
        else:
            return word.replace(words[-1], plural(words[-1], pos, classical, custom))
    # Only a very few number of adjectives inflect: restrict them to the
    # article/demonstrative and possessive-adjective rule sets.
    n = range(len(plural_rules))
    if pos == ADJECTIVE:
        n = [0, 1]
    import re
    for i in n:
        for suffix, inflection, category, classic in plural_rules[i]:
            # Classical-only rules are skipped outside classical mode.
            if classic and not classical:
                continue
            # Category rules apply only to words in that category.
            if category is not None and word not in plural_categories[category]:
                continue
            if re.search(suffix, word) is not None:
                return re.sub(suffix, inflection, word)
    # Unreachable with the default rules (the final "$" -> "s" rule matches
    # everything), but kept as a safety net for pruned rule sets.
    return word
#print plural("part-of-speech")
#print plural("child")
#print plural("dog's")
#print plural("wolf")
#print plural("bear")
#print plural("kitchen knife")
#print plural("octopus", classical=True)
#print plural("matrix", classical=True)
#print plural("matrix", classical=False)
#print plural("my", pos=ADJECTIVE)
def noun_plural(word, classical=True, custom={}):
    """Inflect *word* as a noun; see plural() for the parameter meanings."""
    return plural(word, pos=NOUN, classical=classical, custom=custom)
def adjective_plural(word, classical=True, custom={}):
    """Inflect *word* as an adjective; see plural() for the parameter meanings."""
    return plural(word, pos=ADJECTIVE, classical=classical, custom=custom)
#!/usr/bin/env python
#
# Copyright 2007 John Wiseman <jjwiseman@yahoo.com>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import sys
import StringIO
import getopt
class RegressiveImageryDictionary:
    """Maps words to the categories of a Regressive Imagery Dictionary.

    To use:
      1. Load a dictionary.
      2. Load an exclusion list (optional).
      3. Call analyze.
      4. Call display_results (or display_results_html) with the value
         returned by analyze.

    All print statements have been converted to single-argument print(...)
    calls, which behave identically under Python 2 and parse under Python 3.
    """
    def __init__(self):
        self.category_tree = CategoryRoot()
        self.exclusion_patterns = []
        # Compiled "megapattern"; built by load_exclusion_list.
        self.exclusion_pattern = None
        self.pattern_tree = DiscriminationTree('root', None)

    def load_dictionary_from_file(self, path):
        """Load a RID dictionary from the file at *path*."""
        rid_in = open(path, "r")
        try:
            self.load_dictionary(rid_in)
        finally:
            rid_in.close()

    def load_dictionary_from_string(self, string):
        """Load a RID dictionary from an in-memory string."""
        rid_in = StringIO.StringIO(string)
        self.load_dictionary(rid_in)

    def load_dictionary(self, stream):
        """Parse a RID dictionary from *stream*.

        The format is tab-indented: zero tabs introduce a primary category,
        one tab a secondary category, two tabs (on a line without a "(") a
        tertiary category; any other line is a word pattern belonging to the
        most recently seen categories.
        """
        primary_category = None
        secondary_category = None
        tertiary_category = None
        for line in stream:
            num_tabs = count_leading_tabs(line)
            # The dictionary is in kind of a weird format.
            if num_tabs == 0:
                primary_category = line.strip()
                secondary_category = None
                tertiary_category = None
            elif num_tabs == 1:
                secondary_category = line.strip()
                tertiary_category = None
            elif num_tabs == 2 and not '(' in line:
                tertiary_category = line.strip()
            else:
                # We have a word pattern.
                pattern = line.strip().split(' ')[0].lower()
                category = self.ensure_category(primary_category, secondary_category, tertiary_category)
                category.add_word_pattern(pattern)
                self.pattern_tree.put(pattern, category)

    def load_exclusion_list_from_file(self, path):
        """Load an exclusion word list from the file at *path*."""
        exc_in = open(path, "r")
        try:
            self.load_exclusion_list(exc_in)
        finally:
            exc_in.close()

    def load_exclusion_list_from_string(self, string):
        """Load an exclusion word list from an in-memory string."""
        exc_in = StringIO.StringIO(string)
        self.load_exclusion_list(exc_in)

    def load_exclusion_list(self, stream):
        """Read glob-style exclusion patterns (one per line) from *stream*."""
        for line in stream:
            pattern = line.strip().lower()
            pattern = pattern.replace("*", ".*")
            self.exclusion_patterns.append(pattern)
        # One megapattern to exclude them all
        self.exclusion_pattern = re.compile('^(' + '|'.join(self.exclusion_patterns) + ')$')

    def token_is_excluded(self, token):
        """Return a truthy match object when *token* is on the exclusion list."""
        return self.exclusion_pattern.match(token)

    def get_category(self, word):
        """Return the first category matching *word*, or None."""
        categories = self.pattern_tree.retrieve(word)
        if categories:
            return categories[0]

    def analyze(self, text):
        """Tokenize *text* and tally category hits; returns a RIDResults."""
        results = RIDResults()
        def increment_category(category, token):
            if not category in results.category_count:
                results.category_count[category] = 0
                results.category_words[category] = []
            results.category_count[category] += 1
            results.category_words[category].append(token)
        # list() is a no-op copy under Python 2 and keeps len() working if
        # tokenize ever returns an iterator (Python 3).
        tokens = list(tokenize(text))
        results.word_count = len(tokens)
        for token in tokens:
            if not self.token_is_excluded(token):
                category = self.get_category(token)
                if category is not None:
                    increment_category(category, token)
        return results

    def display_results(self, results):
        """Print a plain-text report of *results* to stdout."""
        # Detailed category breakout
        total_count = 0
        for (category, count) in sorted(results.category_count.items(), key=lambda x: x[1], reverse=True):
            print("%-60s %5s" % (category.full_name(), count))
            print(" " + " ".join(results.category_words[category]))
            total_count += count
        # Summary for each top-level category
        top_categories = self.category_tree.children.values()
        def get_top_category(cat):
            for top_cat in top_categories:
                if cat.isa(top_cat):
                    return top_cat
            # Fix: the error message previously reported the enclosing loop
            # variable `category` rather than the argument being looked up.
            print("Category %s doesn't exist in %s" % (cat, top_categories))
        top_category_counts = {}
        for top_category in top_categories:
            top_category_counts[top_category] = 0
        for category in results.category_count:
            top_category = get_top_category(category)
            if top_category:
                top_category_counts[top_category] += results.category_count[category]
        print("")
        def percent(x, y):
            if y == 0:
                return 0
            else:
                return (100.0 * x) / y
        for top_category in top_categories:
            count = top_category_counts[top_category]
            print("%-20s: %f %%" % (top_category.full_name(), percent(count, total_count)))
        # Word count
        print("\n%d words total" % (results.word_count,))

    def display_results_html(self, results, title):
        """Print an HTML report of *results*, titled *title*, to stdout."""
        # Detailed category breakout
        total_count = 0
        print("<html><head>")
        print("<meta http-equiv='content-type' content='text/html; charset=UTF-8'>")
        print("""
<style type="text/css">
.word-count { vertical-align: super; font-size: 50%; }
.twisty { color: blue; font-family: monospace; }
a.twisty { text-decoration: none; }
</style>
""")
        print("<title>%s</title>" % (title,))
        print("""
<script>
var TWISTY_EXPANDED = ' ▾ ';
var TWISTY_COLLAPSED = ' ▸ ';
function allWordNodes() {
var nodes = document.getElementsByTagName("tr");
var results = new Array();
var numResults = 0;
for (i = 0; i < nodes.length; i++) {
var node = nodes.item(i);
if (node.className == 'words') {
results[numResults] = node;
numResults++;
}
}
return results;
}
function hideAll() {
allNodes = allWordNodes();
for (var i = 0; i < allNodes.length; i++) {
hide(allNodes[i]);
}
}
function showAll() {
allNodes = allWordNodes();
for (var i = 0; i < allNodes.length; i++) {
show(allNodes[i]);
}
}
function get_twisty_node(category) {
var cell = document.getElementById(category + "-cat");
return cell.childNodes[0];
}
function hide(element) {
element.style.display = "none";
var twisty = get_twisty_node(element.id);
twisty.innerHTML = TWISTY_COLLAPSED;
}
function show(element) {
element.style.display = "";
var twisty = get_twisty_node(element.id);
twisty.innerHTML = TWISTY_EXPANDED;
}
function toggle(cat) {
var node = document.getElementById(cat)
if (node.style.display == "none") {
show(node);
} else {
hide(node);
}
}
</script>
""")
        print("</head><body>")
        print("<h1>%s</h1>" % (title,))
        print("<p><a href='javascript:hideAll()'>- collapse all</a> <a href='javascript:showAll()'>+ expand all</a></p>")
        print("<table width='100%'>")
        for (category, count) in sorted(results.category_count.items(), key=lambda x: x[1], reverse=True):
            sys.stdout.write("<tr>")
            sys.stdout.write("<td class='%s' id='%s'>" % ("category", category.full_name() + "-cat"))
            sys.stdout.write("""<a class='twisty' href="javascript:toggle('%s')"><span class='twisty'> ▾ </span></a>""" % (category.full_name(),))
            # (The original had a stray "" concatenated via an extra pair of
            # quotes here; the emitted string is unchanged.)
            sys.stdout.write("%s</td><td width='*' align='right'>%s</td></tr>" % (category.full_name(), count))
            print("<tr class='%s' id='%s'>" % ("words", category.full_name()))
            print("<td style='padding-left: 1cm;' colspan='2'>")
            words = uniq_c(results.category_words[category])
            for token, token_count in words:
                sys.stdout.write("%s<span class='word-count'>%s</span> " % (token, token_count))
            print("\n</td></tr>")
            total_count += count
        print("</table>")
        # Summary for each top-level category
        top_categories = self.category_tree.children.values()
        def get_top_category(cat):
            for top_cat in top_categories:
                if cat.isa(top_cat):
                    return top_cat
            # Fix: report `cat`, not the enclosing loop variable.
            print("Category %s doesn't exist in %s" % (cat, top_categories))
        top_category_counts = {}
        for top_category in top_categories:
            top_category_counts[top_category] = 0
        for category in results.category_count:
            top_category = get_top_category(category)
            if top_category:
                top_category_counts[top_category] += results.category_count[category]
        def percent(x, y):
            if y == 0:
                return 0
            else:
                return (100.0 * x) / y
        print("<table>")
        for top_category in top_categories:
            count = top_category_counts[top_category]
            print("<tr><td>%s:</td><td>%f %%</td></tr>" % (top_category.full_name(), percent(count, total_count)))
        # Fix: the original printed "<table>" here, leaving the summary table
        # unclosed in the emitted HTML.
        print("</table>")
        # Word count
        print("<p>%d words total</p>" % (results.word_count,))
        print("</body></html>")

    def ensure_category(self, *args):
        """Return the Category at the path *args*, creating nodes as needed.

        *args* is (primary, secondary, tertiary); a leading None entry
        truncates the path at that depth.
        """
        def ensure_cat_aux(category, category_path):
            if len(category_path) == 0 or category_path[0] is None:
                return category
            else:
                cat = category_path.pop(0)
                if not cat in category.children:
                    category.children[cat] = Category(cat, category)
                return ensure_cat_aux(category.children[cat], category_path)
        return ensure_cat_aux(self.category_tree, list(args))
class RIDResults:
    """Accumulator for one analysis run: per-category counts and tokens."""
    def __init__(self):
        self.word_count = 0        # total number of tokens examined
        self.category_count = {}   # Category -> number of matching tokens
        self.category_words = {}   # Category -> list of the matching tokens
# Any run of non-alphabetic characters separates two words.
WORD_REGEX = re.compile(r'[^a-zA-Z]+')

def tokenize(string):
    """Lowercase *string* and split it into purely-alphabetic tokens."""
    pieces = WORD_REGEX.split(string.lower())
    return filter(None, pieces)
def count_leading_tabs(string):
    """Return the number of leading tab characters in *string*.

    Fix: the original fell off the loop and returned None for the empty
    string or a string consisting entirely of tabs; now returns
    len(string) in that case (i.e. every character is a leading tab).
    """
    for i, char in enumerate(string):
        if char != '\t':
            return i
    return len(string)
class DiscriminationTree:
    """Character trie mapping word patterns to categories.

    put() files a leaf (category) under a character path; a '*' in the
    path marks that node as a wildcard matching any remaining suffix.
    retrieve() walks a word through the trie and returns the leaves at
    the matching node, or None when the word falls off the trie.
    """
    def __init__(self, index, parent):
        self.index = index
        self.parent = parent
        self.leaves = []          # payloads stored at this node
        self.interiors = []       # child DiscriminationTree nodes
        self.is_wildcard = False  # set when a '*' ends a pattern here

    def __str__(self):
        return "<DiscriminationTree %s>" % (self.index,)

    def child_matching_index(self, index):
        """Return the child node whose index equals *index*, or None."""
        for node in self.interiors:
            if node.index == index:
                return node
        return None

    def retrieve(self, path):
        """Return the leaves matching *path*, or None if nothing matches."""
        if self.is_wildcard or len(path) == 0:
            return self.leaves
        child = self.child_matching_index(path[0])
        if child is None:
            return None
        return child.retrieve(path[1:])

    def put(self, path, leaf):
        """Insert *leaf* under *path*, creating interior nodes as needed."""
        if len(path) == 0:
            if isinstance(leaf, DiscriminationTree):
                self.interiors.append(leaf)
            else:
                self.leaves.append(leaf)
            return True
        head = path[0]
        if head == '*':
            # Got a '*' so this node is a wildcard that will match
            # anything that reaches it.
            self.is_wildcard = True
            self.leaves.append(leaf)
        else:
            child = self.child_matching_index(head)
            if child is None:
                child = DiscriminationTree(head, self)
                self.interiors.append(child)
            child.put(path[1:], leaf)

    def dump(self, stream=sys.stdout, indent=0):
        """Pretty-print the subtree rooted at this node to *stream*."""
        stream.write("\n" + " " * indent + str(self))
        for leaf in self.leaves:
            stream.write("\n" + " " * (indent + 3) + str(leaf))
        for child in self.interiors:
            child.dump(stream=stream, indent=indent + 3)
class Category:
    """A node in the category hierarchy (primary:secondary:tertiary)."""
    def __init__(self, name, parent):
        self.name = name
        self.parent = parent
        self.children = {}  # child name -> Category
        self.leaves = []    # word patterns filed under this category

    def __str__(self):
        return "<Category %s>" % (self.full_name(),)

    def add_word_pattern(self, pattern):
        """Record a word pattern as belonging to this category."""
        self.leaves.append(pattern)

    def full_name(self):
        """Return the colon-separated path from the top level down to here."""
        at_top = self.parent is None or isinstance(self.parent, CategoryRoot)
        if at_top:
            return self.name
        return "%s:%s" % (self.parent.full_name(), self.name)

    def isa(self, parent):
        """True when *parent* is this category or one of its ancestors."""
        if parent is self:
            return True
        return bool(self.parent and self.parent.isa(parent))

class CategoryRoot(Category):
    """The anonymous root of the category hierarchy."""
    def __init__(self):
        Category.__init__(self, 'root', None)

    def full_name(self):
        return ""
def uniq_c(words):
    """Return (word, count) pairs for *words*, most frequent first.

    Ties are broken alphabetically: counts are accumulated over a sorted
    copy and the final count sort is stable.

    Fixes two defects in the original: it raised IndexError on an empty
    list, and it sorted the caller's list in place.
    """
    if not words:
        return []
    counts = []
    run_word = None
    run_count = 0
    # sorted() copies, so the caller's list is left untouched.
    for word in sorted(words):
        if word == run_word:
            run_count += 1
        else:
            if run_word is not None:
                counts.append((run_word, run_count))
            run_word = word
            run_count = 1
    counts.append((run_word, run_count))
    counts.sort(key=lambda pair: pair[1], reverse=True)
    return counts
# This dictionary is the one from
# http://www.provalisresearch.com/Download/RID.ZIP with misspellings
# fixed.
DEFAULT_RID_DICTIONARY = """
PRIMARY
NEED
ORALITY
ABSINTH* (1)
ALE (1)
ALES (1)
ALIMENTARY (1)
AMBROSIA* (1)
AMBROSIAL* (1)
APPETIT* (1)
APPLE* (1)
ARTICHOK* (1)
ASPARAGU* (1)
BACON* (1)
BANANA* (1)
BEAN* (1)
BEEF* (1)
BEER* (1)
BELCH* (1)
BELLIES (1)
BELLY (1)
BERRI* (1)
BERRY* (1)
BEVERAG* (1)
BISCUIT* (1)
BITE* (1)
BITE (1)
BITES (1)
BITING (1)
BITTEN* (1)
BONBON* (1)
BRANDY* (1)
BREAD* (1)
BREAKFAST* (1)
BREAST* (1)
BREW* (1)
BROTH (1)
BURP* (1)
BUTTER* (1)
BUTTERMILK* (1)
CAFE (1)
CAFES (1)
CAKE (1)
CAKES (1)
CAFETARIA (1)
CANDY* (1)
CANNIBAL* (1)
CAVIAR* (1)
CHAMPAGN* (1)
CHEES* (1)
CHERRI* (1)
CHERRY* (1)
CHESTNUT* (1)
CHEW* (1)
CHOK* (1)
CIDER* (1)
CLARET* (1)
COB (1)
COBS (1)
COCOA* (1)
COCOANUT* (1)
COCONUT* (1)
COFFE* (1)
CONSUM* (1)
COOK* (1)
CORN* (1)
COUGH* (1)
CRANBERRY* (1)
CREAM* (1)
DELICACI* (1)
DELICACY* (1)
DESSERT* (1)
DEVOUR* (1)
DIET* (1)
DIGEST* (1)
DINE (1)
DINES (1)
DINING (1)
DINNER* (1)
DISH (1)
DISHES (1)
DRANK* (1)
DRINK* (1)
DRUNK* (1)
DRUNKEN* (1)
EAT* (1)
EATEN* (1)
EGG* (1)
ENTRAIL* (1)
FAMIN* (1)
FAMISH* (1)
FAST (1)
FASTS (1)
FAT (1)
FATTEN* (1)
FEAST* (1)
FED (1)
FEED (1)
FEEDS (1)
FIG (1)
FIGS (1)
FLOUR* (1)
FOOD* (1)
FOODSTUFF* (1)
FORK* (1)
FRUIT* (1)
GARLIC* (1)
GIN (1)
GINGER* (1)
GINS (1)
GLUTTON* (1)
GLUTTONOU* (1)
GNAW* (1)
GOBBL* (1)
GRAIN* (1)
GRAP* (1)
GROG* (1)
GRUEL* (1)
GULP* (1)
GUM (1)
GUMS (1)
GUT (1)
GUTS (1)
HAM (1)
HAMS (1)
HERB* (1)
HONEY* (1)
HUNGER* (1)
HUNGRY* (1)
IMBIB* (1)
INEDIBL* (1)
INTESTIN* (1)
JAW* (1)
JUIC* (1)
LAP (1)
LAPS (1)
LEMON* (1)
LICK* (1)
LIME (1)
LIMES (1)
LIP (1)
LIPS (1)
LIQUEUR* (1)
LIQUOR* (1)
LUNCH* (1)
MAIZ* (1)
MEAL* (1)
MEAT* (1)
MELON* (1)
MENU* (1)
MILK* (1)
MINT* (1)
MORSEL* (1)
MOUTH* (1)
MOUTHFUL* (1)
MUSHROOM* (1)
MUTTON* (1)
NAUS* (1)
NECTAR* (1)
NIBBL* (1)
NOURISH* (1)
NOURISHMENT* (1)
NURTUR* (1)
NUT (1)
NUTS (1)
OLIV* (1)
ORAL* (1)
PALAT* (1)
PARTAK* (1)
PASTRI* (1)
PASTRY* (1)
PEA (1)
PEANUT* (1)
PEAR* (1)
PEAS (1)
PEPPER* (1)
PHILTR* (1)
PINEAPPL* (1)
POISON* (1)
PORK* (1)
PORRIDG* (1)
POT (1)
POTATO* (1)
POTBEL* (1)
POTS (1)
PUCKER* (1)
PUMPKIN* (1)
QUENCH* (1)
RASPBERRY* (1)
RAW (1)
RAWLY (1)
REPAST* (1)
RESTAURANT* (1)
RESTAURENT* (1)
RICE (1)
RICES (1)
RIPENES* (1)
ROAST* (1)
RUM (1)
RUMS (1)
SALAD* (1)
SALIVA* (1)
SALIVAT* (1)
SALT* (1)
SAUC* (1)
SAUERKRAUT* (1)
SESAM* (1)
SHERBERT* (1)
SHERRY* (1)
SOUP* (1)
SPAT* (1)
SPIT* (1)
SPITTL* (1)
SPOON* (1)
STARV* (1)
STARVAT* (1)
STOMACH* (1)
STRAWBERRI* (1)
STRAWBERRY* (1)
SUCK* (1)
SUCKL* (1)
SUGAR* (1)
SUPPER* (1)
SWALLOW* (1)
TEA (1)
TEAS (1)
TEAT* (1)
TEETH* (1)
THIRST* (1)
THIRSTY* (1)
THROAT* (1)
TIT (1)
TITS (1)
TOMATO* (1)
TONGU* (1)
TOOTH* (1)
UNCOOK* (1)
VEAL* (1)
VEGETABL* (1)
VENISON* (1)
VODKA* (1)
VOMIT* (1)
WHEAT* (1)
WHISKEY* (1)
WHISKY* (1)
YAM (1)
YAMS* (1)
YEAST* (1)
ANALITY
ANAL (1)
ANUS (1)
ANUSES (1)
ARSE (1)
ARSEHOL* (1)
ASSES (1)
ASS-HOL* (1)
ASSHOL* (1)
BESHAT* (1)
BESHIT* (1)
BESMEAR* (1)
BILE* (1)
BOWEL* (1)
BUTTOCK* (1)
CACK* (1)
CESSPOOL* (1)
CLOACA* (1)
CLOT (1)
CLOTS* (1)
CONSTIPAT* (1)
DANK* (1)
DAUB* (1)
DEFECAT* (1)
DEFIL* (1)
DELOUS* (1)
DIARRHOEA* (1)
DIRT* (1)
DIRTY* (1)
DISGUST* (1)
DUNG* (1)
DUNGHILL* (1)
EFFLUVIA* (1)
EFFLUVIUM* (1)
ENEMA* (1)
EXCRET* (1)
FART (1)
FARTS (1)
FECAL* (1)
FECES (1)
FETID* (1)
FETOR* (1)
FILTH* (1)
FILTHY* (1)
IMPUR* (1)
LATRIN* (1)
LICE (1)
LOATHSOM* (1)
LOUS* (1)
MAGGOT* (1)
MAGGOTY* (1)
MALODOROU* (1)
MALODOUROU* (1)
MANUR* (1)
MESS (1)
MESSES (1)
MESSING (1)
MIASMA* (1)
MUD (1)
MUDDY* (1)
MUDS (1)
OFFAL* (1)
OOZ* (1)
OOZY* (1)
OUTHOUS* (1)
PISS* (1)
POLLUT* (1)
PUTRESCENC* (1)
PUTRESCENT* (1)
PUTRID* (1)
RANCID* (1)
RECTUM* (1)
REEK* (1)
ROT (1)
ROTS (1)
ROTTEN* (1)
ROTTING (1)
RUMP* (1)
SCUM* (1)
SEWER* (1)
SHAT* (1)
SHIT* (1)
SLIMY* (1)
SMEAR* (1)
SODOMIST* (1)
SODOMY* (1)
SOIL* (1)
STAL* (1)
STENCH* (1)
STINK* (1)
SWEAT* (1)
UNCLEAN* (1)
UNWASH* (1)
URIN* (1)
SEX
VENEREAL* (1)
ADULTERER* (1)
ADULTERY* (1)
ALLUR* (1)
BAWD* (1)
BAWDY* (1)
BITCH* (1)
BROTHEL* (1)
CARESS* (1)
CARNAL* (1)
CIRCUMCIS* (1)
CLITORI* (1)
COHABIT* (1)
COITU* (1)
CONCUBIN* (1)
COPULAT* (1)
COQUETT* (1)
COQUETTISH* (1)
COURTESAN* (1)
CUCKOLD* (1)
CUNT* (1)
CUPID* (1)
DEBAUCH* (1)
DEFLOWER* (1)
EJACULAT* (1)
EROTIC* (1)
FONDL* (1)
FORNICAT* (1)
FUCK* (1)
GENITAL* (1)
GENITALIA* (1)
GIRDL* (1)
GROIN* (1)
HAREM* (1)
HARLOT* (1)
HOMOSEXUAL* (1)
HOMOSEXUALITY* (1)
IMMODEST* (1)
INCEST* (1)
INCESTUOU* (1)
INDECENT* (1)
INDISCRET* (1)
INFATUAT* (1)
KISS* (1)
LASCIVIOU* (1)
LECHER* (1)
LECHEROU* (1)
LECHERY* (1)
LEER (1)
LEERS (1)
LEWD* (1)
LIBERTIN* (1)
LICENTIOU* (1)
LOVER* (1)
LUST* (1)
LUSTFUL* (1)
LUSTY* (1)
MASTURBAT* (1)
MENSTRUAL* (1)
MENSTRUAT* (1)
MISTRES* (1)
NAKED (1)
NUDE (1)
NUDES (1)
OBSCEN* (1)
OBSCENITY* (1)
ORGASM* (1)
ORGI* (1)
ORGY* (1)
PANDER* (1)
PARAMOUR* (1)
PENI* (1)
PERVERS* (1)
PERVERT* (1)
PHALLIC* (1)
PHALLU* (1)
PREGNANCY* (1)
PREGNANT* (1)
PROCREAT* (1)
PROSTITUT* (1)
PRURIENT* (1)
PUBERTY* (1)
PUBI* (1)
PUBIC* (1)
RAPE* (1)
RAPING* (1)
RIBALD* (1)
SATYR* (1)
SEDUC* (1)
SENSUAL* (1)
SENSUOU* (1)
SEX (1)
SEXED (1)
SEXES (1)
SEX-LINKED (1)
SEXUAL* (1)
SEXY* (1)
SHAMELES* (1)
SLATTERN* (1)
SLUT* (1)
SLUTTY* (1)
TESTI* (1)
TESTICL* (1)
THIGH* (1)
TROLLOP* (1)
UNBLUSH* (1)
UNDRES* (1)
VAGINA* (1)
VENU* (1)
VOLUPTUOU* (1)
VULVA* (1)
WAIST* (1)
WANTON* (1)
WHOR* (1)
WOMB* (1)
SENSATION (1)
SENSATION
TOUCH
BRUSH* (1)
COARS* (1)
CONTACT* (1)
CUDD* (1)
CUDDL* (1)
HANDL* (1)
ITCH* (1)
ITCHY* (1)
MASSAG* (1)
PRICKL* (1)
ROUGH* (1)
RUB (1)
RUBB* (1)
RUBS (1)
SCALY (1)
SCRATCH* (1)
SHARP* (1)
SLICK* (1)
SLIPPERY* (1)
SMOOTH* (1)
SNUGGL* (1)
STING (1)
STINGS (1)
STROK* (1)
TEXTUR* (1)
THICK* (1)
TICKL* (1)
TINGL* (1)
TOUCH* (1)
WAXY* (1)
TASTE
AFTERTAST* (1)
BITTER* (1)
DELECTABL* (1)
DELICIOU* (1)
FLAVOR* (1)
GALL (1)
HONI* (1)
LUSCIOU* (1)
PIQUANT* (1)
SAVOR* (1)
SAVORY* (1)
SAVOUR* (1)
SAVOURY* (1)
SOUR* (1)
SPIC* (1)
SPICY* (1)
SUGARY* (1)
SWEET* (1)
SWEETNES* (1)
TANG* (1)
TANGY* (1)
TART* (1)
TAST* (1)
TASTY* (1)
TOOTHSOM* (1)
UNPALATABL* (1)
UNSAVORY* (1)
VINEGAR* (1)
VINEGARY* (1)
ODOR
AROMA* (1)
AROMATIC* (1)
BREATH* (1)
COLOGN* (1)
FRAGRANC* (1)
FRAGRANT* (1)
FUME* (1)
FUMING* (1)
INCENS* (1)
INHAL* (1)
MUSK* (1)
MUSKY* (1)
MUSTY* (1)
NOSE* (1)
NOSTRIL* (1)
ODOR* (1)
ODOUR* (1)
PERFUM* (1)
PUNGENC* (1)
PUNGENT* (1)
SCENT* (1)
SMEL* (1)
SMELL* (1)
SNIF* (1)
SNIFF* (1)
GENERAL-SENSATION
APPERCEIVE (1)
APPERCEPTIVE (1)
ATTENT* (1)
AWAR* (1)
AWARENES* (1)
BALMY* (1)
BASK* (1)
BEAUTIFUL* (1)
BEAUTY* (1)
CHARM* (1)
COMFORT* (1)
COMFORTABL* (1)
CREAMY* (1)
FAIR* (1)
IMPRESS* (1)
LOVELINES* (1)
LUSH* (1)
LUXURIOU* (1)
LUXURY* (1)
MILKY* (1)
OVERSENSITIV* (1)
PERCEIV* (1)
PERCEPT* (1)
PERCEPTUAL (1)
PHYSICAL* (1)
PLEASANT* (1)
PRETTY* (1)
REFRESH* (1)
RELISH* (1)
REVEL* (1)
SENSAT* (1)
SENSITIV* (1)
STIMULAT* (1)
SUMPTUOU* (1)
SOUND
AUDITORILLY (1)
ALOUD* (1)
AUDIBL* (1)
AUDITION (1)
AUDITORY* (1)
AURAL (1)
BANG* (1)
BELL* (1)
BINAURAL (1)
BLAR* (1)
BOOM* (1)
BUZZ* (1)
CHORD* (1)
CHORU* (1)
CLACK* (1)
CLAMOR* (1)
CLAMOUR* (1)
CLANG* (1)
CRACKL* (1)
CROAK* (1)
DEAF* (1)
DRON* (1)
DRUM* (1)
EAR (1)
EARS (1)
ECHO* (1)
HARK* (1)
HEAR* (1)
HEARD* (1)
HISS* (1)
HUM (1)
HUMM* (1)
HUMS (1)
LISTEN* (1)
LOUD* (1)
LOUDER* (1)
MELODI* (1)
MELODIOU* (1)
MELODY* (1)
MUFFL* (1)
MUSIC* (1)
MUSICAL* (1)
NOIS* (1)
NOISY* (1)
PEAL* (1)
PURR* (1)
RACKET* (1)
RASP* (1)
RATTL* (1)
RAUCOU* (1)
RESONANT* (1)
RESOUND* (1)
RHYTHM* (1)
RING* (1)
RUMBL* (1)
RUSTL* (1)
SERENAD* (1)
SHRILL* (1)
SNAP* (1)
SONOROU* (1)
SOUND* (1)
STRIDANT* (1)
STRIDENT* (1)
SWISH* (1)
SYMPHONY* (1)
TEMPO* (1)
THUD* (1)
TIMBR* (1)
TINKL* (1)
TONAL (1)
TONE (1)
TONED (1)
TONES (1)
TRILL* (1)
TUNE (1)
TUNED (1)
TUNES (1)
TUNING* (1)
UNHEARD* (1)
VOCAL* (1)
VOIC* (1)
WHIR* (1)
WHIRR* (1)
WHISTL* (1)
AFTER-IMAGE* (1)
VISION
BLINK* (1)
ILLUMINANT (1)
INVISIBILITY (1)
MONOCULAR (1)
AMBER* (1)
APPEAR* (1)
APPEARANC* (1)
AURORA* (1)
AZUR* (1)
BEAM* (1)
BEHOLD* (1)
BINOCULAR (1)
BLUE* (1)
BLUISH* (1)
BRIGHT* (1)
BROWN* (1)
BRUNETT* (1)
CHROMATIC* (1)
COLOR* (1)
COLOUR* (1)
COMPLEX* (1)
CRIMSON* (1)
DISCERN* (1)
DYE* (1)
EMERALD* (1)
FILM* (1)
FLASH* (1)
FLICKER* (1)
FLOURESCENT* (1)
GAZE* (1)
GAZING* (1)
GLANC* (1)
GLAR* (1)
GLEAM* (1)
GLIMPS* (1)
GLINT* (1)
GLISTEN* (1)
GLITTER* (1)
GLOSSY* (1)
GLOW* (1)
GRAY* (1)
GREEN* (1)
GREY* (1)
HALO* (1)
HUE* (1)
ILLUMINAT* (1)
IMAG* (1)
INVISIBL* (1)
LAMP* (1)
LANTERN* (1)
LAVENDER* (1)
LIGHT* (1)
LIGHTEN* (1)
LIGHTN* (1)
LIMPID* (1)
LOOK* (1)
LUCID* (1)
LUMINANCE (1)
LUMINOU* (1)
LUSTER* (1)
LUSTROU* (1)
MOONBEAM* (1)
MOONLIGHT* (1)
NOTIC* (1)
OBSERV* (1)
OPAQU* (1)
PAINT* (1)
PEEK* (1)
PEER* (1)
PICTUR* (1)
PINK* (1)
RADIANC* (1)
RADIANT* (1)
RAY (1)
RAYS (1)
REGARD* (1)
ROSY* (1)
ROUG* (1)
RUBY* (1)
RUDDY* (1)
SAPPHIR* (1)
SAW (1)
SCAN (1)
SCANN* (1)
SCANS (1)
SCARLET* (1)
SCEN* (1)
SCENIC* (1)
SEE (1)
SEEING* (1)
SEEN* (1)
SEES (1)
SHEEN* (1)
SHIMMER* (1)
SHIN* (1)
SHON* (1)
SIGHT* (1)
SPARKL* (1)
SPIED (1)
SPIES (1)
SPY (1)
SPYING* (1)
STAR (1)
STARLIGHT* (1)
STARS (1)
SUNLIGHT* (1)
SUNSHIN* (1)
SURVEY* (1)
TAN (1)
TANNED (1)
TANNING* (1)
TANS (1)
TINT* (1)
TRANSLUCENT* (1)
TRANSPARENT* (1)
TWINKL* (1)
UNSEEN* (1)
VIEW* (1)
VIOLET* (1)
VISIBL* (1)
VISION* (1)
VISUAL* (1)
WATCH* (1)
WITNES* (1)
YELLOW* (1)
COLD
ALASKA* (1)
ARCTIC* (1)
BENUMB* (1)
CHIL* (1)
CHILL* (1)
COLD* (1)
COLDER* (1)
COOL* (1)
FREEZ* (1)
FRIGID* (1)
FROST* (1)
FROSTBIT* (1)
FROZ* (1)
FROZEN* (1)
GLACIER* (1)
HOAR* (1)
ICE* (1)
ICINES* (1)
ICING* (1)
ICY (1)
NORTH* (1)
NORTHERN* (1)
NUMB (1)
NUMBNESS* (1)
POLAR* (1)
SHIVER* (1)
SIBERIA* (1)
SLEET* (1)
SNOW* (1)
SNOWSTORM* (1)
SNOWY* (1)
THUL* (1)
WINTER* (1)
WINTRY* (1)
HARD
ALABASTER* (1)
BRAS* (1)
BRASSY* (1)
BRAZEN* (1)
BRITTL* (1)
BRONZ* (1)
COPPER* (1)
CRISP* (1)
CRISPY* (1)
GLAS* (1)
GLASSY* (1)
GRANIT* (1)
GRAVEL* (1)
HARD* (1)
IRON* (1)
MARBL* (1)
METAL* (1)
METALLIC* (1)
NAIL* (1)
PEBB* (1)
PORCELAIN* (1)
RIGID* (1)
ROCK* (1)
SOLID* (1)
SPLINTER* (1)
STEEL* (1)
STIFF* (1)
STON* (1)
STONY* (1)
ZINC* (1)
SOFT
DAMASK* (1)
DELICAT* (1)
DOWNY* (1)
FEATHER* (1)
FLEEC* (1)
FLEECY* (1)
FLUFFY* (1)
GENTL* (1)
GENTLENES* (1)
GOSSAMER* (1)
LACE (1)
LACES (1)
LACING* (1)
LACY* (1)
MELLOW* (1)
MILD* (1)
MURMUR* (1)
PLIANT* (1)
POWDERY* (1)
SATIN* (1)
SATINY* (1)
SILK* (1)
SOFT* (1)
TENDER* (1)
TING* (1)
VELVET* (1)
VELVETY* (1)
WHISPER* (1)
DEFENSIVE SYMBOLIZATION
PASSIVITY
STAGNANT (1)
APATHETIC* (1)
APATHY* (1)
BED (1)
BEDD* (1)
BEDS (1)
BOREDOM* (1)
CALM* (1)
CONTENTED* (1)
CONTENTMENT* (1)
COUCH* (1)
COZY* (1)
DEAD* (1)
DEATH* (1)
DECAY* (1)
DIE (1)
DIED* (1)
DIES (1)
DORMANT* (1)
DRIFT* (1)
DYING* (1)
EASE* (1)
EASED (1)
EASES (1)
HUSH* (1)
IDL* (1)
IMMOBIL* (1)
INACTIV* (1)
INACTIVITY* (1)
INDIFFERENC* (1)
INDIFFERENT* (1)
INDOLENT* (1)
INERT* (1)
INERTIA* (1)
INNERT* (1)
LAID* (1)
LAIN* (1)
LANGOROU* (1)
LANGUID* (1)
LANGUISH* (1)
LANGUOR* (1)
LASSITUD* (1)
LAY (1)
LAYING* (1)
LAYS (1)
LAZY* (1)
LEADEN* (1)
LEISUR* (1)
LETHARGIC* (1)
LETHARGY* (1)
LIE (1)
LIES (1)
LINGER* (1)
LISTLES* (1)
LUL* (1)
LULL* (1)
MOTIONLES* (1)
NESTL* (1)
NONCHALANC* (1)
NONCHALANT* (1)
PASSIV* (1)
PASSIVITY* (1)
PEACEFUL* (1)
PERISH* (1)
PHLEGMATIC* (1)
PLACID* (1)
PROCRASTINAT* (1)
QUIET* (1)
RELAX* (1)
RELAXAT* (1)
REPOS* (1)
REST* (1)
RESTFUL* (1)
RETIR* (1)
SAFE (1)
SAFELY (1)
SAFETY* (1)
SECUR* (1)
SECURITY* (1)
SEDENTARY* (1)
SEREN* (1)
SERENITY* (1)
SILENC* (1)
SILENT* (1)
SLACK* (1)
SLOTHFUL* (1)
SLOW* (1)
SLUGGISH* (1)
SOLAC* (1)
SOOTH* (1)
STAGNAT* (1)
STATIC* (1)
STILLNES* (1)
SUBMISS* (1)
SUBMISSIV* (1)
SUBMIT* (1)
SUCCUMB* (1)
TRANQ* (1)
UNHURRI* (1)
VAGRANT* (1)
VELLEITY* (1)
WEARISOM* (1)
WEARY* (1)
YIELD* (1)
VOYAGE
CARAVAN* (1)
CHAS* (1)
CRUIS* (1)
DESERT* (1)
DRIV* (1)
EMBARK* (1)
EMIGRAT* (1)
EXPLOR* (1)
IMMIGRAT* (1)
IMMIGRANT* (1)
JOURNEY* (1)
MIGRAT* (1)
NAVIGAT* (1)
NOMAD* (1)
NOMADIC* (1)
OSCILLAT* (1)
PILGRIM* (1)
PILGRIMAG* (1)
RIDE (1)
RIDES (1)
RIDING* (1)
ROAM* (1)
RODE (1)
ROV* (1)
SAIL* (1)
SAILOR* (1)
SEAFAR* (1)
SEARCH* (1)
SHIP* (1)
STRAY* (1)
TOUR* (1)
TOURIST* (1)
TRAVEL* (1)
TREK* (1)
TRIP* (1)
VAGABOND* (1)
VOYAG* (1)
WANDER* (1)
WANDERLUST* (1)
WAYFARER* (1)
WILDERNES* (1)
YONDER* (1)
RANDOM MOVEMENT
ACTIVITI* (1)
ACTIVITY* (1)
AGITAT* (1)
CHURN* (1)
COMMOT* (1)
CONVULS* (1)
EXPAND* (1)
EXPANS* (1)
FIDGET* (1)
FLOUNDER* (1)
FLURRI* (1)
FLURRY* (1)
JERK* (1)
LURCH* (1)
ORBIT* (1)
PITCH* (1)
PIVOT* (1)
PULS* (1)
PULSAT* (1)
QUAK* (1)
QUIVER* (1)
REEL* (1)
REVOLV* (1)
ROL* (1)
ROLL* (1)
ROTAT* (1)
SEETH* (1)
SHAK* (1)
SHOOK* (1)
SPASM* (1)
SPIN* (1)
SPREAD* (1)
STAGGER* (1)
STIR* (1)
SWAY* (1)
SWEL* (1)
SWELL* (1)
SWIVEL* (1)
SWOLLEN* (1)
THROB* (1)
TOTTER* (1)
TWICH* (1)
TWIST* (1)
TWITCH* (1)
UNDULAT* (1)
VIBRAT* (1)
WAVE (1)
WAVED (1)
WAVES (1)
WAVING* (1)
WHIRL* (1)
WOBBL* (1)
DIFFUSION
BLUR* (1)
CLOUD* (1)
CLOUDY* (1)
CURTAIN* (1)
DARKEN* (1)
DIFFUS* (1)
DIM (1)
DIMM* (1)
DIMS (1)
EQUIVOCAL* (1)
FADE (1)
FADED (1)
FADES* (1)
FADING* (1)
FOG (1)
FOGG* (1)
FOGS (1)
HAZE* (1)
HAZING* (1)
HAZY* (1)
INDEFINIT* (1)
INDISTINCT* (1)
MIST* (1)
MISTY* (1)
MURKINES* (1)
MURKY* (1)
NEBULA* (1)
NEBULOU* (1)
OBSCUR* (1)
OVERCAST* (1)
SCREEN* (1)
SHAD* (1)
SHADOW* (1)
SHADOWY* (1)
SHADY* (1)
TWILIGHT* (1)
UNCERTAIN* (1)
UNCERTAINT* (1)
UNCLEAR* (1)
VAGU* (1)
VAPOR* (1)
VAPOUR* (1)
VEIL* (1)
CHAOS
AIMLES* (1)
AMBIGUIT* (1)
AMBIGUOU* (1)
ANARCHY* (1)
CHANC* (1)
CHAO* (1)
CHAR (1)
CHARS (1)
CATASTROPHE (1)
CONFUS* (1)
CROWD* (1)
DISCORD* (1)
DISCORDANT* (1)
DISHEVEL* (1)
DISORDER* (1)
ENTANGL* (1)
GORDIAN* (1)
HAPHAZARD* (1)
IRREGULAR* (1)
JUMBL* (1)
JUNGL* (1)
LABYRINTH* (1)
LAWLES* (1)
LITTER* (1)
MOB (1)
MOBB* (1)
MOBS (1)
OVERGROWN* (1)
OVERRUN* (1)
PERPLEX* (1)
RANDOM* (1)
RUIN* (1)
UNRU* (1)
WILD* (1)
REGRESSIVE COGNITION
UNKNOWN
BIZZAR* (1)
BODILES* (1)
BOUNDLES* (1)
CRYPTIC* (1)
ENIGMA* (1)
ESOTERIC* (1)
EXOTIC* (1)
FANTASTIC* (1)
FORMLES* (1)
IMMEASURABL* (1)
INCONCEIVABL* (1)
INCREDIBL* (1)
INDESCRIBABL* (1)
INEFFABL* (1)
INFINITY* (1)
INSCRUTABL* (1)
LIMITLES* (1)
MAGI* (1)
MAGIC* (1)
MAGU* (1)
MARVEL* (1)
MYST* (1)
NAMELES* (1)
NOTHINGNES* (1)
NUMBERLES* (1)
OCCULT* (1)
ODD* (1)
SECRECY* (1)
SECRET* (1)
SHAPELES* (1)
SORCERER* (1)
SORCERES* (1)
STRANG* (1)
TRANSCEND* (1)
UNBELIEVABL* (1)
UNBOUND* (1)
UNIMAGINABL* (1)
UNKNOWN* (1)
UNLIMIT* (1)
UNSPEAKABL* (1)
UNTOLD* (1)
VOID* (1)
TIMELESSNESS
AEON* (1)
CEASELES* (1)
CENTURI* (1)
CENTURY* (1)
CONTINUAL* (1)
CONTINUOU* (1)
ENDLES* (1)
ENDUR* (1)
EON* (1)
ETERNAL* (1)
ETERNITY* (1)
EVERLAST* (1)
FOREVER* (1)
IMMORTAL* (1)
INCESSANT* (1)
LIFETIM* (1)
OUTLIV* (1)
PERMANENC* (1)
PERMANENT* (1)
PERPETUAL* (1)
TIMELESSNES* (1)
UNCEAS* (1)
UNDY* (1)
UNEND* (1)
TEST5
CONSCIOUSNESS ALTERATION
AMUCK* (1)
ASLEEP* (1)
AWAK* (1)
AWAKEN* (1)
BEDLAM* (1)
COMA* (1)
CRAZ* (1)
CRAZY* (1)
DELIRIOU* (1)
DELIRIUM* (1)
DELPHIC* (1)
DEMENT* (1)
DOZE (1)
DOZED (1)
DOZES (1)
DOZING (1)
DREAM* (1)
DREAMY* (1)
DROWSY* (1)
ECSTACY* (1)
ECSTASY* (1)
ECSTATIC* (1)
ENCHANT* (1)
EPILEPSY* (1)
EPILEPTIC* (1)
EXSTASY* (1)
FAINT* (1)
FANTASI* (1)
FANTASY* (1)
FEBRIL* (1)
FEVER* (1)
FEVERISH* (1)
FRENZY* (1)
HALLUCINAT* (1)
HASHISH* (1)
HIBERNAT* (1)
HYPNO* (1)
HYSTERIA* (1)
HYSTERIC* (1)
IMAGIN* (1)
IMAGINAT* (1)
INSAN* (1)
INSANITY* (1)
INTUIT* (1)
IRRATIONAL* (1)
LAUDANUM* (1)
LUNACY* (1)
LUNATIC* (1)
MAD (1)
MADLY (1)
MADMAN* (1)
MADMEN* (1)
MADNES* (1)
MADWOMAN* (1)
MADWOMEN* (1)
MANIA* (1)
MANIAC* (1)
MEDITAT* (1)
MESMERIZ* (1)
MONOMANIA* (1)
NAP (1)
NAPP* (1)
NAPS (1)
NEUROSI* (1)
NEUROTIC* (1)
NIGHTMAR* (1)
NIGHTMARISH* (1)
OPIUM* (1)
OPIATES (1)
ORACL* (1)
PARANO* (1)
PREMONIT* (1)
PSYCHIC* (1)
PSYCHOSI* (1)
PSYCHOTIC* (1)
RAPTUR* (1)
RAPTUROU* (1)
REVERI* (1)
REVERY* (1)
REVIV* (1)
SEER* (1)
SLEEP* (1)
SLEEPY* (1)
SLUMBER* (1)
STUPOR* (1)
SWOON* (1)
TELEPATHY* (1)
TRANC* (1)
UNREASON* (1)
VERTIGO* (1)
VISIONARY* (1)
WAK* (1)
WOKE (1)
BRINK-PASSAGE
ACCES* (1)
AISL* (1)
AQUEDUCT* (1)
ARTERI* (1)
ARTERY* (1)
AVENU* (1)
BARRIER* (1)
BORDER* (1)
BOUNDARI* (1)
BOUNDARY* (1)
BRIDG* (1)
BRIM* (1)
BRINK* (1)
CANAL* (1)
CHANNEL* (1)
COAST* (1)
CONDUIT* (1)
CORRIDOR* (1)
CURB* (1)
DOOR* (1)
DOORSTEP* (1)
DOORWAY* (1)
EDG* (1)
ENTRANC* (1)
ENTRY* (1)
FENC* (1)
FERRI* (1)
FERRY* (1)
FLOOR* (1)
FOOTPATH* (1)
FOYER* (1)
FRAM* (1)
FRING* (1)
FRONTIER* (1)
GATE* (1)
GATING* (1)
HALL* (1)
HALLWAY* (1)
HIGHWAY* (1)
HORIZON* (1)
LANE (1)
LANES (1)
LEDG* (1)
LINE (1)
LINED (1)
LINES (1)
LINING* (1)
MARGIN* (1)
PASSAG* (1)
PASSAGEWAY* (1)
PATH* (1)
PERIMET* (1)
PERIPHER* (1)
PORT* (1)
RAILROAD* (1)
RAILWAY* (1)
RIM (1)
RIMM* (1)
RIMS (1)
ROAD* (1)
ROUT* (1)
SIDEWALK* (1)
SKYLIN* (1)
STAIR* (1)
STEP* (1)
STREET* (1)
THRESHOLD* (1)
TRAIL* (1)
VERG* (1)
VIADUCT* (1)
VISTA* (1)
WALL* (1)
WINDOW* (1)
NARCISSISM
ARM (1)
ARMS (1)
BEARD* (1)
BLOOD* (1)
BODI* (1)
BODY* (1)
BONE (1)
BONES (1)
BRAIN* (1)
BROW (1)
BROWS (1)
CHEEK* (1)
CHEST* (1)
CHIN* (1)
CORPS* (1)
EYE* (1)
FACE (1)
FACES (1)
FACIES (1)
FEET* (1)
FLESH* (1)
FOOT* (1)
FOREHEAD* (1)
HAIR* (1)
HAND* (1)
HEAD* (1)
HEART* (1)
HEEL* (1)
HIP (1)
HIPS (1)
KIDNEY* (1)
KNEE (1)
KNEES (1)
LEG (1)
LEGS (1)
LIMB* (1)
LIVER* (1)
MUSCL* (1)
NAVEL* (1)
NECK* (1)
ORGAN* (1)
PALM* (1)
RIB (1)
RIBS (1)
SHOULDER* (1)
SKIN* (1)
SKULL* (1)
THUMB* (1)
TOE (1)
TOES (1)
VEIN* (1)
WRIST* (1)
CONCRETENESS
ACROS* (1)
AFAR* (1)
AFIELD* (1)
AHEAD* (1)
ALONG* (1)
AMONG* (1)
APART* (1)
ASID* (1)
AT (1)
AWAY* (1)
BACK* (1)
BEHIND* (1)
BESID* (1)
BETWEEN* (1)
CENTER* (1)
CENTR* (1)
CIRCL* (1)
CLOS* (1)
CLOSER* (1)
CORNER* (1)
CURV* (1)
DISTANC* (1)
DISTANT* (1)
EAST* (1)
EASTERN* (1)
EVERYWHER* (1)
EXTEND* (1)
EXTENSIV* (1)
EXTENT* (1)
FAR (1)
FARTHER* (1)
FLAT* (1)
FORWARD* (1)
FRONT* (1)
FURTHER* (1)
HERE (1)
HITHER* (1)
INSID* (1)
INTERIOR* (1)
LAYER* (1)
LENGTH* (1)
LEVEL* (1)
LONG* (1)
MIDDL* (1)
MIDST* (1)
NARROW* (1)
NEAR* (1)
NEARBY* (1)
NEARER* (1)
NEAREST* (1)
OFF (1)
OPEN* (1)
OUT (1)
OUTING* (1)
OUTS (1)
OUTSID* (1)
OUTWARD* (1)
OVER* (1)
PLAC* (1)
POINT* (1)
POSIT* (1)
REAR* (1)
REGION* (1)
ROUND* (1)
SEPARAT* (1)
SIDE (1)
SIDED (1)
SIDES (1)
SIDING* (1)
SITUAT* (1)
SOMEWHER* (1)
SOUTH* (1)
SPAC* (1)
SPACIOU* (1)
SPATIAL (1)
SQUAR* (1)
STRAIGHT* (1)
SURFAC* (1)
SURROUND* (1)
THENC* (1)
THITHER* (1)
TIP (1)
TIPP* (1)
TIPS (1)
TOWARD* (1)
WEST* (1)
WESTERN* (1)
WHER* (1)
WHEREVER* (1)
WIDE* (1)
WIDTH* (1)
WITHIN* (1)
ICARIAN IMAGERY
ASCENT
ALOFT* (1)
ARIS* (1)
ARISEN* (1)
AROS* (1)
ASCEND* (1)
ASCENS* (1)
BOUNC* (1)
CLIMB* (1)
DANGL* (1)
DAWN* (1)
FLAP* (1)
FLED (1)
FLEW* (1)
FLIER* (1)
FLIGHT* (1)
FLING* (1)
FLOAT* (1)
FLOWN* (1)
FLUNG* (1)
FLUTTER* (1)
FLY* (1)
HANG* (1)
HOVER* (1)
HURL* (1)
ICARIAN* (1)
ICARU* (1)
JUMP* (1)
LEAP* (1)
LEPT* (1)
LIFT* (1)
MOUNT* (1)
MOUNTAINSID* (1)
RISE (1)
RISEN* (1)
RISES (1)
RISING* (1)
SOAR* (1)
SPRANG* (1)
SPRING* (1)
SPRUNG* (1)
SUNRIS* (1)
SWING* (1)
THREW* (1)
THROW* (1)
THROWN* (1)
TOSS* (1)
UPHILL* (1)
UPWARD* (1)
WING* (1)
HEIGHT
ABOV* (1)
AERIAL* (1)
AIRPLAN* (1)
ARCH (1)
ATMOSPHER* (1)
BALCONY* (1)
BATTLEMENT* (1)
BIRD* (1)
BRANCH* (1)
CEIL* (1)
CLIFF* (1)
CRAG* (1)
CRAGGY* (1)
DOME (1)
DOMES (1)
DOMING (1)
ELEVAT* (1)
ERECT* (1)
GREW* (1)
GROW* (1)
GROWN* (1)
HEAP* (1)
HEAVEN* (1)
HEIGHT* (1)
HIGH* (1)
HIGHER* (1)
HILL* (1)
HILLSID* (1)
HILLTOP* (1)
HUNG* (1)
LADDER* (1)
LOFT* (1)
LOFTY* (1)
MOUND* (1)
MOUNTAIN* (1)
OBELISK* (1)
OVERHEAD* (1)
PEAK* (1)
PILE* (1)
PILING* (1)
PLANET* (1)
PRECIPIC* (1)
PYRAMID* (1)
RAFTER* (1)
RAINBOW* (1)
RAMPART* (1)
RIDG* (1)
ROOF* (1)
SKY (1)
SLOP* (1)
SPIR* (1)
STEEP* (1)
SUMMIT* (1)
TALL* (1)
TALLER* (1)
TALLEST* (1)
TOP (1)
TOPP* (1)
TOPS (1)
TOWER* (1)
TREE* (1)
TRELLI* (1)
UPPER* (1)
UPPERMOST* (1)
ZENITH* (1)
DESCENT
BASE (1)
BASES (1)
BURI* (1)
BURROW* (1)
BURY* (1)
DESCEND* (1)
DESCENT* (1)
DIG (1)
DIGG* (1)
DIGS (1)
DIP (1)
DIPP* (1)
DIPS (1)
DIVE* (1)
DOWNHILL* (1)
DOWNSTREAM* (1)
DROOP* (1)
DROP (1)
DROPS (1)
DUG (1)
FALL* (1)
FALLEN* (1)
FELL* (1)
HEADLONG* (1)
LEAN* (1)
PLUNG* (1)
RECED* (1)
RECLIN* (1)
SANK* (1)
SINK* (1)
SLID* (1)
SLIP* (1)
STOOP* (1)
SUNDOWN* (1)
SUNK* (1)
SUNKEN* (1)
SUNSET* (1)
SWOOP* (1)
TOPPL* (1)
TUMBL* (1)
DEPTH
BELOW* (1)
BENEATH* (1)
BOTTOM* (1)
CANYON* (1)
CAVE* (1)
CAVING* (1)
CELLAR* (1)
CHASM* (1)
CREVAS* (1)
DEEP* (1)
DEEPER* (1)
DEPTH* (1)
DITCH* (1)
DOWNWARD* (1)
GUTTER* (1)
HOLE (1)
HOLES (1)
LOW* (1)
PIT (1)
PITS (1)
PITT* (1)
PRECIPITOU* (1)
RAVIN* (1)
ROOT* (1)
SUBMARIN* (1)
TRENCH* (1)
TUNNEL* (1)
UNDER (1)
UNDERGROUND* (1)
UNDERNEATH* (1)
UNDERWORLD* (1)
VALLEY* (1)
FIRE
SOLAR (1)
ABLAZ* (1)
AFIR* (1)
ASH (1)
ASHES (1)
BLAST* (1)
BLAZ* (1)
BOIL* (1)
BROIL* (1)
BURN* (1)
BURNT* (1)
CANDL* (1)
CHARCOAL* (1)
COAL* (1)
COMBUST* (1)
EMBER* (1)
FIERY* (1)
FIRE* (1)
FLAM* (1)
HEARTH* (1)
HEAT* (1)
HOT (1)
IGNIT* (1)
INFERNO* (1)
INFLAM* (1)
KINDL* (1)
LIT (1)
MELT* (1)
SCORCH* (1)
SEAR* (1)
SIZZL* (1)
SMOK* (1)
SMOLDER* (1)
SMOULDER* (1)
SPARK* (1)
SULTRY* (1)
SUN (1)
SUNN* (1)
SUNS (1)
SUNSTROK* (1)
TROPIC* (1)
TROPICAL* (1)
WARM* (1)
WARMTH* (1)
WATER
BATH* (1)
BEACH* (1)
BROOK* (1)
BUBBL* (1)
BUCKET* (1)
CREEK* (1)
DAM (1)
DAMM* (1)
DAMP* (1)
DAMS (1)
DEW (1)
DEWS (1)
DEWY (1)
DIKE* (1)
DOWNPOUR* (1)
DRENCH* (1)
SHORING (1)
SURF (1)
SURFING (1)
DRIP* (1)
FEN (1)
FLOOD* (1)
FLUID* (1)
FOAM* (1)
FOUNTAIN* (1)
GURGL* (1)
HUMID* (1)
LAKE (1)
LAKES (1)
LIQUID* (1)
MOAT* (1)
MOIST* (1)
MOISTUR* (1)
MOSS (1)
MOSSES (1)
OCEAN* (1)
OVERFLOW* (1)
PERSPIR* (1)
PERSPIRAT* (1)
POND* (1)
POOL* (1)
POUR* (1)
RAIN* (1)
RAINFALL* (1)
RIVER* (1)
SATURAT* (1)
SEA (1)
SEAS (1)
SHORE (1)
SHORES (1)
SHOWER* (1)
SOAK* (1)
SPLASH* (1)
SPRINKL* (1)
STEAM* (1)
STEAMY* (1)
STREAM* (1)
SWAM* (1)
SWAMP* (1)
SWAMPY* (1)
SWIM* (1)
SWUM* (1)
TIDE (1)
TIDES (1)
TIDING (1)
TRICKL* (1)
WADE* (1)
WADING (1)
WASH* (1)
WATER* (1)
WATERFALL* (1)
WET* (1)
SECONDARY
ABSTRACTION
DIVERSE (1)
DIVERSIFICATION (1)
DIVERSIFIED (1)
DIVERSITY (1)
EVIDENT (1)
EVIDENTIAL (1)
GUESS* (1)
LOGISTIC (1)
ABSTRACT* (1)
ALMOST* (1)
ALTERNATIV* (1)
ANALY* (1)
ATTRIBUT* (1)
AXIOM* (1)
BASIC* (1)
BELIEF* (1)
BELIEV* (1)
CALCULAT* (1)
CAUS* (1)
CERTAIN* (1)
CHARACTERIZ* (1)
CHOIC* (1)
CHOOS* (1)
CHOS* (1)
CIRCUMSTANC* (1)
COMPREHEND* (1)
COMPAR* (1)
COMPREHENS* (1)
CONDITIONAL* (1)
CONCENTRAT* (1)
CONCEPT* (1)
CONCLUD* (1)
CONJECTUR* (1)
CONSEQUENC* (1)
CONSEQUENT* (1)
CONSIDER* (1)
CONTRIV* (1)
CRITER* (1)
CRITERIA* (1)
DECID* (1)
DEEM* (1)
DEFIN* (1)
DELIBERAT* (1)
DETERMIN* (1)
DIFFERENC* (1)
DIFFERENT* (1)
DISTINCT* (1)
DISTINGUISH* (1)
DOCTRIN* (1)
EFFECT* (1)
ESTABLISH* (1)
ESTIMAT* (1)
EVALUAT* (1)
EVIDENC* (1)
EXAMIN* (1)
EXAMPL* (1)
EXCEPT* (1)
FACT (1)
FACTS (1)
FEATUR* (1)
FIGUR* (1)
FORETHOUGHT* (1)
FORMULAT* (1)
GUES* (1)
HISTORY* (1)
IDEA* (1)
IMPORTANC* (1)
IMPORTANT* (1)
INFORMAT* (1)
INTERPRET* (1)
INTERPRETAT* (1)
JUDG* (1)
JUDGMENT* (1)
KNEW* (1)
KNOW* (1)
LEARN* (1)
LOGIC* (1)
MAY (1)
MEANT* (1)
MISTAK* (1)
MISTAKEN* (1)
MISTOOK* (1)
MODEL* (1)
OPIN* (1)
OTHERWIS* (1)
PERHAP* (1)
PLAN* (1)
POSSI* (1)
PREDICAT* (1)
PREDICT* (1)
PROBAB* (1)
PROBABL* (1)
PROBLEM* (1)
PROOF* (1)
PROV* (1)
PURPOS* (1)
QUALI* (1)
QUANT* (1)
RE-ANALY* (1)
RE-EXAMIN* (1)
RATIONAL* (1)
REAL (1)
REALITY* (1)
REASON* (1)
REASONABL* (1)
RECONSIDER* (1)
REEXAMIN* (1)
REFORMULAT* (1)
REINTERPRETAT* (1)
RELEARN* (1)
RELEVANC* (1)
RELEVANT* (1)
RESEARCH* (1)
RESOLV* (1)
SCHEM* (1)
SCIENC* (1)
SCIENTIFIC* (1)
SELECT* (1)
SIGNIFICANC* (1)
SOLUT* (1)
SOMETH* (1)
SOMEWHAT* (1)
SOURC* (1)
SUBJECT* (1)
SUPPOS* (1)
SURE (1)
SURELY (1)
TEND* (1)
THEM* (1)
THEOR* (1)
THINK* (1)
THINKER* (1)
THOUGHT* (1)
TOPIC* (1)
TRUE (1)
TRULY (1)
TRUTH* (1)
TTT1 (1)
UNDERSTAND* (1)
UNDERSTOOD* (1)
WEIGH (1)
WEIGHED* (1)
WEIGHING* (1)
WEIGHS (1)
WHY (1)
SOCIAL BEHAVIOR
GUEST* (1)
QUOTA (1)
QUOTA-* (1)
QUOTAS (1)
ACQUIESCENCE (1)
APPROBATION (1)
CONSENSUS* (1)
CONSULT (1)
PROSOCIAL (1)
SOCIABLE (1)
ABLE* (1)
ACCEPT* (1)
ACCEPTANC* (1)
ADDRES* (1)
ADMIT* (1)
ADVIC* (1)
ADVIS* (1)
AGRE* (1)
AID* (1)
ALLOW* (1)
ANNOUNC* (1)
ANSWER* (1)
APOLOGIS* (1)
APOLOGIZ* (1)
APPEAL* (1)
APPROV* (1)
APPROVAL* (1)
ASK (1)
ASKED (1)
ASKING (1)
ASKS (1)
ASSIST* (1)
ASSUR* (1)
BARGAIN* (1)
BECKON* (1)
BESEECH* (1)
BORROW* (1)
CALL* (1)
COMMENT* (1)
COMMIT* (1)
COMMUNICAT* (1)
CONDUCT* (1)
CONFER* (1)
CONFES* (1)
CONFID* (1)
CONFIRM* (1)
CONGRATULAT* (1)
CONSENT* (1)
CONSOL* (1)
CONSOLAT* (1)
CONVERS* (1)
CONVERSAT* (1)
CONVINC* (1)
COOPERAT* (1)
COUNSEL* (1)
DECLAR* (1)
DEPEND* (1)
DEPENDENT* (1)
DESCRIB* (1)
DIALOGU* (1)
DISCOURS* (1)
DISCUS* (1)
DISCUSS* (1)
DONAT* (1)
EDUCAT* (1)
ELECT* (1)
ENCOURAG* (1)
ENCOURAGEMENT* (1)
ENGAG* (1)
ESCORT* (1)
EXCUS* (1)
EXPLAIN* (1)
FOLLOW* (1)
FORGAV* (1)
FORGIV* (1)
FORGIVEN* (1)
GENEROSITY* (1)
GENEROU* (1)
GIFT* (1)
GRANT* (1)
GREET* (1)
GUID* (1)
GUIDANC* (1)
HELP* (1)
IMITAT* (1)
IMPLOR* (1)
INFLUENC* (1)
INFORM* (1)
INQUIR* (1)
INSTRUCT* (1)
INTERVIEW* (1)
INTRODUC* (1)
INVIT* (1)
KNEEL* (1)
LEND* (1)
LENT* (1)
MEET* (1)
MENT* (1)
MESSAG* (1)
MET* (1)
MUTUAL* (1)
OFFER* (1)
PARDON* (1)
PARTICIPAT* (1)
PERSUAD* (1)
PERSUA* (1)
PLEAD* (1)
PLEAS* (1)
PREACH* (1)
PROCLAIM* (1)
PROMIS* (1)
PROPOS* (1)
PROTECT* (1)
PROVID* (1)
QUOT* (1)
RECIT* (1)
REEDUCATION (1)
REMARK* (1)
REMIND* (1)
REPLI* (1)
REPLY (1)
REPRESENT* (1)
REQUEST* (1)
RESCU* (1)
RESPOND* (1)
RESPONS* (1)
SAID* (1)
SALE (1)
SALES (1)
SAY* (1)
SERVIC* (1)
SHAR* (1)
SHELTER* (1)
SIGNAL* (1)
SOCIAL* (1)
SOLICIT* (1)
SPEAK* (1)
SPEAKER* (1)
SPEECH* (1)
SPOK* (1)
SPOKEN* (1)
SUGGEST* (1)
SWORN* (1)
TALK* (1)
TAUGHT* (1)
TEACH* (1)
TELL* (1)
THANK* (1)
TOLD* (1)
TREAT* (1)
UTTER* (1)
VISIT* (1)
INSTRUMENTAL BEHAVIOR
AVAIL (1)
CAVEAT* (1)
DIVESTMENT* (1)
DIVIDEND* (1)
FOUNDR* (1)
LABORATOR* (1)
SPIN-OFF* (1)
AVAILABILITY (1)
COMPONENT* (1)
INGREDIENT (1)
LOGISTICS (1)
MERCHANDISE (1)
PROVISION* (1)
ACHIEV* (1)
ACHIEVEMENT* (1)
ACQUIR* (1)
ACQUISIT* (1)
AFFORD* (1)
AIM* (1)
APPLIC* (1)
APPLIE* (1)
APPLY (1)
ARCHITECT* (1)
ASSEMBL* (1)
ATTAIN* (1)
ATTEMPT* (1)
AVAILABL* (1)
BELONG* (1)
BID* (1)
BOUGHT* (1)
BUILD* (1)
BUILT* (1)
BURDEN* (1)
BUSINES* (1)
BUY* (1)
CAPABL* (1)
CARRI* (1)
CARRY* (1)
CLAIM* (1)
COLLECT* (1)
CONSTRUCT* (1)
COPI* (1)
COPY* (1)
COST* (1)
COUNT* (1)
CRAFT* (1)
CRAFTSMAN* (1)
CULTIVAT* (1)
CURE* (1)
CURING* (1)
DELIVER* (1)
EARN* (1)
EFFORT* (1)
EMPLOY* (1)
ENDEAVOR* (1)
FACTORI* (1)
FACTORY* (1)
FEAT (1)
FEATS (1)
FIND* (1)
FINISH* (1)
FORGE (1)
FORGES (1)
FOUND* (1)
GAIN* (1)
GOAL* (1)
GRASP* (1)
HARVEST* (1)
HIRE (1)
HIRED (1)
HIRES (1)
HIRING* (1)
IMPROV* (1)
INDUSTRI* (1)
INDUSTRY* (1)
JOB (1)
JOBS (1)
LABOR* (1)
LABORIOU* (1)
LABOUR* (1)
LABOURIOU* (1)
LESSON* (1)
MACHIN* (1)
MACHINERY* (1)
MAK* (1)
MANIPULAT* (1)
MANUFACTUR* (1)
MARKET* (1)
MEND* (1)
MERCHANT* (1)
MONEY* (1)
OBTAIN* (1)
OCCUPAT* (1)
OCCUPY* (1)
OWNERSHIP* (1)
PAID* (1)
PAY (1)
PAYING* (1)
PAYS (1)
PERFORM* (1)
PICK* (1)
PLOUGH* (1)
PLOW* (1)
POSSES* (1)
POSSESS* (1)
PRACTIC* (1)
PREPAR* (1)
PRIC* (1)
PRIVATION* (1)
PRODUC* (1)
PROFIT* (1)
PROFITABL* (1)
PROPERTY* (1)
PURCHAS* (1)
PURSU* (1)
REACH* (1)
RECONSTRUCT (1)
RECORD* (1)
RECOVER* (1)
REPAIR* (1)
REPRODUCE (1)
RESTOR* (1)
RESULT* (1)
RISK* (1)
SEL* (1)
SELL* (1)
SKIL* (1)
SKILL* (1)
SKILLFUL* (1)
SOLD* (1)
SOW* (1)
SPEND* (1)
SPENT* (1)
STUDENT* (1)
STUDI* (1)
STUDIOU* (1)
STUDY* (1)
SUCCE* (1)
SWEEP* (1)
SWEPT* (1)
TASK* (1)
TEST* (1)
TOIL (1)
TOILED (1)
TOILS* (1)
TRAD* (1)
TRIED (1)
TRY (1)
TRYING* (1)
TRYS (1)
USE (1)
USED (1)
USES (1)
USING (1)
WIN (1)
WINNING* (1)
WINS (1)
WON (1)
WORK* (1)
RESTRAINT
COMPTROLLER* (1)
DISCIPLINE (1)
MAGIST* (1)
PENALIZ* (1)
PENITENTIARY (1)
ARREST* (1)
ASSIGN* (1)
AUTHORIZ* (1)
BAR (1)
BARRED (1)
BARRING (1)
BARS (1)
BIND* (1)
BLOCK* (1)
BLOCKAD* (1)
BOUND* (1)
CAG* (1)
CAPTIV* (1)
CAPTIVITY* (1)
CAPTUR* (1)
CATCH* (1)
CAUGHT* (1)
CENSUR* (1)
CHASTIS* (1)
CHASTIZ* (1)
COERC* (1)
COMPEL* (1)
CONFIN* (1)
CONFORM* (1)
CONFORMITY* (1)
CONSTRAIN* (1)
CONSTRAINT* (1)
CONSTRICT* (1)
CONTROL* (1)
DECREE* (1)
DETAIN* (1)
DETER* (1)
DUNGEON* (1)
ENCLOS* (1)
FORBAD* (1)
FORBID* (1)
FORBIDDEN* (1)
GUARD* (1)
GUARDIAN* (1)
HALT* (1)
HAMPER* (1)
HINDER* (1)
HINDRANC* (1)
IMPERATIV* (1)
IMPRISON* (1)
INHIBIT* (1)
INSIST* (1)
INTERFER* (1)
INTERRUPT* (1)
JAIL* (1)
LEASH* (1)
LIMIT* (1)
LOCK* (1)
MANAG* (1)
MUST* (1)
NECESSARY* (1)
NECESSITY* (1)
OBEDIENC* (1)
OBEY* (1)
OBLIG* (1)
OBLIGAT* (1)
OBSTACL* (1)
OBSTRUCT* (1)
PENALTI* (1)
PENALTY* (1)
PERMISS* (1)
PERMIT* (1)
POLIC* (1)
POLICEMAN* (1)
POLICEMEN* (1)
PRESCRIB* (1)
PREVAIL* (1)
PREVENT* (1)
PRISON* (1)
PROHIBIT* (1)
PUNISH* (1)
PUNISHMENT* (1)
REFUS* (1)
REGULAT* (1)
REIGN* (1)
REQUIR* (1)
REQUIREMENT* (1)
RESIST* (1)
RESTRAIN* (1)
RESTRAINT* (1)
RESTRICT* (1)
SCOLD* (1)
SHUT* (1)
STOP* (1)
STRICT* (1)
SUMMON* (1)
SUPPRES* (1)
TABOO* (1)
TAX* (1)
THWART* (1)
ORDER
ORDINAL (1)
ACCURAT* (1)
ARRANG* (1)
ARRAY* (1)
BALANC* (1)
CATALOG* (1)
CLASS* (1)
CONSISTENC* (1)
CONSISTENT* (1)
CONSTANC* (1)
CONSTANT* (1)
DIVID* (1)
FORM* (1)
FORMULA* (1)
GRAD* (1)
INDEX* (1)
LIST* (1)
MEASUR* (1)
METHOD* (1)
MODERAT* (1)
NEAT* (1)
NORM* (1)
NORMAL* (1)
ORGANI* (1)
ORDER (1)
PATTERN* (1)
PRECIS* (1)
RANK* (1)
REGULAR* (1)
REORGANIZ* (1)
ROUTIN* (1)
SERIAL (1)
SERIES* (1)
SIMPL* (1)
SIMPLICITY* (1)
STABILITY* (1)
STANDARD* (1)
SYMMETR* (1)
SYSTEM* (1)
UNIFORM* (1)
UNIVERSAL* (1)
TEMPORAL REFERENCES
FULL-TIME (1)
LONG-TERM (1)
LONGEVIT* (1)
PART-TIME (1)
SHORT-TERM (1)
ABRUPT* (1)
AGAIN (1)
AGO (1)
ALREADY* (1)
ANCIENT (1)
BREVITY* (1)
BRIEF* (1)
CLOCK* (1)
DAILY* (1)
DATE (1)
DATED (1)
DATES (1)
DATING (1)
DECAD* (1)
DUR* (1)
DURAT* (1)
EARLIER* (1)
EARLY* (1)
EPHEMERAL* (1)
EVER* (1)
FORMER* (1)
FREQUENT* (1)
HAST* (1)
HENCEFORTH* (1)
HOUR* (1)
IMMEDIAT* (1)
IMMEDIATE* (1)
INSTANT* (1)
INTERLUD* (1)
MEANTIM* (1)
MEANWHIL* (1)
MINUT* (1)
MOMENT* (1)
MOMENTARY* (1)
MONTH* (1)
NOW (1)
OCCAS* (1)
OCCASIONAL* (1)
OFTEN* (1)
OLD (1)
OLDER* (1)
ONCE (1)
PAST* (1)
PREMATUR* (1)
PRESENT* (1)
PREVIOU* (1)
PRIOR* (1)
QUICK* (1)
SEASON* (1)
SELDOM* (1)
SOMETIM* (1)
SOON* (1)
SOONER* (1)
SUDDEN* (1)
TEMPORARY* (1)
THEN* (1)
TILL* (1)
TIME* (1)
TIMING* (1)
TODAY* (1)
TONIGHT* (1)
WEEK* (1)
WHEN* (1)
WHENEVER* (1)
WHIL* (1)
YEAR* (1)
YESTERDAY* (1)
MORAL IMPERATIVE
LEGITIMACY (1)
RESPECT (1)
BIRTHRIGHT* (1)
COMMANDMENT* (1)
CONSCIENC* (1)
CONSCIENTIOU* (1)
CORRECT* (1)
CUSTOM (1)
CUSTOMER* (1)
CUSTOMIZ* (1)
DUTI* (1)
DUTY* (1)
ETHIC* (1)
HONEST* (1)
HONESTY* (1)
HONOR* (1)
HONORABL* (1)
HONOUR* (1)
HONOURABL* (1)
JUSTIC* (1)
LAW (1)
LAWFUL* (1)
LAWS (1)
LEGAL* (1)
LEGITIMAT* (1)
MORAL* (1)
MORALITY* (1)
OUGHT* (1)
PREROGATIV* (1)
PRINCIPL* (1)
PRIVILEG* (1)
PROPER* (1)
RECTITUD* (1)
RESPECTFUL* (1)
RESPONSIBILITY* (1)
RESPONSIBL* (1)
RIGHT* (1)
RIGHTEOU* (1)
RIGHTFUL* (1)
SANCT* (1)
SHOULD* (1)
TRUSTWORTHY* (1)
UNJUST* (1)
UPRIGHT* (1)
VIRTU* (1)
EMOTIONS
POSITIVE AFFECT
AMUS* (1)
AMUSEMENT* (1)
BLITH* (1)
CAREFRE* (1)
CELEBRAT* (1)
CHEER* (1)
CHEERFUL* (1)
CHEERY* (1)
CHUCKL* (1)
DELIGHT* (1)
DELIGHTFUL* (1)
ELAT* (1)
ENJOY* (1)
ENJOYABL* (1)
ENJOYMENT* (1)
ENTERTAIN* (1)
ENTERTAINMENT* (1)
ENTHUSIASM* (1)
ENTHUSIASTIC* (1)
EXCIT* (1)
EXHILERAT* (1)
EXULT* (1)
EXULTANT* (1)
FUN (1)
FUNNY* (1)
GAIETY* (1)
GAY* (1)
GLAD* (1)
GLADNES* (1)
GLEE (1)
GLEEFUL* (1)
GLEELY (1)
GRATIFI* (1)
GRATIFY* (1)
GRIN* (1)
HAPPINES* (1)
HAPPY* (1)
HILARIOU* (1)
HUMOR* (1)
HUMOROU* (1)
HUMOUR* (1)
HUMOUROU* (1)
JOCUND* (1)
JOK* (1)
JOLLY (1)
JOVIAL* (1)
JOY* (1)
JOYFUL* (1)
JOYOU* (1)
LAUGH* (1)
LAUGHTER* (1)
MERRIMENT* (1)
MERRY* (1)
MIRTH* (1)
MIRTHFUL* (1)
OVERJOY* (1)
PLAYFUL* (1)
PLEASANTRY* (1)
PLEASUR* (1)
PLEASURABL* (1)
REJOIC* (1)
RELIEF* (1)
RELIEV* (1)
ROLLICK* (1)
SATISF* (1)
SMIL* (1)
THRIL* (1)
THRILL* (1)
VIVACIOU* (1)
VIVACITY* (1)
ANXIETY
TREMOR (1)
AFRAID* (1)
AGHAST* (1)
ALARM* (1)
ANGUISH* (1)
ANXI* (1)
AVOID* (1)
BLUSH* (1)
CARES (1)
COWARD* (1)
COWER* (1)
CRISI* (1)
DANGEROU* (1)
DESPERAT* (1)
DISTRES* (1)
DREAD* (1)
DREADFUL* (1)
FEAR* (1)
FEARFUL* (1)
FRANTIC* (1)
FRET* (1)
FRIGHT* (1)
HORRIFI* (1)
HORRIFY* (1)
HORROR* (1)
NERVOU* (1)
NERVOUSNES* (1)
PANIC* (1)
PHOBIA* (1)
PHOBIC* (1)
SCARE (1)
SCARED (1)
SCARES (1)
SCARY (1)
SHRIEK* (1)
SHUDDER* (1)
SHY* (1)
TERRIFI* (1)
TERRIFY* (1)
TERROR* (1)
TIMID* (1)
TRAUMA* (1)
TREMBL* (1)
TREMULOU* (1)
TROUBL* (1)
UNEASINES* (1)
UNEASY* (1)
WORRI* (1)
WORRY* (1)
SADNESS
AGGRIEVED (1)
ALAS (1)
DEJECT* (1)
DEPRES* (1)
DEPRESS* (1)
DESPAIR* (1)
DESPONDANT* (1)
DESPONDENT* (1)
DIRG* (1)
DISAPPOINT* (1)
DISAPPOINTMENT* (1)
DISCONSOLAT* (1)
DISCOURAG* (1)
DISHEARTEN* (1)
DISMAL* (1)
DISSATISFI* (1)
DISSATISFY* (1)
DISTRAUGHT* (1)
DOLDRUM* (1)
DOWNCAST* (1)
DREARY* (1)
ELEGY* (1)
FORLORN* (1)
FROWN* (1)
FUNEREAL* (1)
GRIE* (1)
GROAN* (1)
HOPELES* (1)
HUMILIAT* (1)
LAMENT* (1)
LAMENTAT* (1)
LONE* (1)
LONELINES* (1)
MELANC* (1)
MISERABL* (1)
MISERI* (1)
MISERY* (1)
MOAN* (1)
MOURN* (1)
MOURNFUL* (1)
ORPHAN* (1)
PAIN* (1)
PITIFUL* (1)
PLAINT* (1)
REGRET* (1)
REGRETFUL* (1)
REMORS* (1)
REPENT* (1)
REPENTANC* (1)
REPENTENC* (1)
RUE (1)
SAD (1)
SADDEN* (1)
SADLY (1)
SADNES* (1)
SOB (1)
SOBB* (1)
SOBS (1)
SOMBER* (1)
SOMBR* (1)
SORROW* (1)
SORROWFUL* (1)
SORRY* (1)
SUFFER* (1)
TEARFUL* (1)
TRAGEDY* (1)
TRAGIC* (1)
UNHAPPINES* (1)
UNHAPPY* (1)
WAIL* (1)
WEEP* (1)
WEPT* (1)
WHIN* (1)
WOE (1)
WOES (1)
AFFECTION
AFFECT* (1)
AFFECTIONAT* (1)
AMOROU* (1)
AMOUROU* (1)
APPRECIAT* (1)
ATTRACTIV* (1)
BEFRIEND* (1)
BELOV* (1)
BOSOM* (1)
BRIDAL* (1)
BRIDE* (1)
CHERISH* (1)
CONGENIAL* (1)
CORDIAL* (1)
COURTSHIP* (1)
DARL* (1)
DEAR* (1)
DEVOT* (1)
EMBRAC* (1)
ENAMOR* (1)
ENAMOUR* (1)
ENDEAR* (1)
FAMILIAR* (1)
FONDER (1)
FAREWELL* (1)
FAVOR* (1)
FAVOUR* (1)
FIANC* (1)
FLIRT* (1)
FOND (1)
FONDNES* (1)
FRATERNITY* (1)
FRIEND* (1)
FRIENDSHIP* (1)
GOODBY* (1)
GRATEFUL* (1)
INTIMACY* (1)
INTIMAT* (1)
KIND* (1)
KINDNES* (1)
LIKE* (1)
LIKING* (1)
LOV* (1)
MARRI* (1)
MARRIAG* (1)
MARRY* (1)
MATE (1)
MATED (1)
MATES (1)
MATING* (1)
MERCY* (1)
PAT (1)
PATS (1)
PATT* (1)
PITI* (1)
PITY* (1)
ROMANC* (1)
SWEETHEART* (1)
SYMPAT* (1)
UNSELFISH* (1)
WARMHEART* (1)
WELCOM* (1)
WOOED* (1)
WOOING* (1)
WOOS (1)
AGGRESSION
ABHOR* (1)
ABUS* (1)
ABUSIV* (1)
ACCUS* (1)
AFFLICT* (1)
AGGRESS* (1)
AGGRESSIV* (1)
AMBUSH* (1)
ANGER* (1)
ANGRI* (1)
ANGRIER* (1)
ANGRY* (1)
ANNIHILAT* (1)
ANNOY* (1)
ANNOYANC* (1)
ANTAGONIZ* (1)
ARGU* (1)
ARGUMENT* (1)
ARMY* (1)
ARROW* (1)
ASSAULT* (1)
ATTACK* (1)
AVENG* (1)
AX (1)
AXE (1)
AXES (1)
BATTL* (1)
BEAK* (1)
BEAT* (1)
BEATEN* (1)
BETRAY* (1)
BLADE* (1)
BLAM* (1)
BLOODY* (1)
BOTHER* (1)
BRAWL* (1)
BREAK* (1)
BROK* (1)
BROKEN* (1)
BRUTAL* (1)
CANNON* (1)
CHID* (1)
COMBAT* (1)
COMPLAIN* (1)
CONFLICT* (1)
CONDEMN* (1)
CONTROVERSY* (1)
CRITIC* (1)
CRUEL* (1)
CRUSH* (1)
CUT (1)
CUTS (1)
CUTT* (1)
DAMAG* (1)
DECEI* (1)
DEFEAT* (1)
DEGRAD* (1)
DEMOLISH* (1)
DEPRIV* (1)
DERID* (1)
DESPIS* (1)
DESTROY* (1)
DESTRUCT* (1)
DESTRUCTIV* (1)
DETEST* (1)
DISAGRE* (1)
DISAGREEMENT* (1)
DISAPPROV* (1)
DISCONTENT* (1)
DISLIK* (1)
DISPUT* (1)
DISTURB* (1)
DOUBT* (1)
ENEMI* (1)
ENEMY* (1)
ENRAG* (1)
EXASPERAT* (1)
CONTROVERSIAL* (1)
CRITIQUE (1)
DISPARAG* (1)
IRRITABLE (1)
EXPLOIT* (1)
EXTERMINAT* (1)
FEUD* (1)
FIERC* (1)
FIGHT* (1)
FOUGHT* (1)
FURIOU* (1)
FURY* (1)
GASH* (1)
GRAPPL* (1)
GROWL* (1)
GRUDG* (1)
GUN (1)
GUNN* (1)
GUNS (1)
HARM* (1)
HARSH* (1)
HATE* (1)
HATR* (1)
HIT (1)
HITS (1)
HITT* (1)
HOMICID* (1)
HOSTIL* (1)
HURT* (1)
INGRAT* (1)
INJUR* (1)
INJURY* (1)
INSULT* (1)
INVAD* (1)
INVAS* (1)
IRAT* (1)
IRK* (1)
IRRITAT* (1)
JEALOU* (1)
JEALOUSY* (1)
JEER* (1)
KICK* (1)
KIL* (1)
KILL* (1)
KNIF* (1)
KNIV* (1)
LOATH* (1)
MAIM* (1)
MISTREAT* (1)
MOCK* (1)
MURDER* (1)
OBLITERAT* (1)
OFFEND* (1)
OPPOS* (1)
PREDATORY* (1)
PROTEST* (1)
QUARREL* (1)
RAGE (1)
RAGES (1)
RAGING (1)
RAPIN* (1)
REBEL* (1)
REBELL* (1)
REBUK* (1)
RELENTLES* (1)
REPROACH* (1)
RESENT* (1)
RESENTMENT* (1)
RETRIBUT* (1)
REVENG* (1)
REVOLT* (1)
RIDICUL* (1)
RIP (1)
RIPP* (1)
RIPS (1)
ROB (1)
ROBB* (1)
ROBS (1)
SARCASM* (1)
SARCASTIC* (1)
SCALP* (1)
SCOF* (1)
SCOFF* (1)
SCOURG* (1)
SEIZ* (1)
SEVER* (1)
SEVERITY* (1)
SHATTER* (1)
SHOOT* (1)
SHOT* (1)
SHOV* (1)
SLAIN* (1)
SLANDER* (1)
SLAP* (1)
SLAUGHTER* (1)
SLAY* (1)
SLEW* (1)
SMASH* (1)
SNARL* (1)
SNEER* (1)
SPEAR* (1)
SPITEFUL* (1)
SPURN* (1)
STAB* (1)
STEAL* (1)
STOL* (1)
STOLEN* (1)
STRANGL* (1)
STRIF* (1)
STRIK* (1)
STRUCK* (1)
STRUGGL* (1)
STUBBORN* (1)
SWORD* (1)
TAUNT* (1)
TEMPER* (1)
THREAT* (1)
THREATEN* (1)
TORE (1)
TORMENT* (1)
TORN* (1)
TORTUR* (1)
TRAITOR* (1)
TRAMPL* (1)
TREACHEROU* (1)
TREACHERY* (1)
TYRANT* (1)
UNKIND* (1)
VENGEANC* (1)
VENGEFUL* (1)
VEX (1)
VEXING (1)
VIOLAT* (1)
VIOLENC* (1)
VIOLENT* (1)
WAR (1)
WARRING (1)
WARRIOR* (1)
WARS (1)
WEAPON* (1)
WHIP* (1)
WOUND* (1)
WRATH* (1)
FOOTBALL* (1)
WRECK* (1)
EXPRESSIVE BEHAVIOR
ART (1)
ARTS* (1)
BARD* (1)
BARK* (1)
BAWL* (1)
BELLOW* (1)
BLEAT* (1)
CAROL* (1)
CHANT* (1)
CLOWN* (1)
CRIE* (1)
CRIING (1)
CRY (1)
DANC* (1)
EXCLAIM* (1)
EXPRESSIV* (1)
FRISK* (1)
FROLIC* (1)
GAME* (1)
GUITAR* (1)
HARP* (1)
HORN* (1)
HURRAH* (1)
HURRAY* (1)
LULLABY* (1)
LUTE (1)
LUTES (1)
LYRE (1)
MINSTREL* (1)
NEIGH (1)
NEIGHS (1)
PAINTER* (1)
PLAY* (1)
POEM* (1)
POET* (1)
POETIC* (1)
POETRY* (1)
ROAR* (1)
SANG* (1)
SCREAM* (1)
SHOUT* (1)
SIGH* (1)
SING (1)
SINGS* (1)
SPORT* (1)
SUNG* (1)
TROUBADOR* (1)
TROUBADOUR* (1)
VIOLIN* (1)
WARBL* (1)
YEL* (1)
YELL* (1)
GLORY
ADMIR* (1)
ADMIRABL* (1)
ADVENTUR* (1)
APPLAUD* (1)
APPLAUS* (1)
ARROGANC* (1)
ARROGANT* (1)
AUDACITY* (1)
AWE* (1)
BOAST* (1)
BOASTFUL* (1)
BRILLIANC* (1)
BRILLIANT* (1)
CAESAR* (1)
CASTL* (1)
CONQUE* (1)
CROWN* (1)
DAZZL* (1)
EAGL* (1)
ELIT* (1)
EMPEROR* (1)
EMPIR* (1)
EXALT* (1)
EXHIBIT* (1)
EXQUISIT* (1)
EXTRAORDINARY* (1)
EXTREM* (1)
FAME (1)
FAMED (1)
FAMOU* (1)
FOREMOST* (1)
GENIU* (1)
GLOR* (1)
GOLD* (1)
GOLDEN* (1)
GRANDEUR* (1)
GREAT* (1)
HAUGHTY* (1)
HERO* (1)
HOMAG* (1)
ILLUSTRIOU* (1)
KINGDOM* (1)
MAGESTIC* (1)
MAGNIFICENT* (1)
MAJESTIC* (1)
MAJESTY* (1)
NOBL* (1)
OUTSTAND* (1)
PALAC* (1)
POMP* (1)
PRESTIG* (1)
PRID* (1)
PRINC* (1)
PROUD* (1)
RENOWN* (1)
RESPLENDENT* (1)
RICH* (1)
ROYAL* (1)
ROYALTY* (1)
SCEPTR* (1)
SCORN* (1)
SPLENDID* (1)
SPLENDOR* (1)
STRUT* (1)
SUBLIM* (1)
SUPERIOR* (1)
SUPERIORITY* (1)
SUPREM* (1)
THRON* (1)
TRIUMP* (1)
VICTOR* (1)
VICTORIOU* (1)
VICTORY* (1)
WEALTH* (1)
WONDER* (1)
WONDERFUL* (1)
"""
# Exclusion list for the Regressive Imagery Dictionary: one pattern per line,
# a trailing * matching any suffix. Words matching these patterns are
# presumably excluded even when a dictionary pattern above would match them
# (e.g. TAXI*/TAXO* so "taxi"/"taxonomy" do not count as hits for the TAX*
# restraint pattern) — see load_exclusion_list_from_string. TODO confirm
# against RegressiveImageryDictionary's matching code.
DEFAULT_RID_EXCLUSION_LIST = """
PROVINC*
MIDDLE-*
DIVERSION*
DIETHYL*
COUNTY*
SHARK*
PRICK*
PASTE*
HANDICAP*
HANDBOOK*
GRAPH*
FACTORIAL*
BUTTERFL*
BLANKET*
FASTI*
ENERGUMEN*
RELIGHT
REVERSE
DISPOSSESS*
NEEDL*
EXTREMITY
EXTREMENESS
EMPIRI*
TEMPERAT*
TEMPERAN*
TEMPERAMENT
SEVERAL
HARMO*
PATTERN
DECADENT
TAXO*
TAXI*
DETERS*
DETERR*
DETERIO*
DETERG*
TRADU*
TRADI*
TESTAM*
SOWBUG*
SELF*
FACTORIAL
COUNTE*
COUNTR*
TREATMENT*
TELLU*
QUOTI*
QUOTH*
PROVIDE*
PROMISC*
METTL*
METR*
METO*
METI*
METH*
METE*
META*
MENTO*
MENTH*
MENTA*
INFORMAL
ELECTR*
CONSOLIDAT*
SOLUTE
PROVER*
MAY-1
POOLED-1
POOL-1
HEATH*
COALESC*
COALI*
UNDERGRADUAT*
UNDERC*
FALLAC*
DIVERGEN*
ARIST*
REGU*
REGR*
REGIS*
REGE*
POSITIV*
PLACEN*
PLACEB*
OVERV*
OVERL*
LIMBIC*
CHINE*
PORTU*
PORTR*
PORTM*
PORTL*
PORTI*
PORTF*
PORTE*
PORTA*
PATHO*
PATHE*
SECRETI*
SECRETO*
SECRETA*
ROLE
QUAKER
TRIPT*
TRIPLE
TRIPE
RESTR*
RESTO*
RESTI*
RESTA*
REPOSSESS*
CONTENTS*
CONTENT-*
HOARY
HOARS
HOARD*
BASKET
AWARD*
TESTIN*
TESTIF*
TESTIMON*
STALK
SPATI*
GRAPHIC*
BREADTH
"""
class RIDApp:
def usage(self, args):
print "usage: %s [-h [-t TITLE] | -d FILE | -e FILE | --add-dict=FILE | --add-exc=FILE]" % (args[0],)
print "%s reads from standard input and writes to standard output." % (args[0],)
print "options:"
print " -h Generate HTML output."
print " -t TITLE Use TITLE as the report heading."
print " -d FILE Replaces the built-in dictionary with FILE."
print " -e FILE Replaces the built-in exclusion list with FILE."
print " --add-dict=FILE Processes FILE as a category dictionary."
print " --add-exc=FILE Processes FILE as an exlusion list."
def run(self, args):
rid = RegressiveImageryDictionary()
load_default_dict = True
load_default_exc = True
html_output = False
title = "RID Analysis"
try:
optlist, args = getopt.getopt(sys.argv[1:], 'd:e:ht:',
['add-dict=', 'add-exc='])
for (o, v) in optlist:
if o == '-d':
rid.load_dictionary_from_file(v)
load_default_dict = False
elif o == '-e':
rid.load_exclusion_list_from_file(v)
load_default_exc = False
elif o == '--add-dict':
rid.load_dictionary_from_file(v)
elif o == '--add-exc':
rid.load_exclusion_list_from_file(v)
elif o == '-h':
html_output = True
elif o == '-t':
title = v
else:
sys.stderr.write("%s: illegal option '%s'\n" % (args[0], o))
self.usage(args)
except getopt.GetoptError, e:
sys.stderr.write("%s: %s\n" % (args[0], e.msg))
self.usage(args)
sys.exit(1)
if load_default_dict:
rid.load_dictionary_from_string(DEFAULT_RID_DICTIONARY)
if load_default_exc:
rid.load_exclusion_list_from_string(DEFAULT_RID_EXCLUSION_LIST)
results = rid.analyze(sys.stdin.read())
if html_output:
rid.display_results_html(results, title)
else:
rid.display_results(results)
# Script entry point: run the RID analyzer as a command-line filter
# (stdin -> analysis report on stdout).
if __name__ == '__main__':
    app = RIDApp()
    app.run(sys.argv)
#######################################################################################################
# From trac.util.compat.py
# Implementation for sorted() for Python versions prior to 2.4
try:
    # Python 2.4+: the builtin exists; keep it.
    reversed = reversed
except NameError:
    # Python < 2.4: minimal generator stand-in for the builtin.
    def reversed(x):
        """Yield the items of sequence x from last to first."""
        if hasattr(x, 'keys'):
            raise ValueError('mappings do not support reverse iteration')
        for position in range(len(x) - 1, -1, -1):
            yield x[position]
try:
    # Python 2.4+: the builtin exists; keep it.
    sorted = sorted
except NameError:
    def sorted(iterable, cmp=None, key=None, reverse=False):
        """Partial implementation of the "sorted" function from Python 2.4.

        NOTE: the cmp argument is accepted but ignored, as in the
        original partial implementation.
        """
        if key is None:
            ordered = list(iterable)
            ordered.sort()
            if reverse:
                return ordered[::-1]
            return ordered
        # Decorate-sort-undecorate; the index keeps the sort stable for
        # equal keys and prevents comparing the values themselves.
        decorated = [(key(value), position, value)
                     for position, value in enumerate(iterable)]
        decorated.sort()
        if reverse:
            decorated = reversed(decorated)
        return [entry[-1] for entry in decorated]
#######################################################################################################
# Build the module-level analyzer once, at import time, from the built-in data.
rid = RegressiveImageryDictionary()
rid.load_dictionary_from_string(DEFAULT_RID_DICTIONARY)
rid.load_exclusion_list_from_string(DEFAULT_RID_EXCLUSION_LIST)
# A list of subcategories for each top category, e.g. emotions ->
# ['anxiety', 'glory', 'positive affect', 'sadness', 'expressive behavior', 'affection', 'aggression']
primary = [key.lower() for key in rid.category_tree.children["PRIMARY"].children.keys()]
secondary = [key.lower() for key in rid.category_tree.children["SECONDARY"].children.keys()]
emotions = [key.lower() for key in rid.category_tree.children["EMOTIONS"].children.keys()]
class RIDScoreItem:
    """One scored RID category.

    Holds the category name, the number of matches found in the text, the
    matched words, and (in `type`) the lowercase name of the parent category.
    """
    def __init__(self, name, count, words, type):
        self.name = name      # lowercase category name, e.g. "anxiety"
        self.count = count    # number of matches in the analyzed text
        self.words = words    # the words that matched this category
        self.type = type      # lowercase parent category name
    def __str__(self):
        # An item prints as its category name.
        return self.name
class RIDScore(list):
    """A sorted list of RIDScoreItem objects plus, as attributes, the share
    of all matches falling under each top-level category (primary,
    secondary, emotions)."""
    def __init__(self, rid, results):
        self.primary = 0
        self.secondary = 0
        self.emotions = 0
        self.count(rid, results)
        self.populate(results)
    def count(self, rid, results):
        # Tally the match count under each top-level category
        # (PRIMARY, SECONDARY, EMOTIONS).
        roots = rid.category_tree.children
        tally = dict.fromkeys(roots.keys(), 0)
        grand_total = 0
        for category, n in results.category_count.items():
            grand_total += n
            for root_name in roots.keys():
                if category.isa(roots[root_name]):
                    tally[root_name] += n
        # Express each top-level tally as a fraction of all matches.
        if grand_total > 0:
            for root_name in tally.keys():
                tally[root_name] = float(tally[root_name]) / grand_total
        self.primary = tally["PRIMARY"]
        self.secondary = tally["SECONDARY"]
        self.emotions = tally["EMOTIONS"]
    def populate(self, results):
        # Fill self (a list) with one RIDScoreItem per matched category,
        # most frequent first, carrying the matched words along.
        ranked = sorted(results.category_count.items(),
                        key=lambda pair: pair[1], reverse=True)
        for category, n in ranked:
            self.append(RIDScoreItem(
                name=category.name.lower(),
                count=n,
                words=results.category_words[category],
                type=category.parent.name.lower()))
    def __str__(self):
        return str([str(entry) for entry in self])
def categorise(txt):
    """Analyze txt with the module-level RID and return a RIDScore."""
    return RIDScore(rid, rid.analyze(txt))
# Peter Norvig's spelling corrector, with adaptions for Python 2.3
# http://norvig.com/spell-correct.html
import os
import re
try: from collections import defaultdict
except:
from sets import Set as set
import copy
class defaultdict(dict):
    """Dictionary that returns (and stores) a default value for unknown keys.

    P. Norvig, http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/389639
    Unlike collections.defaultdict, the constructor takes the default *value*
    itself, not a factory callable.
    """
    def __init__(self, default):
        self.default = default
    def __getitem__(self, key):
        try:
            return dict.__getitem__(self, key)
        except KeyError:
            # Deep-copy so mutable defaults (e.g. []) are not shared
            # between distinct keys.
            return self.setdefault(key, copy.deepcopy(self.default))
    def __copy__(self):
        duplicate = defaultdict(self.default)
        duplicate.update(self)
        return duplicate
def max(seq, key=None):
def _cmp(a,b):
if key(b) > key(a): return 1
return -1
seq = [e for e in seq]
if key:
seq.sort(_cmp)
else:
seq.sort()
return seq[0]
def words(text):
    """Tokenize text into lowercase alphabetic words (runs of a-z)."""
    lowered = text.lower()
    return re.findall('[a-z]+', lowered)
def train(features):
    """Build a word-frequency model from an iterable of words.

    Every word starts from a pseudocount of 1 (Laplace smoothing), so words
    never seen in the corpus still get a nonzero count in correct()/suggest().
    """
    try:
        # The module's custom defaultdict takes the default *value* itself.
        model = defaultdict(1)
    except TypeError:
        # BUG FIX: was a bare `except:`; only a TypeError signals that we are
        # dealing with collections.defaultdict, which wants a factory callable.
        model = defaultdict(lambda: 1)
    for f in features:
        model[f] += 1
    return model
# Train the language model, at import time, on the word corpus shipped
# alongside this module.
# NOTE(review): the file handle from open() is never closed explicitly;
# harmless for a one-shot read at import, but worth confirming.
path = os.path.join(os.path.dirname(__file__), "spelling.txt")
NWORDS = train(words(open(path).read()))
alphabet = 'abcdefghijklmnopqrstuvwxyz'
def edits1(word):
    """Return the set of all strings exactly one edit away from word:
    a deletion, an adjacent transposition, a single-letter alteration,
    or a single-letter insertion."""
    splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
    deletes = [head + tail[1:] for head, tail in splits if tail]
    transposes = [head + tail[1] + tail[0] + tail[2:]
                  for head, tail in splits if len(tail) > 1]
    alterations = [head + ch + tail[1:]
                   for head, tail in splits if tail for ch in alphabet]
    insertions = [head + ch + tail
                  for head, tail in splits for ch in alphabet]
    return set(deletes + transposes + alterations + insertions)
def known_edits2(word):
    """Return every string two edits away from word that occurs in NWORDS.

    Written as explicit loops (not a generator expression) to stay
    compatible with Python < 2.4.
    """
    hits = set()
    for once in edits1(word):
        for twice in edits1(once):
            if twice in NWORDS:
                hits.add(twice)
    return hits
def known(words):
    """Return the subset of words that occur in NWORDS.

    Written as an explicit loop (not a generator expression) to stay
    compatible with Python < 2.4.
    """
    recognized = set()
    for w in words:
        if w in NWORDS:
            recognized.add(w)
    return recognized
def correct(word):
    """Return the most frequent known spelling for word.

    Prefers the word itself if known, then known words one edit away,
    then two edits away; falls back to the word unchanged. Each pool is
    only computed when the previous one is empty.
    """
    candidates = known([word])
    if not candidates:
        candidates = known(edits1(word))
    if not candidates:
        candidates = known_edits2(word)
    if not candidates:
        candidates = [word]
    return max(candidates, key=lambda w: NWORDS[w])
def suggest(word):
    """List plausible corrections for word, nearest edit distance first.

    Same candidate search as correct(), but returns the whole pool
    instead of the single most frequent candidate.
    """
    candidates = known([word])
    if not candidates:
        candidates = known(edits1(word))
    if not candidates:
        candidates = known_edits2(word)
    if not candidates:
        candidates = [word]
    return list(candidates)
#print suggest("beautiufl")
#print suggest("beautifull")
# COMMONSENSE - last updated for NodeBox 1.9.4
# Author: Tom De Smedt <tomdesmedt@organisms.be>
# See LICENSE.txt for details.
import numeral
import tags
import wordnet
import singular
# Connectives from a file somewhere in Darwin:
# High-frequency English words treated as connectives/stopwords.
# NOTE(review): "eople" and "u" look like corpus typos (likely "people" and
# "us"/"you"); kept as-is to preserve existing matching behavior — confirm
# against the original word list.
commonsense_connectives = [
    "I", "the", "of", "and", "to", "a", "in", "that",
    "is", "was", "he", "for", "it", "with", "as", "his",
    "on", "be", "at", "by", "i", "this", "had", "not",
    "are", "but", "from", "or", "have", "an", "they",
    "which", "one", "you", "were", "her", "all", "she",
    "there", "would", "their", "we", "him", "been", "has",
    "when", "who", "will", "more", "no", "if", "out",
    "so", "said", "what", "u", "its", "about", "into",
    "than", "them", "can", "only", "other", "new", "some",
    "could", "time", "these", "two", "may", "then", "do",
    "first", "any", "my", "now", "such", "like", "our",
    "over", "man", "me", "even", "most", "made", "after",
    "also", "did", "many", "before", "must", "through",
    "back", "years", "where", "much", "your", "way",
    "well", "down", "should", "because", "each", "just",
    "those", "eople", "mr", "how", "too", "little",
    "state", "good", "very", "make", "world", "still",
    "own", "see", "men", "work", "long", "get", "here",
    "between", "both", "life", "being", "under", "never",
    "day", "same", "another", "know", "while", "last",
    "might", "us", "great", "old", "year", "off",
    "come", "since", "against", "go", "came", "right",
    "used", "take", "three"
]
# Common connectives:
# Add explicit conjunctions and transition signals to the stopword list.
commonsense_connectives.extend([
    "whoever", "nonetheless", "therefore", "although",
    "consequently", "furthermore", "whereas",
    "nevertheless", "whatever", "however", "besides",
    "henceforward", "yet", "until", "alternatively",
    "meanwhile", "notwithstanding", "whenever",
    "moreover", "despite", "similarly", "firstly",
    "secondly", "lastly", "eventually", "gradually",
    "finally", "thus", "hence", "accordingly",
    "otherwise", "indeed", "though", "unless"
])
def is_connective(word):
    """ Guesses whether the word is a connective.
    Connectives are conjunctions such as "and", "or", "but",
    transition signals such as "moreover", "finally",
    and words like "I", "she".
    It's useful to filter out connectives
    when guessing the concept of a piece of text:
    you don't want "whatever" to be the most important word
    parsed from a text.
    """
    return word.lower() in commonsense_connectives
def sentence_keywords(str, top=10, nouns=True, singularize=True, filters=None):
    """ Guesses keywords in a piece of text.
    Strips delimiters from the text and counts word occurrences.
    By default, uses WordNet to keep only nouns,
    and furthermore ignores connectives, numbers and tags.
    By default, attempts to singularize nouns.
    The return value is a list (length defined by top)
    of (count, word) tuples.
    For example:
        from urllib import urlopen
        html = urlopen("http://news.bbc.co.uk/").read()
        meta = ["news", "health", "uk", "version", "weather", "video", "sport", "return", "read", "help"]
        print sentence_keywords(html, filters=meta)
        >>> [(6, 'funeral'), (5, 'beirut'), (3, 'war'), (3, 'service'), (3, 'radio'), (3, 'mull'), (3, 'lebanon'), (3, 'islamist'), (3, 'function'), (3, 'female')]
    """
    # NOTE: the first parameter is named `str` for backward compatibility;
    # alias it immediately so we stop shadowing the builtin.
    # BUG FIX: the default for `filters` was a shared mutable list ([]);
    # use None and create a fresh list per call.
    if filters is None:
        filters = []
    text = tags.strip_tags(str)
    text = text.replace("\n", " ")
    count = {}
    for word in text.split(" "):
        word = word.lower()
        # Strip surrounding punctuation.
        # Do this a number of times to counter typos like:: this.
        for i in range(10):
            word = word.strip("(){}[]'\"\r\n\t,.?!;:-*/ ")
        # Determine nouns using WordNet; attempt singularization first.
        noun = False
        if nouns == True:
            if singularize and len(word) > 3:
                # Compute the singular form once (the original called
                # singular.singular() twice for the same word).
                singular_form = singular.singular(word)
                if wordnet.is_noun(singular_form):
                    noun = True
                    word = singular_form
            if not noun and wordnet.is_noun(word):
                noun = True
        # Filter for connectives, numbers, tags
        # and (by default) keep only nouns.
        if len(word) > 1 \
        and not word in filters \
        and not is_connective(word) \
        and not numeral.is_number(word) \
        and not tags.is_tag(word) \
        and (not nouns or noun):
            count[word] = count.get(word, 0) + 1
    # Rank by count, descending. (The original named this local `sorted`,
    # shadowing the module-level sorted().)
    ranked = [(n, word) for word, n in count.items()]
    ranked.sort()
    ranked.reverse()
    return ranked[:top]
#from urllib import urlopen
#html = urlopen("http://nodebox.net/code/index.php/Ideas_from_the_Heart").read()
#print sentence_keywords(html, singularize=True)
#>>> [(19, 'agent'), (12, 'creativity'), (12, 'art'), (11, 'design'), (11, 'computer'), (10, 'something'), (10, 'composition'), (9, 'concept'), (8, 'problem'), (7, 'need')]
# From ConceptNetNLTools:
# some common words associated with each Paul Ekman's basic emotions.
# Paul Ekman's six basic emotions, and for each a naive list of common
# words associated with it (parallel lists: commonsense_naive_ekman[i]
# belongs to commonsense_ekman[i]).
# NOTE(review): "exercration" looks like a typo for "execration";
# kept as-is to preserve existing matching behavior.
commonsense_ekman = ["anger", "disgust", "fear", "joy", "sadness", "surprise"]
commonsense_naive_ekman = [
    ["anger", "angered", "upset", "mad", "angry", "angriness"],
    ["disgust", "disgusted", "dislike", "abhorrence", "abomination", "detest", "detestation", "exercration", "loathe", "loathing", "odium", "hate", "repugnance", "repulsion", "revulsion", "horror"],
    ["fear", "fearful", "fright", "scared", "feared", "scare", "frighten", "frightened", "anxious", "anxiety", "panic", "terror", "horror", "intimidation", "creep", "chill", "shiver", "frisson", "danger", "dangerous"],
    ["joy", "happy", "happiness", "joyful", "joyfulness", "cheer", "cheerful", "cheerfulness", "smile"],
    ["sadness", "sad", "despair", "depressed", "depression"],
    ["surprise", "surprised", "surprising", "surprisal", "astonish", "amazement", "amaze", "excite", "excitement", "exciting", "shock", "stun", "stunning", "shocking", "bombshell", "unexpected", "sudden", "thrill", "tingle"]
]
def is_basic_emotion(word):
    """ Returns True if the word occurs in the list of basic emotions.
    """
    # Normalise case and whitespace, then test membership directly.
    return word.lower().strip() in commonsense_ekman
def is_emotion(word, shallow=False, pos=None, boolean=True):
    """ Guesses whether the word expresses an emotion.
    Returns True when the word is an emotion.
    When the boolean parameter is set to False,
    returns either None or a string hinting at the
    emotion the word expresses.
    For example:
    print is_emotion("blub", pos=wordnet.VERBS, boolean=False)
    >>> weep
    Preferably the return value would be an is_basic_emotion().
    """
    def _return(value):
        # Collapse the found emotion hint (or None) to True/False
        # when the caller asked for a boolean answer.
        if boolean and value != None:
            return True
        elif boolean:
            return False
        else:
            return value
    # Seed words appropriate to the requested part of speech.
    # NOTE(review): if pos is anything other than None / NOUNS / VERBS /
    # ADJECTIVES / ADVERBS, no branch runs and ekman/other are undefined
    # below (NameError) -- confirm pos is always one of those constants.
    if pos == None or pos == wordnet.NOUNS:
        ekman = ["anger", "disgust", "fear", "joy", "sadness", "surprise"]
        other = ["emotion", "feeling", "expression"]
    if pos == wordnet.VERBS:
        ekman = ["anger", "disgust", "fear", "enjoy", "sadden", "surprise"]
        other = ["empathize", "feel", "express emotion", "express"]
    if pos == wordnet.ADJECTIVES or pos == wordnet.ADVERBS:
        ekman = ["angry", "disgusted", "fearful", "happy", "sad", "surprised"]
        other = ["emotional"]
    word = word.lower().strip()
    # Check the naive lists first.
    for i in range(len(commonsense_naive_ekman)):
        if word in commonsense_naive_ekman[i]:
            return _return(commonsense_ekman[i])
    # Fair competition:
    # if we shuffle the list we have an equal speed
    # for each Ekman emotion to scan.
    # (Python 2: range() returns a list, so it can be shuffled in place.)
    from random import shuffle
    indices = range(len(ekman))
    shuffle(indices)
    # For each Ekman emotion,
    # take all of its senses,
    # and check the hyponyms of that sense.
    for i in indices:
        emotion = ekman[i]
        s = wordnet.senses(emotion, pos)
        for j in range(len(s)):
            # Each sense s[j] is presumably a list of synonyms -- TODO confirm
            # against the wordnet module.
            if word in s[j]:
                return _return(commonsense_ekman[i])
            h = wordnet.hyponyms(emotion, j, pos)
            h = wordnet.flatten(h)
            if word in h:
                return _return(commonsense_ekman[i])
    # Maybe we get lucky and WordNet has tagged
    # the word as a feeling.
    if shallow and wordnet.lexname(word, 0, pos) == "feeling":
        return _return("feeling")
    # Take a generalised word like "emotion"
    # and traverse its hyponyms.
    # When performing a deep search,
    # traverse the hyponyms of those hyponyms as well.
    # Example: "yearning" -> "desire" -> "feeling"
    for emotion in other:
        for w in wordnet.flatten(wordnet.hyponyms(emotion, 0, pos)):
            if word == w:
                return _return(emotion)
            if not shallow:
                # NOTE(review): wordnet.hyponym (singular) here, while the
                # pass above uses wordnet.hyponyms -- verify this one-level
                # lookup is intended and not a typo.
                if word in wordnet.flatten(wordnet.hyponym(w, 0, pos)):
                    return _return(w)
    return _return(None)
def noun_is_emotion(word, shallow=False, boolean=True):
    """ Convenience wrapper: is_emotion() restricted to nouns. """
    return is_emotion(word, shallow, wordnet.NOUNS, boolean)
def verb_is_emotion(word, shallow=False, boolean=True):
    """ Convenience wrapper: is_emotion() restricted to verbs. """
    return is_emotion(word, shallow, wordnet.VERBS, boolean)
def adjective_is_emotion(word, shallow=False, boolean=True):
    """ Convenience wrapper: is_emotion() restricted to adjectives. """
    return is_emotion(word, shallow, wordnet.ADJECTIVES, boolean)
def adverb_is_emotion(word, shallow=False, boolean=True):
    """ Convenience wrapper: is_emotion() restricted to adverbs. """
    return is_emotion(word, shallow, wordnet.ADVERBS, boolean)
#print noun_is_emotion("grass")
#print noun_is_emotion("rage", boolean=False)
#print adjective_is_emotion("anxious", boolean=False)
#print verb_is_emotion("snivel", boolean=False)
# So-called "magic" words that evoke a strong emotional response.
commonsense_persuasive_nouns = [
    "you", "money", "save", "new", "results", "health", "easy",
    "safety", "love", "discovery", "proven", "guarantee", "free",
    "important", "because", "together", "secrets"
]
def is_persuasive(word):
    """ Words that evoke powerful emotions.
    They have been attributed to research at various universities
    but I can't find a real source.
    """
    return word in commonsense_persuasive_nouns
# TAGS - last updated for NodeBox 1rc7
# Author: Tom De Smedt <tomdesmedt@organisms.be>
# See LICENSE.txt for details.
# Lowercased names of known HTML tags (plus the "!--" comment and
# "!doctype" markers); consulted by is_html_tag() to separate real HTML
# from arbitrary <...> markup.
tags_html = [
    "!--", "!doctype", "a", "abbr", "acronym", "address",
    "applet", "area", "b", "base", "basefont", "bdo",
    "big", "blockquote", "body", "br", "button", "caption",
    "center", "cite", "code", "col", "colgroup", "dd", "del",
    "dir", "div", "dfn", "dl", "dt", "em", "fieldset",
    "font", "form", "frame", "frameset", "h1", "h2", "h3",
    "h4", "h5", "h6", "head", "hr", "html", "i", "iframe",
    "img", "input", "ins", "isindex", "kbd", "label",
    "legend", "li", "link", "map", "menu", "meta",
    "noframes", "noscript", "object", "ol", "optgroup",
    "option", "p", "param", "pre", "q", "s", "samp",
    "script", "select", "small", "span", "strike",
    "strong", "style", "sub", "sup", "table", "tbody",
    "td", "textarea", "tfoot", "th", "thead", "title",
    "tr", "tt", "u", "ul", "var", "xmp"
]
def is_tag(str):
    """ Returns True when the string is delimited like a tag: <...> """
    # Parameter name shadows the builtin; kept for API compatibility.
    return str.startswith("<") and str.endswith(">")
def is_html_tag(str):
    """ Guesses whether the word is a HTML tag.
    Checks if the string is a tag,
    and if the tag name is in the list of HTML entities.
    """
    if not is_tag(str):
        return False
    name = str.strip("</>").lower()
    # Drop attributes, keeping only the tag name: "a href='...'" -> "a".
    cut = name.find(" ")
    if cut > 0:
        name = name[:cut]
    return name in tags_html
#print is_html_tag("</HTML>")
#print is_html_tag("<a href>")
#print is_html_tag("<xml>")
import sgmllib
class TagStripper(sgmllib.SGMLParser):
    """ SGML parser that discards all markup and keeps only text content.
    NOTE(review): sgmllib exists only in Python 2 (removed in Python 3);
    a port would use html.parser.HTMLParser instead.
    """
    def __init__(self):
        sgmllib.SGMLParser.__init__(self)
    def strip(self, html):
        # Reset the buffer, feed the markup and return the collected text.
        self.data = ""
        self.feed(html)
        self.close()
        return self.data
    def handle_data(self, data):
        # Called by SGMLParser for each run of character data between tags;
        # a space is appended so adjacent text nodes don't run together.
        self.data += data + " "
def strip_tags(str, clean=True):
    """ Strips all tags from the given markup, returning only the text.
    Runs of spaces are collapsed to a single space.
    When clean is True, blank lines are removed and the remaining
    lines are stripped of surrounding whitespace.
    """
    s = TagStripper()
    str = s.strip(str)
    import re
    str = re.sub("[ ]+", " ", str)
    if clean:
        # Keep only non-empty lines, each trimmed.
        # (Joining once avoids the quadratic += string building of the
        # original; the original's dangling "str.strip().strip()" statement
        # was a no-op -- strings are immutable -- and has been removed.)
        lines = [l.strip() for l in str.split("\n") if l.strip()]
        str = "\n".join(lines)
    return str.strip()
#from urllib import urlopen
#html = urlopen("http://news.bbc.co.uk/").read()
#html = open("bbc.txt", "r").read()
#print strip_tags(html)
### CREDITS ##########################################################################################
# Copyright (c) 2007 Tom De Smedt.
# See LICENSE.txt for details.
__author__ = "Tom De Smedt"
__version__ = "1.9.4.2"
__copyright__ = "Copyright (c) 2007 Tom De Smedt"
__license__ = "GPL"
### NODEBOX ENGLISH LINGUISTICS ######################################################################
# The Nodebox English Linguistics library adds grammar inflection and semantic operations to NodeBox.
# You can use it to conjugate verbs, pluralize nouns, write out numbers, find dictionary descriptions
# and synonyms for words, summarise texts and parse grammatical structure from sentences.
# The library bundles WordNet, NLTK, Damian Conway's pluralisation rules, Jason Wiener's Brill tagger,
# several algorithms adopted from Michael Granger's Ruby Linguistics module,
# John Wisemans implementation of the Regressive Imagery Dictionary, and
# Peter Norvig's spelling corrector.
######################################################################################################
import article
import commonsense
import numeral
import ordinal
import parser
import singular
import plural
import quantify
import tags
import verb as verb_lib
import wordnet
import rid
import spelling
import ogden
# Thin module-level wrappers exposing the lexical checks as en.is_number(),
# en.is_noun(), ... -- each delegates to the relevant submodule.
def is_number(value):
    """ True for numeric values and spelled-out numbers (see numeral). """
    return numeral.is_number(value)
def is_noun(word):
    return wordnet.is_noun(word)
def is_verb(word):
    return wordnet.is_verb(word)
def is_adjective(word):
    return wordnet.is_adjective(word)
def is_adverb(word):
    return wordnet.is_adverb(word)
def is_tag(value):
    """ True for any <...> delimited tag (see tags). """
    return tags.is_tag(value)
def is_html_tag(value):
    return tags.is_html_tag(value)
def is_connective(word):
    return commonsense.is_connective(word)
def is_basic_emotion(word):
    return commonsense.is_basic_emotion(word)
def is_persuasive(word):
    return commonsense.is_persuasive(word)
class number:
    """ Grouping object for number commands (the en.number API). """
    def ordinal(self, number):
        return ordinal.ordinal(number)
    def spoken(self, number):
        return numeral.spoken_number(number)
    def quantify(self, number, word):
        # Note the argument swap: quantify.quantify takes (word, number).
        return quantify.quantify(word, number)
class list:
    """ Grouping object for list commands (the en.list API).
    NOTE(review): deliberately shadows the builtin list in this module.
    """
    def conjunction(self, list, generalize=False):
        return quantify.conjunction(list, generalize)
    def flatten(self, list):
        return wordnet.flatten(list)
class noun:
    """ Grouping object for noun commands (the en.noun API). """
    def article(self, word):
        return article.article(word)
    def singular(self, word, custom={}):
        # custom maps words to override inflections; the mutable default {}
        # is never mutated downstream (passed through only), so it is
        # harmless here -- kept for interface compatibility.
        return singular.noun_singular(word, custom)
    def plural(self, word, classical=True, custom={}):
        return plural.noun_plural(word, classical, custom)
    def is_emotion(self, word, shallow=False, boolean=True):
        return commonsense.noun_is_emotion(word, shallow, boolean)
class verb:
    """ Grouping object for verb conjugation commands (the en.verb API).
    All methods delegate to the verb module (imported as verb_lib).
    """
    def infinitive(self, word):
        return verb_lib.verb_infinitive(word)
    def conjugate(self, word, tense="infinitive", negate=False):
        return verb_lib.verb_conjugate(word, tense, negate)
    def present(self, word, person="", negate=False):
        return verb_lib.verb_present(word, person, negate)
    def present_participle(self, word):
        return verb_lib.verb_present_participle(word)
    def past(self, word, person="", negate=False):
        return verb_lib.verb_past(word, person, negate)
    def past_participle(self, word):
        return verb_lib.verb_past_participle(word)
    def tenses(self):
        return verb_lib.verb_all_tenses()
    def tense(self, word):
        return verb_lib.verb_tense(word)
    def is_tense(self, word, tense, negated=False):
        return verb_lib.verb_is_tense(word, tense, negated)
    def is_present(self, word, person="", negated=False):
        return verb_lib.verb_is_present(word, person, negated)
    def is_present_participle(self, word):
        return verb_lib.verb_is_present_participle(word)
    def is_past(self, word, person="", negated=False):
        return verb_lib.verb_is_past(word, person, negated)
    def is_past_participle(self, word):
        return verb_lib.verb_is_past_participle(word)
    def is_emotion(self, word, shallow=False, boolean=True):
        return commonsense.verb_is_emotion(word, shallow, boolean)
class adjective:
    """ Grouping object for adjective commands (the en.adjective API). """
    def plural(self, word, classical=True, custom={}):
        return plural.adjective_plural(word, classical, custom)
    def is_emotion(self, word, shallow=False, boolean=True):
        return commonsense.adjective_is_emotion(word, shallow, boolean)
class adverb:
    """ Grouping object for adverb commands (the en.adverb API). """
    def is_emotion(self, word, shallow=False, boolean=True):
        return commonsense.adverb_is_emotion(word, shallow, boolean)
class sentence:
    """ Grouping object for shallow-parsing commands (the en.sentence API). """
    def tag(self, sentence):
        return parser.sentence_tag(sentence)
    def chunk(self, sentence):
        return parser.sentence_chunk(sentence)
    def chunk_rules(self, list=None):
        # Getter/setter in one: returns the parser's chunk rules when
        # called without an argument, otherwise installs the given rules
        # (and returns None).
        if list == None:
            return parser.chunk_rules
        else:
            parser.chunk_rules = list
    def traverse(self, sentence, f):
        parser.sentence_traverse(sentence, f)
    def find(self, sentence, pattern, chunked=True):
        return parser.sentence_find(sentence, pattern, chunked)
    def tag_description(self, postag):
        return parser.tag_description(postag)
class content:
    """ Grouping object for text commands: tag stripping, keyword
    extraction and Regressive Imagery Dictionary categorisation. """
    def strip_tags(self, txt, clean=True):
        return tags.strip_tags(txt, clean)
    def keywords(self, str, top=10, nouns=True, singularize=True, filters=[]):
        return commonsense.sentence_keywords(str, top, nouns, singularize, filters)
    def categorise(self, str):
        return rid.categorise(str)
# Replace each grouping class by a singleton instance: the public API is
# en.number.ordinal(), en.noun.plural(), etc.
number = number()
list = list()
noun = noun()
verb = verb()
adjective = adjective()
adverb = adverb()
sentence = sentence()
content = content()
# Attach the WordNet lookup commands to each part-of-speech singleton,
# e.g. en.noun.gloss -> wordnet.noun_gloss, en.verb.senses -> wordnet.verb_senses.
def_prefixes = {
    "noun" : noun,
    "verb" : verb,
    "adjective" : adjective,
    "adverb" : adverb
}
defs = ["count_senses", "senses", "gloss", "lexname",
        "hyponym", "hyponyms", "hypernym", "hypernyms",
        "antonym", "meronym", "holonym", "meet", "absurd_gloss"]
for p in def_prefixes:
    for f in defs:
        # getattr on the already-imported wordnet module is equivalent to the
        # original eval("wordnet."+p+"_"+f) but avoids eval entirely.
        setattr(def_prefixes[p], f, getattr(wordnet, p + "_" + f))
basic = ogden
# Put the en library in the same folder as your script so NodeBox can find
# the library. It takes some time to load all the data the first time.
try:
    # This is the statement you normally use.
    # NOTE(review): ximport is injected by the NodeBox environment;
    # it is not a Python builtin.
    en = ximport("en")
except:
    # But since these examples are "inside" the library
    # we may need to try something different when
    # the library is not located in /Application Support.
    # (The bare except is a deliberate catch-all fallback, but it also
    # hides genuine import errors inside "en".)
    en = ximport("__init__")
# LEXICAL CATEGORIZATION ############################################################
# Returns True when the given value is a number.
print 1, en.is_number(12)
print 2, en.is_number("twelve")
# Returns True when the given string is a noun.
# You can also check for is_verb(), is_adjective() and is_adverb().
print 3, en.is_noun("banana")
# Returns True when the given string is a tag,
# for example HTML or XML.
print 4, en.is_tag("</a>")
# Return True when the string is a HTML tag,
# for example <a> or <body>.
print 5, en.is_html_tag("</person>")
# COMMONSENSE #######################################################################
# Returns True if the given word expresses a basic emotion:
# anger, disgust, fear, joy, sadness, surprise.
print 6, en.is_basic_emotion("cheerful")
# Returns True if the given word is a magic word:
# you, money, save, new, results, health, easy, ...
print 7, en.is_persuasive("money")
# Returns True if the word is a connective:
# nevertheless, whatever, secondly, ...
# and words like I, the, own, him which have little semantical value.
print 8, en.is_connective("but")
# NUMBERS ###########################################################################
# Returns the ordinal of the given number,
# 100 -> 100th, 3 -> 3rd
# twenty-one -> twenty-first
print 9, en.number.ordinal(100)
print 10, en.number.ordinal("twenty-one")
# Writes out the given number:
# 25 -> twenty-five
print 11, en.number.spoken(25)
# QUANTIFICATION ####################################################################
# Quantifies the given word:
# 10 and chickens -> a number of chickens
# 300 and chickens -> hundreds of chickens
print 12, en.number.quantify(800, "chicken")
# Quantifies a list of words:
# several chickens, a pair of geese and a duck
# Notice how goose is correctly pluralized and duck has the right article.
print 13, en.list.conjunction(["goose", "goose", "duck", "chicken", "chicken", "chicken"])
# Quantifies the types of things in the given list:
# several integers
print 14, en.list.conjunction((1,2,3,4,5), generalize=True)
# You can also quantify a library:
# en.list.conjunction(en, generalize=True) ->
# a number of modules, a number of functions, a number of strings,
# a pair of lists, a pair of dictionaries, an en verb, an en sentence,
# an en number, an en noun, an en list, an en content, an en adverb,
# an en adjective, a None type and a DrawingPrimitives Context
# INDEFINITE ARTICLE ################################################################
# Returns the noun with its indefinite article
# university -> a university
# owl -> an owl
# hour -> an hour
print 15, en.noun.article("university")
# PLURALIZATION #####################################################################
# Pluralizes the given noun:
# kitchen knife -> kitchen knives
# part-of-speech -> parts-of-speech
# wolf -> wolves
# child -> children
# You can also do en.adjective.plural().
print 16, en.noun.plural("dog")
# EMOTIONAL VALUE ###################################################################
# Guesses whether the given noun expresses an emotion,
# by checking if there are synonyms of the word that
# are basic emotions.
# Return True or False by default.
print 17, en.noun.is_emotion("anger")
# Or you can return a string which provides some information
# anxious -> fear
# An additional optional parameter shallow=True
# speeds up the lookup process but doesn't check as many synonyms.
# You can also use verb.is_emotion(), adjective.is_emotion() and adverb.is_emotion()
print 18, en.adjective.is_emotion("anxious", boolean=False)
# WORDNET ###########################################################################
# WordNet describes semantic relations between synonym sets.
# Returns the dictionary description:
print 19, en.noun.gloss("book")
# A word can have multiple senses,
# for example "tree" can mean a tree in a forest but also a tree diagram,
# or a person named Sir Herbert Beerbohm Tree:
print 20, en.noun.senses("tree")
# Return the dictionary entry for tree as in tree diagram:
print 21, en.noun.gloss("tree", sense=1)
# Return a categorization for the given word:
# book -> communication
print 22, en.noun.lexname("book")
# Return examples of the given word:
# vehicle -> bumper car, craft, rocket, skibob, sled, steamroller, ...
print 23, en.noun.hyponym("vehicle")
print 24, en.noun.hyponym("tree", sense=1)
# Return abstractions of the given word:
# earth -> terrestrial planet
# earth as in dirt -> material
print 25, en.noun.hypernym("earth")
print 26, en.noun.hypernym("earth", sense=1)
# You can also execute a deep query on hypernyms and hyponyms.
# Notice how returned values become more and more abstract:
# vehicle -> transport -> instrumentation -> artifact -> unit -> physical object -> entity
print 27, en.noun.hypernyms("vehicle", sense=0)
# Return components of the given word:
# computer -> chip, diskette, keyboard, monitor, ...
print 28, en.noun.holonym("computer")
# Return the collection in which the given word can be found:
# tree -> forest
print 29, en.noun.meronym("tree")
# Return the semantic opposite of the word:
# black -> white
print 30, en.noun.antonym("black")
# Find out what two words have in common:
# cat and dog -> carnivore
print 31, en.noun.meet("cat", "dog", sense1=0, sense2=0)
# Return an absurd description for the word:
# typography -> a business deal on a trivial scale
print 32, en.noun.absurd_gloss("typography")
# The return value of a WordNet command is usually a list
# containing other lists of related words, for example:
# [['tree'], ['tree', 'tree diagram'], ['Tree', 'Sir Herbert Beerbohm Tree']]
# You can use the en.list.flatten() command to flatten the list:
print 33, en.list.flatten(en.noun.senses("tree"))
# -> ['tree', 'tree', 'tree diagram', 'Tree', 'Sir Herbert Beerbohm Tree']
# If you want a list of all nouns/verbs/adjectives/adverbs there's the
# en.wordnet.all_nouns(), en.wordnet.all_verbs() ... commands:
print 34, len(en.wordnet.all_nouns())
# All of the commands shown here for nouns are also available for verbs, adjectives and adverbs,
# en.verbs.hypernyms("run"), en.adjective.gloss("beautiful") etc. are valid commands.
# VERB CONJUGATION ##################################################################
# NodeBox English Linguistics knows the verb tenses for about 10000 English verbs.
# Return the infinitive:
# swimming -> swim
print 35, en.verb.infinitive("swimming")
# Return the present tense, for the given person:
# gave -> give
# gave -> he gives
print 36, en.verb.present("gave")
print 37, en.verb.present("gave", person=3, negate=False)
# Known values for person are 1, 2, 3, "1st", "2nd", "3rd", "plural", "*".
# Just use the one you like most.
# Return the present participle tense
# be -> being
print 38, en.verb.present_participle("be")
# Return the past tense:
# give -> gave
# be -> I wasn't
print 39, en.verb.past("give")
print 40, en.verb.past("be", person=1, negate=True)
# Return the past participle tense:
# be -> been
print 41, en.verb.past_participle("be")
# a list of all possible tenses:
print 42, en.verb.tenses()
# Returns the tense of the given verb:
# was -> 1st singular past
print 43, en.verb.tense("was")
# Returns True if the given verb is in the given tense:
print 44, en.verb.is_tense("wasn't", "1st singular past", negated=True)
print 45, en.verb.is_present("does", person=1)
print 46, en.verb.is_present_participle("doing")
print 47, en.verb.is_past_participle("done")
# SHALLOW PARSING ###################################################################
# NodeBox English Linguistics is able to do sentence structure analysis using a
# combination of Jason Wiener's tagger and NLTK's chunker.
# The tagger assigns a part-of-speech tag to each word in the sentence using Brill's
# lexicon. A "postag" is something like NN or VBP marking words as nouns, verbs,
# determiners, pronouns, etc. The chunker is then able to group syntactic units
# in the sentence. A syntactic unit is a determiner followed by adjectives followed
# by a noun, for example, "the tasty little chicken" is a syntactic unit.
# Tag the given sentence.
# The return value is a list of (word, tag) tuples.
print 48, en.sentence.tag("this is so cool")
# -> this/DT is/VBZ so/RB cool/JJ
# There are lots of part-of-speech tags and it takes some time getting to know them.
# This function returns a (description, examples) tuple for a given tag:
# NN -> ('noun, singular or mass', 'tiger, chair, laughter')
print 49, en.sentence.tag_description("NN")
# Returns the chunked sentence:
# For example:
# we are going to school ->
# [['SP',
# ['NP', ('we', 'PRP')],
# ['AP',
# ['VP', ('are', 'VBP'), ('going', 'VBG'), ('to', 'TO')],
# ['NP', ('school', 'NN')]]]]
# Now what does all this mean?
# NP are noun phrases, syntactic units describing a noun, for example: a big fish.
# VP are verb phrases, units of verbs and auxiliaries, for example: are going to.
# AP is a verb/argument structure, a verb phrase and a noun phrase being influenced.
# SP is a subject structure: a noun phrase which is the executor of a verb phrase
# or verb/argument structure.
from pprint import pprint
print 50
pprint( en.sentence.chunk("he is always trying to feed her with lies") )
# A handy traverse(sentence, cmd) command lets you feed a chunked sentence
# to your own command chunk by chunk:
print 51
s = "we are going to school"
def callback(chunk, token, tag):
if chunk != None : print en.sentence.tag_description(chunk)[0].upper()
if chunk == None : print token, "("+en.sentence.tag_description(tag)[0]+")"
print ""
en.sentence.traverse(s, callback)
print ""
# Find tag patterns in sentences.
print 52, en.sentence.find("The quick brown fox jumped over the lazy dog?", "(JJ) JJ NN")
print 53, en.sentence.find("The hairy hamsters visited the cruel dentist.", "JJ NN", chunked=False)
print 54, en.sentence.find("All sorts of strange and weird and mysterious things happened.", "JJ and JJ NN")
print 55, en.sentence.find("All sorts of strange and weird and mysterious things happened.", "JJ and JJ (NN)")
print 56, en.sentence.find("Hairy hamsters are animals, mammals, funny creatures, or just very cool things.", "(very) (JJ) NN", chunked=False)
print 57, en.sentence.find("Wildcards are pretty wild.", "wild*", chunked=False)
print 58, en.sentence.find("Hamsters, hairy hamsters, funny hairy hamsters!", "(JJ) (JJ) NN", chunked=False)
# If you want you could feed this command with a list of your own
# regular expression units to chunk, mine are pretty basic as I'm not a linguist.
print 59, en.sentence.chunk_rules()
# SUMMARISATION #####################################################################
# NodeBox English Linguistics is able to strip keywords from a given text.
txt = """
Art can describe several kinds of things: a study of creative skill, a process of
using the creative skill, a product of the creative skill, or the audience’s
experiencing of the creative skill. The creative arts (“art”’ as discipline) are
a collection of disciplines (“arts”) which produce artworks (“art” as objects) that
is compelled by a personal drive (“art” as activity) and echoes or reflects a message,
mood, or symbolism for the viewer to interpret (“art” as experience). Artworks can
be defined by purposeful, creative interpretations of limitless concepts or ideas in
order to communicate something to another person. Artworks can be explicitly made for
this purpose or interpreted based on images or objects.
Art is something that visually stimulates an individual's thoughts, emotions, beliefs
or ideas. Art is a realized expression of an idea-it can take many different forms
and serve many different purposes.
"""
print 60, en.content.keywords(txt, top=10, nouns=True, singularize=True, filters=[])
# Guesses a list of words that frequently occur in the given text.
# The return value is a list (length defined by top) of (count, word) tuples.
# When nouns is True, returns only nouns. The command also ignores connectives,
# numbers and tags.
# When singularize is True, attempts to singularize nouns in the text.
# The optional filters parameter is a list of words which the command should ignore.
# Assuming you would want to summarise web content you can use en.content.strip_tags()
# to strip out HTML and keep only textual content:
print 61, en.content.strip_tags("<a href='http://nodebox.net'>NodeBox</a>")
# For example:
# from urllib import urlopen
# html = urlopen("http://news.bbc.co.uk/").read()
# meta = ["news", "health", "uk", "version", "weather", "video", "sport", "return", "read", "help"]
# print sentence_keywords(html, filters=meta)
# -> [(6, 'funeral'), (5, 'beirut'), (3, 'war'), (3, 'service'), (3, 'radio'), (3, 'mull'),
# (3, 'lebanon'), (3, 'islamist'), (3, 'function'), (3, 'female')]
# SPELLING CORRECTION ###############################################################
print 62, en.spelling.suggest("elehpant")
print 63, en.spelling.correct("kebyoard")
# SINGULAR - last updated for NodeBox 1.9.4
# Adapted from Bermi Ferrer's Inflector for Python:
# http://www.bermi.org/inflector/
# Copyright (c) 2006 Bermi Ferrer Martinez
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software to deal in this software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of this software, and to permit
# persons to whom this software is furnished to do so, subject to the following
# condition:
#
# THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THIS SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THIS SOFTWARE.
import re
# Singularization rules, applied in order: the first pattern that matches
# the word rewrites it (see singular() below).
singular_rules = [
    ['(?i)(.)ae$' , '\\1a'],
    ['(?i)(.)itis$' , '\\1itis'],
    ['(?i)(.)eaux$' , '\\1eau'],
    ['(?i)(quiz)zes$' , '\\1'],
    ['(?i)(matr)ices$' , '\\1ix'],
    ['(?i)(vert|ind)ices$' , '\\1ex'],
    ['(?i)^(ox)en' , '\\1'],
    ['(?i)(alias|status)es$' , '\\1'],
    # Fixed: "[octop|vir]" was a character class matching any single one of
    # those letters; "(octop|vir)" is the intended alternation.
    ['(?i)(octop|vir)i$' , '\\1us'],
    ['(?i)(cris|ax|test)es$' , '\\1is'],
    ['(?i)(shoe)s$' , '\\1'],
    ['(?i)(o)es$' , '\\1'],
    ['(?i)(bus)es$' , '\\1'],
    ['(?i)([ml])ice$' , '\\1ouse'],
    ['(?i)(x|ch|ss|sh)es$' , '\\1'],
    ['(?i)(m)ovies$' , '\\1ovie'],
    # Fixed: the replacement referenced group 1 but the pattern had no
    # group, raising "invalid group reference" whenever it matched.
    ['(?i)(.)ombies$' , '\\1ombie'],
    ['(?i)(s)eries$' , '\\1eries'],
    ['(?i)([^aeiouy]|qu)ies$' , '\\1y'],
    # Certain words ending in -f or -fe take -ves in the plural (lives, wolves).
    ["([aeo]l)ves$", "\\1f"],
    ["([^d]ea)ves$", "\\1f"],
    ["arves$", "arf"],
    ["erves$", "erve"],
    ["([nlw]i)ves$", "\\1fe"],
    ['(?i)([lr])ves$' , '\\1f'],
    ["([aeo])ves$", "\\1ve"],
    ['(?i)(sive)s$' , '\\1'],
    ['(?i)(tive)s$' , '\\1'],
    ['(?i)(hive)s$' , '\\1'],
    ['(?i)([^f])ves$' , '\\1fe'],
    ['(?i)(^analy)ses$' , '\\1sis'],
    ['(?i)((a)naly|(b)a|(d)iagno|(p)arenthe|(p)rogno|(s)ynop|(t)he)ses$' , '\\1\\2sis'],
    ['(?i)(.)opses$' , '\\1opsis'],
    ['(?i)(.)yses$' , '\\1ysis'],
    ['(?i)(h|d|r|o|n|b|cl|p)oses$' , '\\1ose'],
    ['(?i)(fruct|gluc|galact|lact|ket|malt|rib|sacchar|cellul)ose$' , '\\1ose'],
    ['(?i)(.)oses$' , '\\1osis'],
    ['(?i)([ti])a$' , '\\1um'],
    ['(?i)(n)ews$' , '\\1ews'],
    ['(?i)s$' , ''],
]
# Plural forms identical to the singular: returned unchanged.
singular_uninflected = ["bison", "bream", "breeches", "britches", "carp", "chassis", "clippers", "cod", "contretemps", "corps", "debris", "diabetes", "djinn", "eland", "elk", "flounder", "gallows", "graffiti", "headquarters", "herpes", "high-jinks", "homework", "innings", "jackanapes", "mackerel", "measles", "mews", "mumps", "news", "pincers", "pliers", "proceedings", "rabies", "salmon", "scissors", "series", "shears", "species", "swine", "trout", "tuna", "whiting", "wildebeest"]
# Mass nouns with no countable singular/plural distinction: returned unchanged.
singular_uncountable = ["advice", "bread", "butter", "cheese", "electricity", "equipment", "fruit", "furniture", "garbage", "gravel", "happiness", "information", "ketchup", "knowledge", "love", "luggage", "mathematics", "mayonnaise", "meat", "mustard", "news", "progress", "research", "rice", "sand", "software", "understanding", "water"]
# Singulars ending in -ie, so "birdies" -> "birdie" rather than "birdy".
# Entries starting with "^" are meant to match the whole word only.
singular_ie = ["algerie", "auntie", "beanie", "birdie", "bogie", "bombie", "bookie", "cookie", "cutie", "doggie", "eyrie", "freebie", "goonie", "groupie", "hankie", "hippie", "hoagie", "hottie", "indie", "junkie", "laddie", "laramie", "lingerie", "meanie", "nightie", "oldie", "^pie", "pixie", "quickie", "reverie", "rookie", "softie", "sortie", "stoolie", "sweetie", "techie", "^tie", "toughie", "valkyrie", "veggie", "weenie", "yuppie", "zombie"]
# Irregular plural -> singular mappings, matched as word suffixes
# (case-insensitively) by singular() below.
# Fixed: the original literal listed the "children" key twice; the
# duplicate entry was redundant (dict literals keep only the last) and
# has been removed.
singular_irregular = {
    "men" : "man",
    "people" : "person",
    "children" : "child",
    "sexes" : "sex",
    "moves" : "move",
    "teeth" : "tooth",
    "geese" : "goose",
    "feet" : "foot",
    "zoa" : "zoon",
    "atlantes" : "atlas",
    "atlases" : "atlas",
    "beeves" : "beef",
    "brethren" : "brother",
    "corpora" : "corpus",
    "corpuses" : "corpus",
    "kine" : "cow",
    "ephemerides" : "ephemeris",
    "ganglia" : "ganglion",
    "genii" : "genie",
    "genera" : "genus",
    "graffiti" : "graffito",
    "helves" : "helve",
    "leaves" : "leaf",
    "loaves" : "loaf",
    "monies" : "money",
    "mongooses" : "mongoose",
    "mythoi" : "mythos",
    "octopodes" : "octopus",
    "opera" : "opus",
    "opuses" : "opus",
    "oxen" : "ox",
    "penes" : "penis",
    "penises" : "penis",
    "soliloquies" : "soliloquy",
    "testes" : "testis",
    "trilbys" : "trilby",
    "turves" : "turf",
    "numena" : "numen",
    "occipita" : "occiput",
}
# Prepositions are used to solve things like
# "mother-in-law" or "man-at-arms":
# only the part before the preposition is singularized.
plural_prepositions = ["about", "above", "across", "after", "among", "around", "at", "athwart", "before", "behind", "below", "beneath", "beside", "besides", "between", "betwixt", "beyond", "but", "by", "during", "except", "for", "from", "in", "into", "near", "of", "off", "on", "onto", "out", "over", "since", "till", "to", "under", "until", "unto", "upon", "with"]
def singular(word, custom={}):
    """ Returns the singular form of a plural noun.
    The optional custom dictionary maps words to overriding inflections.
    Lookup order: custom overrides, compound-word recursion, uninflected
    and uncountable word lists, -ie words, irregular forms, then the
    regex rules in singular_rules; unknown words are returned unchanged.
    (custom={} is a shared mutable default, but it is never mutated here;
    kept for interface compatibility.)
    """
    if word in custom:
        return custom[word]
    # Recursion of compound words (e.g. mothers-in-law -> mother-in-law).
    if "-" in word:
        words = word.split("-")
        if len(words) > 1 and words[1] in plural_prepositions:
            return singular(words[0], custom) + "-" + "-".join(words[1:])
    lower_cased_word = word.lower()
    # Fixed: the original tested w.endswith(lower_cased_word), i.e. the
    # comparison was reversed, so e.g. any word that happened to be a
    # suffix of a list entry was left unchanged.
    for w in singular_uninflected:
        if lower_cased_word.endswith(w):
            return word
    for w in singular_uncountable:
        if lower_cased_word.endswith(w):
            return word
    for w in singular_ie:
        # Fixed: entries prefixed with "^" ("^pie", "^tie") anchor to the
        # whole word; the original compared them with endswith, so they
        # could never match and e.g. "ties" fell through to the rules
        # (yielding "ty").
        if w.startswith("^"):
            if lower_cased_word == w[1:] + "s":
                return w[1:]
        elif lower_cased_word.endswith(w + "s"):
            return w
    for w in singular_irregular.keys():
        match = re.search('(' + w + ')$', word, re.IGNORECASE)
        if match:
            return re.sub('(?i)' + w + '$', singular_irregular[w], word)
    for rule in range(len(singular_rules)):
        match = re.search(singular_rules[rule][0], word, re.IGNORECASE)
        if match:
            groups = match.groups()
            # Drop backreferences to groups that did not participate in the
            # match. Fixed: the original wrote the stripped replacement back
            # into singular_rules, permanently corrupting the shared rules
            # table for all later calls; we now modify a local copy only.
            replacement = singular_rules[rule][1]
            for k in range(0, len(groups)):
                if groups[k] == None:
                    replacement = replacement.replace('\\' + str(k + 1), '')
            return re.sub(singular_rules[rule][0], replacement, word)
    return word
def noun_singular(word, custom={}):
    """ Alias for singular(), mirroring the public en.noun.singular API. """
    return singular(word, custom)
| Python |
# ORDINAL - last updated for NodeBox 1rc7
# Author: Tom De Smedt <tomdesmedt@organisms.be>
# See LICENSE.txt for details.
# Based on the Ruby Linguistics module by Michael Granger:
# http://www.deveiate.org/projects/Linguistics/wiki/English
# Suffix for a number's ordinal, keyed on its last two digits when those
# are 0-9 or 11-13 (eleventh/twelfth/thirteenth), else on its last digit.
ordinal_nth = {
    0 : "th",
    1 : "st",
    2 : "nd",
    3 : "rd",
    4 : "th",
    5 : "th",
    6 : "th",
    7 : "th",
    8 : "th",
    9 : "th",
    11 : "th",
    12 : "th",
    13 : "th",
}
# Rewrite rules for spelled-out numbers, tried in order; the final
# catch-all appends "th".
ordinal_suffixes = [
    ["ty$" , "tieth"],
    ["one$" , "first"],
    ["two$" , "second"],
    ["three$" , "third"],
    ["five$" , "fifth"],
    ["eight$" , "eighth"],
    ["nine$" , "ninth"],
    ["twelve$", "twelfth"],
    ["$" , "th"],
]
def ordinal(number):
    """ Returns the ordinal word of a given number.
    For example: 103 -> 103rd, twenty-one -> twenty first.
    The given number can be either integer or string,
    returns None otherwise.
    """
    if isinstance(number, int):
        # Fixed: dict.has_key() is deprecated (and removed in Python 3);
        # the "in" operator is equivalent on both Python 2 and 3.
        if number % 100 in ordinal_nth:
            return str(number) + ordinal_nth[number % 100]
        else:
            return str(number) + ordinal_nth[number % 10]
    if isinstance(number, str):
        import re
        for suffix, inflection in ordinal_suffixes:
            if re.search(suffix, number) is not None:
                return re.sub(suffix, inflection, number)
#print ordinal(103)
#print ordinal("twenty-one") | Python |
# ARTICLE - last updated for NodeBox 1rc7
# Author: Tom De Smedt <tomdesmedt@organisms.be>
# See LICENSE.txt for details.
# Based on the Ruby Linguistics module by Michael Granger:
# http://www.deveiate.org/projects/Linguistics/wiki/English
# Pattern -> article rules, tried in order; first match wins.  The final
# empty pattern always matches, so article() never falls through to None.
article_rules = [
    ["euler|hour(?!i)|heir|honest|hono", "an"], # exceptions: an hour, an honor
    # Abbreviations
    # Strings of capitals starting with a vowel-sound consonant
    # followed by another consonant,
    # and which are not likely to be real words.
    ["(?!FJO|[HLMNS]Y.|RY[EO]|SQU|(F[LR]?|[HL]|MN?|N|RH?|S[CHKLMNPTVW]?|X(YL)?)[AEIOU])[FHLMNRSX][A-Z]", "an"],
    ["^[aefhilmnorsx][.-]", "an"],
    ["^[a-z][.-]", "a"],
    ["^[^aeiouy]", "a"], # consonants: a bear
    ["^e[uw]", "a"], # eu like "you": a european
    ["^onc?e", "a"], # o like "wa": a one-liner
    ["uni([^nmd]|mo)", "a"], # u like "you": a university
    ["^u[bcfhjkqrst][aeiou]", "a"], # u like "you": a uterus
    ["^[aeiou]", "an"], # vowels: an owl
    ["y(b[lor]|cl[ea]|fere|gg|p[ios]|rou|tt)", "an"], # y like "i": an yclept, a year
    ["", "a"] # guess "a"
]
def article(word):
    """ Returns the indefinite article ("a" or "an") prefixed to word.
        For example: "university" -> "a university".
    """
    import re
    # The local is named "art" (not "article") so it no longer shadows
    # this function's own name inside its body.
    for pattern, art in article_rules:
        if re.search(pattern, word) is not None:
            return art + " " + word
def a(word):
    """Alias for article()."""
    return article(word)
def an(word):
    """Alias for article()."""
    return article(word)
#print article("hour")
#print article("FBI")
#print article("bear")
#print article("one-liner")
#print article("european")
#print article("university")
#print article("uterus")
#print article("owl")
#print article("yclept")
#print article("year") | Python |
# NUMERAL - last updated for NodeBox 1rc7
# Author: Tom De Smedt <tomdesmedt@organisms.be>
# See LICENSE.txt for details.
# Based on "Numbers and plural words as spoken English" by Christopher Dunn:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/413172
# Base number words: units and teens (0-20) plus the tens up to ninety.
numerals = {
    0: "zero",      1: "one",        2: "two",       3: "three",
    4: "four",      5: "five",       6: "six",       7: "seven",
    8: "eight",     9: "nine",       10: "ten",      11: "eleven",
    12: "twelve",   13: "thirteen",  14: "fourteen", 15: "fifteen",
    16: "sixteen",  17: "seventeen", 18: "eighteen", 19: "nineteen",
    20: "twenty",   30: "thirty",    40: "forty",    50: "fifty",
    60: "sixty",    70: "seventy",   80: "eighty",   90: "ninety",
}
# Names of the powers of 1000: "thousand", then the -illion series.
_ILLION_PREFIXES = [
    "m", "b", "tr", "quadr", "quint", "sext", "sept", "oct", "non",
    "dec", "undec", "duodec", "tredec", "quattuordec", "quindec",
    "sexdec", "septemdec", "octodec", "novemdec", "vigint",
]
numeral_thousands = ["thousand"] + [prefix + "illion" for prefix in _ILLION_PREFIXES]
# Every word this module recognizes as (part of) a number.
numerals_all = list(numerals.values()) + numeral_thousands + ["hundred"]
def is_number(s):
    """ Determines whether the string is a number.
    A number is:
    - a series of digits
    - a digit series that contains one comma or one point
    - a digit series starting with a minus sign
    - a word in the numerals_all list
    - a compound numeral like "seventy-three"
    Always returns True or False (the original silently returned None
    for non-numeral compounds like "foo-bar").
    """
    s = str(s)
    s = s.lstrip("-")
    # Normalize at most one decimal separator into a digit so the plain
    # digit test below accepts "20.5" and "20,5".
    s = s.replace(",", ".", 1)
    s = s.replace(".", "0", 1)
    import re
    if re.match("^[0-9]+$", s):
        return True
    elif s in numerals_all:
        return True
    else:
        try:
            a, b = s.split("-")
        except ValueError:  # not exactly one dash -> not a compound numeral
            return False
        return a in numerals_all and b in numerals_all
#print is_number("-20.5")
#print is_number("seventy-three")
def thousands(i):
    """Return the i-th power-of-1000 group name ("thousand", "million", ...)."""
    return numeral_thousands[i]
def _chunk(n):
    """ Recursively transforms the number to words.
    A number is either in the numerals dictionary,
    smaller than hundred and a combination of numerals separated by a dash
    (for example: twenty-five),
    a multitude of hundred and a remainder,
    or a multitude of thousand and a remainder.
    """
    if n in numerals:
        return numerals[n]
    ch = str(n)
    remainder = 0
    if n < 100:
        # e.g. 25 -> "twenty" + "-" + "five"
        ch = _chunk((n//10)*10) + "-" + _chunk(n%10)
        return ch
    elif n < 1000:
        ch = _chunk(n//100) + " " + "hundred"
        remainder = n%100
    else:
        # Find the smallest power-of-1000 group that still contains n.
        # Floor division (//) keeps this exact for big integers; the
        # original used "/", which produces lossy floats on Python 3.
        base = 1000
        for i in range(len(numeral_thousands)):
            base *= 1000
            if n < base:
                ch = _chunk(n//(base//1000)) + " " + numeral_thousands[i]
                remainder = n%(base//1000)
                break
    if remainder:
        if remainder >= 1000:
            separator = ","
        elif remainder <= 100:
            separator = " and"
        else:
            separator = ""
        return ch + separator + " " + _chunk(remainder)
    else:
        return ch
def spoken_number(n):
    """ Transforms integers and longs to spoken words.
    For example: 2385762345876 ->
    two trillion, three hundred and eighty-five billion,
    seven hundred and sixty-two million, three hundred and forty-five thousand
    eight hundred and seventy-six
    Non-numeric arguments are returned unchanged.
    """
    # "long" only exists on Python 2; short-circuiting means it is never
    # evaluated when n is a plain int.
    if not isinstance(n, int) and not isinstance(n, long):
        return n
    if n >= 0:
        return _chunk(n)
    # Negative: numerals has no negative keys, so this normally falls
    # through to the "minus" form.
    if n in numerals:
        return numerals[n]
    return "minus " + _chunk(-n)
#print spoken_number(5)
#print spoken_number(2004)
#print spoken_number(2385762345876) | Python |
import knowledge
import nltk
import en
import random
class FactSpec(object):
    """Lightweight reference to one aspect of a fact ("fact.aspect: which")."""
    fact = None
    aspect = None
    which = None
    def __init__(self, fact=None, aspect=None, which=None):
        self.fact = fact
        self.aspect = aspect
        self.which = which
    def __str__(self):
        # %s applies str() to each field, matching the original concatenation.
        return "%s.%s: %s" % (self.fact, self.aspect, self.which)
import manage
def debug(*strings):
    """Debug printer, currently disabled.

    The bare ``return`` below short-circuits the function; remove it to
    re-enable the (Python 2) print statements that follow.
    """
    return
    for s in strings:
        print s,
    print
''' Present is a wrapper function that takes a behavior and facts and pushes
those facts to the appropriate function '''
def present(behavior, facts):
    """Dispatch *facts* to the output function matching *behavior*'s type.

    NOTE(review): transition() below is defined with a single parameter,
    so the two-argument call raises TypeError when a manage.Transition is
    passed -- confirm which object transition() should receive.
    NOTE(review): every handler returns a string, but the return values
    are discarded here, so present() always returns None.
    """
    if type(behavior) is manage.Transition:
        transition(behavior,facts)
    if type(behavior) is manage.Query:
        query(facts)
    if type(behavior) is manage.DeclarativeSingle:
        declareSingle(facts)
    if type(behavior) is manage.DeclareTriple:
        declareTriple(facts)
    if type(behavior) is manage.Answer:
        answer(facts)
''' Handles output for the transistion between two topics '''
def transition(behavior):
    """Render a topic transition between behavior.fromFact and behavior.newFact.

    behavior.commonality selects the linking phrase ("time", "time r",
    "location", "location r"); the "r" variants refer back to the previous
    utterance instead of restating the old fact.  Returns the trimmed
    sentence (untrimmed for the start/end-of-event special case, as before).
    """
    target = ""
    fwhich = behavior.fromFact.which
    fact = behavior.newFact.fact
    aspect = behavior.newFact.aspect
    which = behavior.newFact.which
    date = ""
    if behavior.commonality == "time r":
        target += "Also "
        if type(fwhich) == knowledge.Date:
            date += "in " + fwhich.string()
        if type(fwhich) == knowledge.Event:
            date += "in " + fwhich.start.string()
    if behavior.commonality == "time":
        if type(fact) == knowledge.Event:
            if aspect in ("start","end"):
                target += factEnglish(behavior.fromFact) + ", the same time that " + \
                    str(fact.subject) + " " + conjugate(str(fact.verb))
                return target
            else:
                target += factEnglish(behavior.fromFact) + " At the same time, "
    if behavior.commonality == "location r":
        target += "Also in " + which.name + ", "
    if behavior.commonality == "location":
        target += factEnglish(behavior.fromFact) + " Also in " + behavior.fromFact.which.name + ", "
    x = factEnglish(behavior.newFact)
    # Truthiness, not identity: the original used `date is not ""`, which
    # depends on CPython string interning and could misfire after +=.
    if date:
        x = x.replace(date, "")
        target += date + ", " + x
    else:
        target += x + "."
    return trim(target)
''' Presents fact in behavior as a question '''
def query(behavior):
    """Phrase behavior.fact as a yes/no question, chosen at random."""
    options = [
        trim("Were you aware that " + factEnglish(behavior.fact) + "?"),
        trim("Did you know " + factEnglish(behavior.fact) + "? "),
    ]
    return random.choice(options)
''' Presents one fact taken from behavior as a declaration '''
def declareSingle(behavior):
    """State behavior.fact as one declarative sentence, chosen at random."""
    options = [
        trim("Well, " + factEnglish(behavior.fact) + "."),
        trim(factEnglish(behavior.fact) + "."),
        trim("I can tell you that " + factEnglish(behavior.fact) + "."),
        trim("Remember, " + factEnglish(behavior.fact) + "."),
    ]
    return random.choice(options)
''' Presents three facts from the behavior as declarations '''
def declareTriple(behavior):
    """State the first three facts in behavior.facts as one paragraph."""
    first = factEnglish(behavior.facts[0])
    second = factEnglish(behavior.facts[1])
    third = factEnglish(behavior.facts[2])
    return trim("Well, " + first + ". Also, " + second + ". And, " + third + ".")
''' Presents the answer to a user submitted question '''
def answer(behavior):
    """Phrase behavior.fact as the answer to a user-submitted question."""
    return trim("Oh, " + factEnglish(behavior.fact) + ".")
''' This workhorse function converts a fact relation, expressed in a
factSpec as natural language. Other functions supply a simple wrapper
around the output of factEnglish to complete the sentence. '''
def conjugate(verb):
    """Put the first word of *verb* into the past tense (1st person).

    Returns the words joined by single spaces, including a trailing
    space -- callers splice the result into larger strings and appear
    to rely on that trailing space.
    """
    words = nltk.WhitespaceTokenizer().tokenize(verb)
    words[0] = en.verb.past(words[0], person=1)
    return "".join(w + " " for w in words)
def trim(line):
    """Clean generated text: drop placeholders and fix spacing around
    punctuation and parentheses.

    The double-space collapse below replaces the original's
    ``replace(" ", " ")``, which substituted a single space for itself
    (a no-op); collapsing the double spaces left behind by the removals
    above is clearly what was intended.
    """
    line = line.replace("date unknown", "")
    line = line.replace("-", "")
    line = line.replace("  ", " ")
    line = line.replace(" .", ".")
    line = line.replace(" ?", "?")
    line = line.replace("( ", "(")
    line = line.replace(" )", ")")
    return line
def factEnglish (facts):
    """Convert a FactSpec (fact, aspect, which) into an English clause.

    Collects one or more candidate phrasings in ``options`` and returns a
    random choice.
    NOTE(review): ``facts is not list`` compares against the list *type*
    and is therefore always true for a FactSpec; presumably
    ``not isinstance(facts, list)`` was intended.
    NOTE(review): if no branch appends an option,
    ``random.choice(options)`` raises IndexError on the empty list.
    """
    target = ""
    options = []
    if (facts is not list) :
        fact = facts.fact
        aspect = facts.aspect
        which = facts.which
    debug('Fact:', fact)
    debug('Aspect:', aspect)
    debug('Which:', which)
    if type(fact) == knowledge.Event:
        debug("Event.object = " + str(fact.object))
        debug("Event.verb = " + str(fact.verb))
    if type(which) == knowledge.Event:
        debug("Event.object = " + str(which.object))
        debug("Event.verb = " + str(which.verb))
    if (type(fact) == knowledge.Location):
        if aspect == "events" :
            options.append(which.name + " " + fact.name)
        elif aspect == "contains" :
            options.append( which.name + " " + fact.name)
        elif aspect == "within" :
            options.append(fact.name + " " + which.name)
    if type(fact) == knowledge.Person:
        if aspect == "birth" :
            target += (fact.name + " was born")
            if (which.start == None):
                options.append(trim(target))
            else:
                # "on" for a full date (day known), "in" for a year/era.
                if (which.start.day) :
                    target += " on "
                else :
                    target += " in "
                debug (which.start.string())
                target += which.start.string()
                options.append(trim(target))
        elif aspect == "death" :
            target += fact.name + " died"
            if (which.start == None):
                options.append(target)
            else:
                if (which.start.day) :
                    target += " on "
                else :
                    target += " in "
                target += which.start.string()
                options.append(trim(target))
        elif aspect == "caused" :
            # Strip the person's name from the event name if present.
            target = which.name.replace(fact.name,"")
            if (target == which.name) :
                target = fact.name + " " + which.name
                options.append(trim(target))
            else:
                target = fact.name + " " + which.name
                options.append(trim(target))
        elif aspect == "participated":
            target = fact.name + " " + which.name.replace(fact.name,"")
            options.append(trim(target))
    if type(fact) == knowledge.Event:
        if aspect in ("start", "end")\
        and fact.start == fact.end:
            if (which.day):
                target += " on "
            else:
                target += " in "
            target += which.string()
            # Swap the verb for its conjugated form, move the date last.
            verb = conjugate(fact.verb)
            name = fact.name.replace(fact.verb,verb)
            name = name.replace(target,"") + target
            options.append(trim(name))
        if aspect == "location" :
            options.append(fact.name + " " + which.name)
        elif aspect == "causes" :
            target = fact.name + " " + which.name
            options.append(trim(target))
        elif aspect == "results":
            options.append(fact.name + " " + which.name)
        elif aspect == "instigators" :
            target = fact.name.replace(which.name,"")
            target = target.replace(fact.verb,conjugate(fact.verb))
            # NOTE(review): + "" joins with no separator; " " may have
            # been intended (conjugate() does leave a trailing space).
            target = which.name + "" + target
            options.append(trim(target))
        elif aspect == "participants" :
            target = fact.name.replace(which.name,"")
            target = target.replace(fact.verb,conjugate(fact.verb))
            target = which.name + "" + target
            options.append(trim(target))
    if type(fact) == knowledge.Group:
        if aspect == "members" :
            options.append(which.name + " was a member of " + fact.name)
    if type(fact) == knowledge.Object:
        if aspect == "creation":
            target += fact.name + " was created "
            if (which.day) :
                target += "on "
            else :
                target += "in "
            target += which.string()
            options.append(target)
        elif aspect == "creator" :
            options.append(fact.name + " " + which.name)
        elif aspect == "location" :
            options.append(fact.name + " " + which.name)
    return random.choice(options)
''' Returns greeting, asks user to pick between two topics expressed in
factSpec (fact and which) or supply their own topic '''
# Lots of canned text to pull from randomly here.
def intro(facts):
    """Greet the user and offer the two topics in facts (fact and which)
    or a free choice of their own."""
    fact = facts.fact
    which = facts.which
    return ("Hi. Welcome to flamingsquirrel, the history chat bot. I know"
            " a lot about the history of the Renaissance. Would you like to"
            " know about " + fact.name + " or " + which + " or maybe "
            "something entirely different?")
def confirm(facts):
    """Ask the user to confirm that facts.fact is the topic they meant."""
    return "Did you mean " + facts.fact.name + "?"
def respond():
    """Nudge an idle user with a randomly chosen re-engagement line."""
    options = [
        "Are you still there?",
        "Please respond.",
        "Hey! Where'd you go?",
        "You seem to have left, is there anything I can help you with?",
        "You there?",
    ]
    return random.choice(options)
def bye():
    """Sign off with a randomly chosen farewell line."""
    options = [
        "Thanks for letting me chew your ear.",
        "See you 'round.",
        "Bored already? Talk to you later.",
        "Nice chatting with you.",
    ]
    return random.choice(options)
| Python |
from knowledge import Date
from knowledge import Person
from knowledge import Location
from knowledge import Group
from knowledge import Object
from knowledge import Event
from knowledge import Page
def getPage():
    """Build and return a hand-authored dummy knowledge.Page for testing.

    Creates a small web of Person/Location/Group/Object/Event facts, links
    each fact back to the page, and returns the page.
    NOTE(review): several keyword values such as ``events=(proj)`` and
    ``within=(ca)`` are parenthesized scalars, not tuples -- confirm the
    knowledge constructors accept bare objects there.
    """
    trevor = Person(name="trevor", birth=Date(year=1988))
    peter = Person(name="peter", birth=Date(year=1987))
    daniel = Person(name="daniel", birth=Date(year=1986))
    gregory = Person(name="gregory", birth=Date(year=1985))
    georg = Person(name="georg", birth=Date(year=1984))
    ca = Location(name="california")
    group = Group(name="our group", members=set((
        trevor, peter, daniel, gregory, georg)))
    proj = Object(name="this project", creation=
        Date(year=2010, month=3, day=19),
        creator=group)
    ucsc = Location(name="ucsc", events=(proj), within=(ca))
    proj.location = (ucsc)
    ca.contains.add(ucsc)
    govna = Person(name="ahnold", birth=Date(year=1950), caused=(ca),
        pronoun="he")
    prof = Person(name="prof walker", birth=Date(year=1950),
        pronoun="she")
    ai = Event(name="ai class", start=Date(year=2010, month=1, day=1),
        end=Date(year=2010,month=3,day=15),
        where=ucsc, participants=set((trevor, peter, daniel, gregory,
        georg, prof)))
    # Fixed typo: the original assigned "prof.particpated", leaving the
    # real "participated" attribute (used elsewhere in this project)
    # untouched.
    prof.participated = (ai)
    factxs = [trevor, peter, daniel, gregory, georg, ca, group, proj,
        ucsc, govna, prof, ai]
    page = Page(url="dummyPage", facts=factxs)
    for fact in factxs:
        fact.page = page
    return page
| Python |
#!/usr/bin/env python2.4
import os, pickle
import nltk
import knowledge, parse
# Page-list section headers whose entries we process, in order.
CATEGORIES = ("General", "People", "Objects", "Dates")
# Section headers whose entries are skipped entirely.
EXCLUDE = ("Bad Cases",)
# Verbs (past/present pairs) marking an event that creates an Object.
CREATION_VERBS = (
    'painted', 'paints',
    'wrote', 'writes',
    'built', 'builds',
    'created', 'creates',
    'sculpted', 'sculpts',
    'made', 'makes',
)
# Verb phrases marking a Person's birth and death events.
BIRTH_VERBS = ('was born',)
DEATH_VERBS = ('died', 'dies', 'was killed')
def debug(*strings):
    """Debug printer, currently disabled.

    The bare ``return`` below short-circuits the function; remove it to
    re-enable the (Python 2) print statements that follow.
    """
    return
    for s in strings:
        print s,
    print
def get_type(node):
    """Return the grammar type label of a parse-tree node.

    Nodes are plain strings (the label itself), (label, index) tuples, or
    nltk Trees whose ``.node`` feature dict carries a 'type' Feature.
    Returns "unknown" for anything unrecognized.
    """
    if type(node) == str:
        return node
    elif type(node) == tuple:
        return node[0]
    elif isinstance(node, nltk.tree.Tree):
        # The label lives in the tree's feature structure under a
        # Feature object named 'type'.
        for k, v in node.node.items():
            if type(k) == nltk.featstruct.Feature and k.name == 'type':
                return v
    debug("Type not known...")
    return "unknown"
def get_children(tree):
    """Return two parallel tuples for *tree*'s children: their type labels
    and their payload nodes (None for bare strings, element [1] for
    (label, index) tuples, the subtree itself for Trees)."""
    child_types = []
    child_nodes = []
    for child in tree:
        child_types.append(get_type(child))
        if type(child) == str:
            child_nodes.append(None)
        elif type(child) == tuple:
            child_nodes.append(child[1])
        elif isinstance(child, nltk.tree.Tree):
            child_nodes.append(child)
        else:
            debug("Found child node of type", type(child))
    return tuple(child_types), tuple(child_nodes)
def label_tree(tree, index=0):
    """Replace every leaf string in *tree* (in place) with a
    (string, position) tuple, numbering leaves left to right starting at
    *index*.  Returns the next unused position."""
    for slot, node in enumerate(tree):
        if type(node) == str:
            tree[slot] = (node, index)
            index += 1
        else:
            index = label_tree(node, index)
    return index
def get_word(idx1, idx2):
    """Return word idx2 of sentence idx1 (see get_words for idx1 forms)."""
    return get_words(idx1)[idx2]
def get_words(index_or_words):
    """Resolve a sentence reference: an int indexes the global parsed
    ``data``; anything else is assumed to already be the word list."""
    if type(index_or_words) == int:
        return data['sentences'][index_or_words]['words']
    return index_or_words
def understand_sentence(tree, index_or_words, slots=None):
    """Entry point of semantic extraction: match the labeled parse *tree*
    against known sentence shapes and fill a slots dict
    (subject/verb/object/location/date/start/end).

    index_or_words locates the sentence's word list (see get_words).
    Returns the filled dict, or None for shapes that are not handled.
    """
    slots = slots or {
        'subject': None,
        'verb': None,
        'object': None,
        'location': None,
        'date': None,
        'start': None,
        'end': None,
    }
    debug("Understanding [sentence]...")
    debug(' ', tree.node)
    types, nodes = get_children(tree)
    if types == ('S', '.'):
        # Unwrap "sentence + final punctuation".
        return understand_sentence(nodes[0], index_or_words, slots)
    elif types == ('NP', 'VP'):
        return understand_verbphrase(
            nodes[1],
            index_or_words,
            understand_subject(
                nodes[0],
                index_or_words,
                slots
            )
        )
    elif types == ('PP', 'NP', 'VP'):
        return understand_prepphrase(
            nodes[0],
            index_or_words,
            understand_verbphrase(
                nodes[2],
                index_or_words,
                understand_subject(
                    nodes[1],
                    index_or_words,
                    slots
                )
            )
        )
    elif types == ('NP', ',', 'VP', 'PPE'):
        return understand_prepphrase(
            nodes[3],
            index_or_words,
            understand_verbphrase(
                nodes[2],
                index_or_words,
                understand_subject(
                    nodes[0],
                    index_or_words,
                    slots
                )
            )
        )
    elif types == ('PP', ',', 'NP', 'VP'):
        return understand_prepphrase(
            nodes[0],
            index_or_words,
            understand_verbphrase(
                nodes[3],
                index_or_words,
                understand_subject(
                    nodes[2],
                    index_or_words,
                    slots
                )
            )
        )
    elif types == ('CD', ',', 'NP', 'VP'):
        # Leading bare year, e.g. "1503, Leonardo paints ...".
        word = get_word(index_or_words, nodes[0])
        slots['date'] = resolve_date(word)
        return understand_verbphrase(
            nodes[3],
            index_or_words,
            understand_subject(
                nodes[2],
                index_or_words,
                slots
            )
        )
    elif types == ('CD', ',', 'VP'):
        debug("Found CD , VP sentence:", get_words(index_or_words))
        return None
    else:
        return None
def understand_subject(tree, index_or_words, slots):
    """Fill slots['subject'] from an NP subtree, stripping determiners,
    adjectives, parentheticals and trailing prepositional phrases.

    Returns the updated slots, or None when the subject is unusable.
    """
    if not slots: return None
    debug("Understanding [subject]...")
    debug(' ', tree.node)
    types, nodes = get_children(tree)
    if types == ('NP', 'CC', 'NP'):
        # Conjoined subjects ("X and Y") are not handled.
        debug("Found NP CC NP subject:", get_words(index_or_words))
        return None
    elif types == ('NP', 'PPE'):
        return understand_prepphrase(
            nodes[1],
            index_or_words,
            understand_subject(
                nodes[0],
                index_or_words,
                slots
            )
        )
    elif types == ('NP', '(', 'ANYTHING', ')'):
        return understand_subject(
            nodes[0],
            index_or_words,
            slots
        )
    elif types == ('J', 'NP'):
        return understand_subject(
            nodes[1],
            index_or_words,
            slots
        )
    elif types == ('DT', 'NP'):
        return understand_subject(
            nodes[1],
            index_or_words,
            slots
        )
    elif types == ('PS', 'NP'):
        debug("Found PS NP subject:", get_words(index_or_words))
        return None
    elif types == ('N',):
        slots = understand_noun(
            nodes[0],
            index_or_words,
            slots
        )
        if not slots:
            return None
        # Promote the generic 'noun' result into the subject slot.
        slots['subject'] = slots['noun']
        del slots['noun']
        return slots
    else:
        return None
def understand_object(tree, index_or_words, slots):
    """Fill slots['object'] from an NP subtree; mirrors
    understand_subject but stores the result in the object slot.

    Returns the updated slots, or None when the object is unusable.
    """
    if not slots: return None
    debug("Understanding [object]...")
    debug(' ', tree.node)
    types, nodes = get_children(tree)
    if types == ('NP', 'CC', 'NP'):
        debug("Found NP CC NP object:", get_words(index_or_words))
        return None
    elif types == ('NP', 'PPE'):
        # TODO: Something else here?
        return understand_prepphrase(
            nodes[1],
            index_or_words,
            understand_object(
                nodes[0],
                index_or_words,
                slots
            )
        )
    elif types == ('NP', '(', 'ANYTHING', ')'):
        return understand_object(
            nodes[0],
            index_or_words,
            slots
        )
    elif types == ('J', 'NP'):
        return understand_object(
            nodes[1],
            index_or_words,
            slots
        )
    elif types == ('DT', 'NP'):
        return understand_object(
            nodes[1],
            index_or_words,
            slots
        )
    elif types == ('PS', 'NP'):
        debug("Found PS NP object:", get_words(index_or_words))
        return None
    elif types == ('N',):
        slots = understand_noun(
            nodes[0],
            index_or_words,
            slots
        )
        if not slots:
            return None
        # Promote the generic 'noun' result into the object slot.
        slots['object'] = slots['noun']
        del slots['noun']
        return slots
    else:
        return None
def understand_prepNP(tree, index_or_words, slots):
    """Resolve the NP inside a prepositional phrase into slots['aux'];
    understand_prepphrase later routes 'aux' into location or date.

    Mirrors understand_subject/understand_object structurally.
    Returns the updated slots, or None when the NP is unusable.
    """
    if not slots: return None
    debug("Understanding [prepNP]...")
    debug(' ', tree.node)
    types, nodes = get_children(tree)
    if types == ('NP', 'CC', 'NP'):
        debug("Found NP CC NP prepNP:", get_words(index_or_words))
        return None
    elif types == ('NP', 'PPE'):
        # TODO: Something else here?
        return understand_prepphrase(
            nodes[1],
            index_or_words,
            understand_prepNP(
                nodes[0],
                index_or_words,
                slots
            )
        )
    elif types == ('NP', '(', 'ANYTHING', ')'):
        return understand_prepNP(
            nodes[0],
            index_or_words,
            slots
        )
    elif types == ('J', 'NP'):
        return understand_prepNP(
            nodes[1],
            index_or_words,
            slots
        )
    elif types == ('DT', 'NP'):
        return understand_prepNP(
            nodes[1],
            index_or_words,
            slots
        )
    elif types == ('PS', 'NP'):
        debug("Found PS NP prepNP:", get_words(index_or_words))
        return None
    elif types == ('N',):
        slots = understand_noun(
            nodes[0],
            index_or_words,
            slots
        )
        if not slots:
            return None
        # Stash the resolved noun for the caller to classify.
        slots['aux'] = slots['noun']
        del slots['noun']
        return slots
    else:
        return None
def understand_noun(tree, index_or_words, slots):
    """Fill slots['noun'] from a noun subtree: compound nouns, pronouns,
    named entities, plain nouns, or numbers read as dates.

    Returns the updated slots, or None when the noun cannot be resolved.
    """
    if not slots: return None
    debug("Understanding [noun]...")
    debug(' ', tree.node)
    types, nodes = get_children(tree)
    if types == ('N', 'N'):
        # Compound noun: resolve both halves, then merge.
        slots = understand_noun(nodes[0], index_or_words, slots)
        if not slots:
            return None
        noun = slots['noun']
        slots = understand_noun(nodes[1], index_or_words, slots)
        if not slots:
            return None
        slots['noun'] = combine_nouns(noun, slots['noun'])
        return slots
    elif types == ('N', 'POS'):
        debug("Found N POS noun:", get_words(index_or_words))
        slots = understand_noun(nodes[0], index_or_words, slots)
        return slots
        # TODO: More here?
        #if not slots:
        #    return None
        #noun = slots['noun']
    elif types == ('PRP',):
        word = get_word(index_or_words, nodes[0]).lower()
        typ = None
        if word in ('him', 'her', 'he', 'she'):
            typ = "PERSON"
        elif word in ('it', 'that'):
            typ = "THING"
        elif word in ('them', 'they'):
            typ = "PEOPLE"
        elif word in ('those',):
            # Fixed: ('those') was a plain string, so the original tested
            # substring membership instead of equality with "those".
            typ = "THINGS"
        else:
            debug("Found unidentified pronoun", word)
        slots['noun'] = resolve_noun(index_or_words, nodes[0], 'pronoun', typ)
        return slots
    elif types == ('WP',):
        word = get_word(index_or_words, nodes[0])
        debug("Found WP noun:", word)
        return None
    elif types == ('NN',)\
        or types == ('NNP',)\
        or types == ('NNS',)\
        or types == ('NNPS',):
        slots['noun'] = resolve_noun(index_or_words, nodes[0], "noun")
        return slots
    elif types == ('ENT',):
        # Named entity: the subtype (PERSON, GPE, ...) rides on the label.
        tys, nds = get_children(nodes[0])
        typ = tys[0].split('-')[1]
        slots['noun'] = resolve_noun(index_or_words, nds[0], 'noun', typ)
        return slots
    elif types == ('CD',):
        # A numeral: prefer a date reading, else treat as a plain noun.
        word = get_word(index_or_words, nodes[0])
        slots['noun'] = resolve_date(word)
        if not slots['noun']:
            slots['noun'] = resolve_noun(index_or_words, nodes[0], "noun")
        return slots
    else:
        return None
def understand_verbphrase(tree, index_or_words, slots):
    """Fill the verb (and possibly object) slots from a VP subtree;
    particles, trailing prepositional phrases and leading adverbs are
    peeled off.

    Returns the updated slots, or None for unhandled shapes.
    """
    if not slots: return None
    debug("Understanding [verbphrase]...")
    debug(' ', tree.node)
    types, nodes = get_children(tree)
    if types == ('V', 'NP'):
        return understand_verb(
            nodes[0],
            index_or_words,
            understand_object(
                nodes[1],
                index_or_words,
                slots
            )
        )
    elif types == ('V', 'RP', 'NP'):
        return understand_verb(
            nodes[0],
            index_or_words,
            understand_object(
                nodes[2],
                index_or_words,
                slots
            )
        )
    elif types == ('V', 'PPE', 'NP'):
        return understand_verb(
            nodes[0],
            index_or_words,
            understand_object(
                nodes[2],
                index_or_words,
                slots
            )
        )
    elif types == ('V',):
        return understand_verb(
            nodes[0],
            index_or_words,
            slots
        )
    elif types == ('VP', 'PPE'):
        return understand_prepphrase(
            nodes[1],
            index_or_words,
            understand_verbphrase(
                nodes[0],
                index_or_words,
                slots
            )
        )
    elif types == ('R', 'VP'):
        # Adverb before the verb phrase is ignored.
        return understand_verbphrase(
            nodes[1],
            index_or_words,
            slots
        )
    else:
        return None
def understand_verb(tree, index_or_words, slots):
    """Fill slots['verb'] from a verb subtree; VBZ/VBD auxiliaries are
    prefixed onto the main verb (e.g. "was killed")."""
    if not slots: return None
    debug("Understanding [verb]...")
    debug(' ', tree.node)
    types, nodes = get_children(tree)
    if types == ('VBZ', 'V')\
        or types == ('VBD', 'V'):
        slots = understand_verb(nodes[1], index_or_words, slots)
        word = get_word(index_or_words, nodes[0])
        slots['verb'] = word + ' ' + slots['verb']
        return slots
    else:
        word = get_word(index_or_words, nodes[0])
        slots['verb'] = word
        return slots
def understand_prepphrase(tree, index_or_words, slots):
    """Route a prepositional phrase into slots: "at"/"to" -> location,
    "in"/"on" -> location or date depending on the resolved NP's type.

    Unhandled shapes leave the slots untouched rather than failing.
    """
    if not slots: return None
    debug("Understanding [prepphrase]...")
    debug(' ', tree.node)
    types, nodes = get_children(tree)
    if types == (',', 'PP'):
        return understand_prepphrase(
            nodes[1],
            index_or_words,
            slots
        )
    elif types == ('PP',):
        return understand_prepphrase(
            nodes[0],
            index_or_words,
            slots
        )
    elif types == ('P', 'NP'):
        orig = slots
        slots = understand_prepNP(
            nodes[1],
            index_or_words,
            slots
        )
        if not slots:
            # The NP was not understood; keep the slots we already had.
            return orig
        tys, nds = get_children(nodes[0])
        word = get_word(index_or_words, nds[0]).lower()
        if word == 'at' and type(slots['aux']) in (str, knowledge.Location):
            slots['location'] = slots['aux']
        elif word in ('in', 'on'):
            if type(slots['aux']) == knowledge.Location:
                slots['location'] = slots['aux']
            elif type(slots['aux']) == knowledge.Date:
                slots['date'] = slots['aux']
            elif type(slots['aux']) == str:
                slots['location'] = slots['aux']
        elif word == 'to':
            slots['location'] = slots['aux']
        return slots
    elif types == ('VBN', 'NP') or types == ('VBG', 'NP'):
        debug("Found V* NP prepphrase:", get_words(index_or_words))
        return slots
    elif types == ('TO', 'VP'):
        debug("Found TO VP prepphrase:", get_words(index_or_words))
        return slots
    else:
        return slots
def resolve_noun(gindex, sindex, typ, subtype=None):
    """Map the word at (sentence gindex, position sindex) to a knowledge
    object, the bare word, or None, according to typ/subtype.

    typ is "noun" or "pronoun"; subtype is the NE tag when known
    (PERSON, ORGANIZATION, LOCATION, GPE, ...).
    """
    word = get_word(gindex, sindex)
    debug("Resolving", word, "as", typ, subtype)
    if typ == "noun":
        if subtype == None:
            m = match_fact(word, knowledge.Person)
            if m:
                return m
            return word
        elif subtype in ("PERSON", "GSP"):
            debug("Found a person:", word)
            match = match_fact(word, knowledge.Person)
            # TODO: How sketchy is this?
            if not match and ' ' in word:
                return knowledge.Person(
                    name = word,
                    page = page,
                )
            elif not match:
                return word
            else:
                return match
        elif subtype == "ORGANIZATION":
            debug("Found an organization:", word)
            return match_fact(word, knowledge.Group)
        elif subtype in ("LOCATION", "FACILITY", "GPE"):
            debug("Found a location:", word)
            match = match_fact(word, knowledge.Location)
            # Fixed: the original computed ``match`` and then dropped a
            # successful result, falling through to return None.
            if match:
                return match
            if subtype == "GPE":
                return knowledge.Location(
                    name = word,
                    page = page,
                )
            return None
        elif subtype in ("TIME", "MONEY", "PERCENT", "DATE"):
            debug("Found an odd noun type:", subtype, "for word", word)
            return None
    elif typ == "pronoun":
        key = find_key(gindex, sindex, subtype)
        if isinstance(key, knowledge.Fact):
            return key
        else:
            return match_fact(key)
    else:
        debug("Bad noun type:", typ)
        return None
def resolve_date(word):
    """Turn a year-like token into a knowledge.Date.

    Handles "1500s" (century), "1550s" (decade) and plain years; returns
    an existing matching Date fact when one is known, else a new Date
    bound to the current global ``page``, or None for non-dates.
    """
    debug("Resolving date", word)
    m = match_fact(word, knowledge.Date)
    if m:
        return m
    if word[-1] == 's' and len(word) == 5:
        if word[-2] == '0':
            if word[-3] == '0':
                try:
                    return knowledge.Date(century=int(word[:-1]), page=page)
                # Narrowed from a bare except, which also swallowed
                # SystemExit/KeyboardInterrupt.
                except Exception:
                    return None
            else:
                try:
                    return knowledge.Date(decade=int(word[:-1]), page=page)
                except Exception:
                    pass
        else:
            return None
    else:
        try:
            return knowledge.Date(year=int(word), page=page)
        except Exception:
            return None
def combine_nouns(first, second):
    """Merge two resolved nouns: identical objects pass through, two bare
    strings concatenate with a space, anything else fails as None."""
    debug("Trying to combine", first, "and", second)
    if first is second:
        return first
    if type(first) == str and type(second) == str:
        return first + ' ' + second
    return None
def find_key(gindex, sindex, subtype):
    """Anaphora-resolution stub: should return the antecedent key for the
    pronoun at (gindex, sindex).  Currently unimplemented; always
    returns None."""
    debug("Trying to find key of type", subtype)
    # TODO: Resolution!
def match_fact(key, typ=None):
    """Look up *key* among the global ``facts``.

    Dates match on their string form; other facts match when key (a word,
    a multi-word phrase, or a list of words) appears in the fact's name.
    Returns the unique match, a page-disambiguated match, or None.
    """
    debug("Trying to match fact", key, "of type", typ)
    results = []
    realfacts = filter(lambda f: isinstance(f, knowledge.Fact), facts)
    dates = filter(lambda f: isinstance(f, knowledge.Date), facts)
    if typ == knowledge.Date:
        for d in dates:
            if key == str(d):
                results.append(d)
    else:
        for f in [_ for _ in realfacts if (typ == None or isinstance(_, typ))]:
            if type(key) in (list, tuple):
                # Every word of the key must appear in the fact's name.
                if all(k in f.name.split(' ') for k in key):
                    results.append(f)
            elif type(key) == str:
                if ' ' in key and key in f.name:
                    results.append(f)
                elif key in f.name.split(' '):
                    results.append(f)
    if len(results) == 1:
        return results[0]
    elif len(results) > 1:
        # NOTE(review): ``page`` is a knowledge.Page elsewhere in this
        # module, so comparing a string to it looks like it should be
        # ``== page.url`` -- confirm whether Page defines __eq__.
        for r in results:
            if r.name.replace(' ', '_') == page:
                return r
        debug(" ...multiple result found for key", key)
        return None
    else:
        debug(" ...no matches found.")
        return None
def setup_known_facts(categories):
    """Seed the global ``facts`` set with Person/Object facts for every
    page listed under 'People' and 'Objects', skipping 'Bad Cases'."""
    global facts, pages
    bad = categories['Bad Cases']
    _add_known_facts(categories['People'], bad, knowledge.Person)
    _add_known_facts(categories['Objects'], bad, knowledge.Object)
def _add_known_facts(urls, bad, fact_class):
    """Add one fact_class instance per url, bound to its Page when found.

    The original left ``thispage`` unbound (or stale from a previous
    iteration) when no page matched a url; this version falls back to
    None instead.
    """
    for url in urls:
        if url in bad:
            continue
        thispage = None
        for p in pages:
            if p.url == url:
                thispage = p
                break
        facts.add(fact_class(
            name=url.replace('_', ' '),
            page=thispage,
        ))
def get_categories():
    """Parse data/pages.list into {category: [entry, ...]}.

    Section headers look like ``--Name--``; every following non-blank
    line belongs to that section.  Fixes over the original: the file
    handle is closed deterministically (``with``), blank lines are no
    longer recorded as entries, and content before the first header is
    ignored instead of raising KeyError.
    """
    categories = {}
    current = None
    with open(os.path.join("data", "pages.list"), 'r') as fin:
        for line in fin:
            line = line.strip()
            if not line:
                continue
            if line[:2] == '--' and line[-2:] == '--':
                current = line[2:-2]
                if current not in categories:
                    categories[current] = []
            elif current is not None:
                categories[current].append(line)
    return categories
def make_facts(slots):
    """Turn a filled slots dict into knowledge.Event facts, with side
    effects on the people/locations involved, accumulated in the global
    ``facts`` set.

    NOTE(review): when the object is a Person, ``participant = subject``
    below looks like a copy-paste bug for ``participant = object`` --
    confirm before relying on participant links.
    """
    if not slots:
        return
    subject = slots['subject']
    verb = slots['verb']
    object = slots['object']
    date = slots['date']
    start = slots['start']
    end = slots['end']
    location = slots['location']
    # Upgrade bare strings to known facts where possible.
    if type(subject) == str:
        m = match_fact(subject)
        if m:
            subject = m
    if type(subject) == knowledge.Person:
        instigator = subject
    else:
        instigator = None
    if type(object) == str:
        m = match_fact(object)
        if m:
            object = m
    if type(object) == knowledge.Person:
        participant = subject
    else:
        participant = None
    if type(location) == str:
        m = match_fact(location, knowledge.Location)
        if m:
            location = m
        else:
            location = knowledge.Location(
                name = location,
                page = page,
            )
    if type(date) == str:
        m = match_fact(date, knowledge.Date)
        if m:
            date = m
        else:
            date = None
    if type(start) == str:
        m = match_fact(start, knowledge.Date)
        if m:
            start = m
        else:
            start = None
    if type(end) == str:
        m = match_fact(end, knowledge.Date)
        if m:
            end = m
        else:
            end = None
    occurrence = None
    # Build the event; a start/end pair beats a single 'when' date.
    if isinstance(subject, knowledge.Fact) and object:
        if start and end:
            occurrence = knowledge.Event(
                name = str(subject) + ' ' + str(verb) + ' ' + str(object),
                subject = subject,
                start = start,
                end = end,
                where = location,
                verb = verb,
                object = object,
                page = page,
                instigators = set([instigator]),
                participants = set([participant]),
            )
        else:
            occurrence = knowledge.Event(
                name = str(subject) + ' ' + str(verb) + ' ' + str(object),
                when = date,
                where = location,
                subject = subject,
                verb = verb,
                object = object,
                page = page,
                instigators = set([instigator]),
                participants = set([participant]),
            )
    elif isinstance(subject, knowledge.Fact):
        if start and end:
            occurrence = knowledge.Event(
                name = str(subject) + ' ' + str(verb),
                start = start,
                end = end,
                where = location,
                subject = subject,
                verb = verb,
                object = object,
                page = page,
                participants = set([instigator]),
            )
        else:
            occurrence = knowledge.Event(
                name = str(subject) + ' ' + str(verb),
                when = date,
                where = location,
                subject = subject,
                verb = verb,
                object = object,
                page = page,
                participants = set([instigator]),
            )
    if occurrence:
        # Collapse the {None} placeholders created above.
        if occurrence.instigators == set([None]):
            occurrence.instigators = None
        if occurrence.participants == set([None]):
            occurrence.participants = None
        if location:
            location.events.add(occurrence)
        if instigator:
            instigator.caused.add(occurrence)
        if isinstance(participant, knowledge.Fact):
            participant.participated.add(occurrence)
        # Creation/birth/death verbs additionally update the subject/object.
        if occurrence.verb in CREATION_VERBS\
        and isinstance(subject, knowledge.Person)\
        and isinstance(object, knowledge.Object):
            object.creator = subject
            object.creation = occurrence
        if occurrence.verb in BIRTH_VERBS\
        and isinstance(subject, knowledge.Person):
            subject.birth = occurrence
        if occurrence.verb in DEATH_VERBS\
        and isinstance(subject, knowledge.Person):
            subject.death = occurrence
        facts.add(occurrence)
    for f in (subject, object, location, date):
        if (isinstance(f, knowledge.Fact) or isinstance(f, knowledge.Date))\
        and f not in facts:
            facts.add(f)
        else:
            debug("Not adding", f)
def make_easy_facts(date, name, desc, verb):
    """Create birth/death facts from a Births/Deaths list entry
    (date token, person name, optional description, verb phrase).

    Temporarily switches the global ``page`` to the person's own page
    when one exists, so the new events are attributed correctly.
    NOTE(review): if the date token cannot be resolved, ``date`` is None
    and ``date.string()`` below raises AttributeError -- confirm inputs.
    NOTE(review): the reduce(...) below is equivalent to any(...).
    """
    global page
    debug("Making easy facts for page", page)
    m = match_fact(name, knowledge.Person)
    if m:
        subject = m
    else:
        # Skip names that still contain digits or markup residue.
        if reduce(lambda a, b: a or b, [c in name for c in '0123456789[]().,']):
            return
        else:
            subject = knowledge.Person(
                name = name,
                page = page,
            )
    m = match_fact(date, knowledge.Date)
    if m:
        date = m
    else:
        date = resolve_date(date)
    occurrence = None
    oldpage = page
    # Attribute the event to the person's own page when one exists.
    for p in pages:
        if subject.name.replace(' ', '_') == p.url:
            page = p
    occurrence = knowledge.Event(
        name = str(subject) + ' ' + str(verb) + ' in ' + date.string(),
        subject = subject,
        when = date,
        verb = verb,
        page = page,
        participants = set([subject]),
    )
    description = None
    if desc:
        description = knowledge.Event(
            name = str(subject) + " was a " + desc,
            subject = subject,
            verb = "was a",
            page = page,
            participants = set([subject]),
            object = desc,
        )
        subject.participated.add(description)
    page = oldpage
    if verb == 'was born':
        if subject.birth:
            if subject.birth.start and subject.birth.start.year:
                # A fully dated birth already exists; keep it.
                return
            else:
                subject.birth.start = date
                subject.birth.end = date
                occurrence = None
        else:
            subject.birth = occurrence
    elif verb == 'died':
        if subject.death:
            if subject.death.start and subject.death.start.year:
                return
            else:
                subject.death.start = date
                subject.death.end = date
                occurrence = None
        else:
            subject.death = occurrence
    if occurrence:
        facts.add(occurrence)
    for f in (subject, date, description):
        if (isinstance(f, knowledge.Fact) or isinstance(f, knowledge.Date))\
        and f not in facts:
            facts.add(f)
        else:
            debug("Not adding", f)
def main():
    """Drive fact extraction: select pages from the category file, parse
    each page's sentences and date listings, and populate the global
    ``facts`` set.  ``page`` and ``data`` are globals because the fact
    builders read them."""
    global facts, pages, data, page
    facts = set()
    categories = get_categories()
    # categories = { 'People': [], 'Objects': [], 'Dates': ["1550s"], }
    # categories = {'People': ["Martin_Luther"], }
    # categories = {
    #     'General': [
    #         "Renaissance"
    #     ],
    #     'People': [
    #         "Martin_Luther",
    #         "Johann_Gutenberg",
    #         "Guillaume_Dufay",
    #         "Claudio_Monteverdi",
    #         "Verrocchio",
    #     ],
    #     'Objects': [
    #         "Mona_Lisa",
    #     ],
    # }
    pages = []
    exclude = []
    # Keep pages from the wanted categories, then drop excluded ones.
    for c in CATEGORIES:
        if c in categories:
            pages.extend(categories[c])
    for c in EXCLUDE:
        if c in categories:
            exclude.extend(categories[c])
    for e in exclude:
        pages.remove(e)
    for i, p in enumerate(pages):
        pages[i] = knowledge.Page(url=p)
    setup_known_facts(categories)
    for p in pages:
        page = p
        data = parse.get(p.url)
        # Free-text sentences that parsed successfully.
        for i, d in enumerate(data['sentences']):
            if d['tree']:
                debug('\n')
                debug(d['words'])
                debug(d['tags'])
                tree = d['tree']
                label_tree(tree)
                slots = understand_sentence(tree, i)
                make_facts(slots)
                debug(slots)
        # Listed date entries carry either a point date ('when') or a range.
        if data['listed']:
            for d in data['listed']:
                if not d['tree']:
                    continue
                tree = d['tree']
                label_tree(tree)
                date = d['date']
                when = None
                start = None
                end = None
                if 'when' in date:
                    when = resolve_date(date['when'])
                else:
                    start = resolve_date(date['start'])
                    end = resolve_date(date['end'])
                slots = understand_sentence(tree, d['words'])
                if not slots:
                    continue
                if when:
                    slots['date'] = when
                else:
                    slots['start'] = start
                    slots['end'] = end
                make_facts(slots)
                debug(slots)
        # Birth/death listings take the simplified path.
        if data['births']:
            for date, name, desc in data['births']:
                make_easy_facts(date['when'], name, desc, 'was born')
        if data['deaths']:
            for date, name, desc in data['deaths']:
                make_easy_facts(date['when'], name, desc, 'died')
if __name__ == "__main__":
    main()
    # Attach every collected fact/date to its page before pickling.
    for f in facts:
        if not f.page:
            debug("Fact has no page:", repr(f))
        else:
            if f not in f.page.facts and f not in f.page.dates:
                if isinstance(f, knowledge.Fact):
                    f.page.add_fact(f)
                elif isinstance(f, knowledge.Date):
                    f.page.add_date(f)
    # NOTE(review): text-mode 'w' only works with pickle protocol 0 on
    # Python 2 -- 'wb' would be safer; confirm before changing.
    fout = open(os.path.join('data', 'known', 'facts.pkl'), 'w')
    obj = facts, pages
    pickle.dump(obj, fout)
    fout.close()
| Python |
from present import FactSpec
from knowledge import Fact
from knowledge import Location
from knowledge import Person
from knowledge import Event
from knowledge import Group
from knowledge import Object
from knowledge import Date
import manage
import random
def debug(*strings):
    """Debug logging, currently disabled: the bare ``return`` below
    short-circuits the prints.  Delete it to re-enable."""
    return
    for s in strings:
        print s,
    print
def choose(state, behavior):
    """Fill *behavior* with fact(s) drawn from the current page.

    Raises ValueError when the page offers no presentable fact (the
    manager recovers via pageProblem) and TypeError for unknown behavior
    classes.  Every FactSpec handed out is also appended to
    state.factsPresented.
    """
    if type(behavior) == manage.Query:
        fact = getFact(state, behavior)
        if not fact:
            raise ValueError("page without fact: " + str(state.currentPage))
        state.factsPresented.append(fact)
        behavior.fact = fact
        debug("DECIDE: Query:\n\tfact: ", behavior.fact.fact.name,\
              "\n\taspect: ", behavior.fact.aspect,\
              "\n\twhich: ", behavior.fact.which,\
              "\n\tstateUpdated: ",\
              state.factsPresented[-1] is behavior.fact)
        return
    if type(behavior) == manage.DeclareSingle:
        fact = getFact(state, behavior)
        if not fact:
            raise ValueError("page without fact: " + str(state.currentPage))
        state.factsPresented.append(fact)
        behavior.fact = fact
        debug("DECIDE: DeclSing:\n\tfact: ", behavior.fact.fact.name, \
              "\n\taspect: ", behavior.fact.aspect,\
              "\n\twhich: ", behavior.fact.which,\
              "\n\tstateUpdated: ",\
              state.factsPresented[-1] is behavior.fact)
        return
    if type(behavior) == manage.DeclareTriple:
        # Three independent draws; any of them may raise ValueError.
        i = 0
        while i < 3:
            fact = getFact(state, behavior)
            if not fact:
                raise ValueError("page without fact: " + str(state.currentPage))
            state.factsPresented.append(fact)
            behavior.facts[i] = fact
            i += 1
        debug("DECIDE: DeclMult:\n\tfact1: ", behavior.facts[0].fact.name,\
              "\n\taspect: ", behavior.facts[0].aspect,\
              "\n\twhich: ", behavior.facts[0].which,\
              "\n\tfact2: ", behavior.facts[1].fact.name, \
              "\n\taspect: ", behavior.facts[1].aspect,\
              "\n\twhich: ", behavior.facts[1].which,\
              "\n\tfact3: ", behavior.facts[2].fact.name,\
              "\n\taspect: ", behavior.facts[2].aspect,\
              "\n\twhich: ", behavior.facts[2].which,\
              "\n\tstateUpdated: ",\
              behavior.facts[0] is state.factsPresented[-3] and\
              behavior.facts[1] is state.factsPresented[-2] and\
              behavior.facts[2] is state.factsPresented[-1])
        return
    if type(behavior) == manage.Transition:
        behavior.newFact = None
        getTransition(state, behavior)
        if not behavior.newFact:
            debug("DECIDER:\n\tno good trans fact")
            raise ValueError("could not find transitionable fact")
        # The manager switches pages only after the user agrees.
        state.newPage = behavior.newFact.fact.page
        debug("DECIDE: Transition: \n\tfromFact: ",\
              behavior.fromFact.fact.name,\
              "\n\taspect: ", behavior.fromFact.aspect,\
              "\n\twhich: ", behavior.fromFact.which,\
              "\n\tcommon: ", behavior.commonality,\
              "\n\tnewFact: ", behavior.newFact.fact.name,\
              "\n\taspect: ", behavior.newFact.aspect,\
              "\n\twhich: ", behavior.newFact.which,\
              "\n\tstateUpdated: ",\
              state.factsPresented[-1] is behavior.newFact,\
              "\n\tstate.newPage: ", state.newPage)
        return
    raise TypeError('could not determine behavior type')
    return
def getFact(state, behavior):
    """Draw a random, not-yet-presented FactSpec from the current page.

    Tries up to 100 random picks; on failure (or an empty page) marks the
    behavior for transition and returns None.
    """
    candidates = state.currentPage.facts
    seen = state.factsPresented
    #debug("factList:", candidates)
    if not candidates:
        behavior.transition = "transition"
        return None
    for attempt_no in range(100):
        candidate = random.choice(candidates)
        try:
            spec = makeFactSpec(candidate)
        except TypeError:
            debug("\tBad fact type:", repr(candidate))
            continue
        except ValueError:
            debug("\tEmpty fact:", candidate.name, " attempts: ", attempt_no)
            continue
        if not isIn(spec, seen):
            return spec
    behavior.transition = "transition"
    return None
def isIn(spec, list):
    """Return True when *spec* duplicates a FactSpec already in *list*.

    Two specs are duplicates when they name the same fact and either share
    the same 'which', both describe birth/death, or both describe
    start/end/participants; also when one spec's fact is the other's
    'which'.  NOTE: the parameter name shadows the builtin ``list``;
    kept for interface stability.
    """
    for fs in list:
        debug("FACT[", spec.fact.name.lower().strip(),",",\
              fs.fact.name.lower().strip(),"]\n\tWHICH[",spec.which,\
              ",",fs.which,"]\n\tASPECT[",str(fs.aspect).strip(),\
              ",",str(spec.aspect).strip(),"]")
        if spec.fact.name.lower().strip() == fs.fact.name.lower().strip():
            if fs.which == spec.which:
                debug("\tTRUE")
                return True
            # birth/death are interchangeable aspects of the same fact
            if (fs.aspect in ["birth", "death"] and\
                    spec.aspect in ["birth", "death"]):
                debug("\tTRUE")
                return True
            if (fs.aspect in ["start", "end", "participants"]) and\
                    (spec.aspect in ["start", "end", "participants"]):
                debug("\tTRUE")
                return True
        # cross match: one spec's subject is the other's object
        if spec.fact == fs.which or spec.which == fs.fact:
            debug("\tTRUE")
            return True
    debug("\tFALSE")
    return False
def makeFactSpec(fact):
    """Build a FactSpec exposing one randomly chosen, non-empty aspect of
    *fact*.

    Raises TypeError for unknown fact types and ValueError when every
    candidate aspect is empty.
    """
    aspect_table = {
        Location: ["events", "contains", "within"],
        Person: ["birth", "death", "caused", "participated"],
        Event: ["start", "end", "location", "causes", "instigators",
                "participants"],
        Group: ["members"],
        Object: ["creation", "creator", "location"],
    }
    typ = type(fact)
    if typ not in aspect_table:
        raise TypeError("could not determine fact type (raw type is "\
                        + str(typ) + ")")
    fields = list(aspect_table[typ])
    # Keep drawing until we land on a populated aspect.
    choice = random.choice(fields)
    attr = getattr(fact, choice)
    while not attr:
        fields.remove(choice)
        if len(fields) == 0:
            raise ValueError("empty fact: " + str(fact))
        choice = random.choice(fields)
        attr = getattr(fact, choice)
    if type(attr) in (list, set, tuple):
        return FactSpec(fact, choice, random.choice(list(attr)))
    else:
        return FactSpec(fact, choice, attr)
def getTransition(state, behavior):
    """Find a fact on an unvisited page sharing a time or a location with
    the most recently presented fact, and store it on *behavior*.

    Fixes: removed a duplicated ``newFact = timeFact`` assignment; the
    location comparison now calls ``.lower()`` instead of comparing the
    bound methods themselves (the original relied on Python 2 method
    equality semantics and breaks on Python 3.8+); dropped the unused
    ``tries`` counter; renamed the inverted mustTime/mustLoca flags.
    """
    fromFact = state.factsPresented[-1]
    aspect = "none"
    choice = random.randint(1,3)
    locationFailed = False  # a location pivot could not be built
    timeFailed = False      # a time pivot could not be built
    # Pick the pivot aspect (location vs. time), falling back to the other
    # when the preferred one cannot produce a spec.
    while not locationFailed or not timeFailed:
        if not locationFailed and (timeFailed or choice == 1):
            if fromFact.aspect in ("contains", "within", "location"):
                aspect = "location r"
                break
            else:
                aspect = "location"
                try:
                    fromFact = getLocationSpec(fromFact.fact)
                    break
                except TypeError:
                    locationFailed = True
                except ValueError:
                    locationFailed = True
        else:
            if fromFact.aspect in ("start", "end"):
                aspect = "time r"
                break
            else:
                aspect = "time"
                try:
                    fromFact = getTimeSpec(fromFact.fact)
                    break
                except TypeError:
                    timeFailed = True
                except ValueError:
                    timeFailed = True
    if (locationFailed and timeFailed):
        debug ("DECIDER: getTransition\n\tno good transitionable fact")
        return
    newFact = None
    for fact in state.factList:
        if fact.page in state.pagesVisited:
            continue
        if type(fact) == Event and (aspect == "time" or aspect == "time r"):
            date_or_event = fromFact.which
            # Normalize the pivot to a date (an Event pivot contributes its
            # start or end).
            if isinstance(date_or_event, Event):
                if date_or_event.start or date_or_event.end:
                    date = date_or_event.start or date_or_event.end
                else:
                    continue
            else:
                date = date_or_event
            if fact.start and fact.end and date.within(fact.start, fact.end):
                try:
                    timeFact = getTimeSpec(fact)
                except TypeError:
                    return
                except ValueError:
                    return
                if isIn(timeFact, state.factsPresented):
                    debug("\tCONTINUING")
                    continue
                debug("\tBREAKING")
                newFact = timeFact
                break
        if aspect == "location" or aspect == "location r":
            location = fromFact.which.name.lower()
            try:
                locSpec = getLocationSpec(fact)
                if isIn(locSpec, state.factsPresented):
                    debug("\tCONTINUING")
                    continue
            except TypeError:
                return
            except ValueError:
                return
            if locSpec and location == locSpec.which.name.lower():
                newFact = locSpec
                debug("\tBREAKING")
                break
    if newFact:
        behavior.fromFact = fromFact
        behavior.commonality = aspect
        behavior.newFact = newFact
        state.factsPresented.append(newFact)
        debug("DECIDER: getTransition\n\tfromFact: ", behavior.fromFact, \
              "\n\tcommonality: " , behavior.commonality, \
              "\n\tnewFact:", behavior.newFact)
def getLocationSpec(fact):
    """Return a FactSpec for the location aspect of *fact*.

    Only Events and Objects carry a location; anything else raises
    TypeError, and a missing location raises ValueError.
    """
    if type(fact) not in (Event, Object):
        raise TypeError("no location")
    fields = ["location"]
    choice = random.choice(fields)
    attr = getattr(fact, choice)
    while not attr:
        fields.remove(choice)
        if len(fields) == 0:
            raise ValueError("empty fact: " + str(fact))
        choice = random.choice(fields)
        attr = getattr(fact, choice)
    if type(attr) in (list, set, tuple):
        return FactSpec(fact, choice, random.choice(list(attr)))
    else:
        return FactSpec(fact, choice, attr)
def getTimeSpec(fact):
    """Return a FactSpec for the time-like aspect of *fact*.

    Persons pivot on birth, Events on start, Objects on creation; other
    types raise TypeError, and an empty aspect raises ValueError.
    """
    time_fields = {
        Person: ["birth"],
        Event: ["start"],
        Object: ["creation"],
    }
    typ = type(fact)
    if typ not in time_fields:
        raise TypeError("no time")
    fields = list(time_fields[typ])
    choice = random.choice(fields)
    attr = getattr(fact, choice)
    while not attr:
        fields.remove(choice)
        if len(fields) == 0:
            raise ValueError("empty fact: " + str(fact))
        choice = random.choice(fields)
        attr = getattr(fact, choice)
    if type(attr) in (list, set, tuple):
        return FactSpec(fact, choice, random.choice(list(attr)))
    else:
        return FactSpec(fact, choice, attr)
def isEmptyFact(fact):
    """Return True when *fact* has no populated, presentable aspect.

    Unknown fact types count as empty.  An aspect counts as populated
    when it is not None and, for containers, non-empty.
    (Fixed ``attr != None`` to the idiomatic ``is not None``.)
    """
    typ = type(fact)
    fields = []
    if (typ == Location):
        fields = ["events", "contains", "within"]
    elif (typ == Person):
        fields = ["birth", "death", "caused", "participated"]
    elif (typ == Event):
        fields = ["start", "end", "location", "causes", "instigators",\
                  "participants"]
    elif (typ == Group):
        fields = ["members"]
    elif (typ == Object):
        fields = ["creation", "creator", "location"]
    else:
        return True
    for choice in fields:
        attr = getattr(fact, choice)
        if attr is not None:
            if type(attr) in (list, set, tuple) and len(attr) == 0:
                continue
            return False
    return True
| Python |
"""-----------------------------------------------------------------------------
manage.py
This module consists of 3 main components: the Behavior class hierarchy, the
definition of State, and the Manager class. manage.py chooses what type of
Behavior object to create and then passes this object(along with state and input
as needed) to decider and present.
-----------------------------------------------------------------------------"""
import present
from present import FactSpec
from decider import choose
from decider import isEmptyFact
from understand import get_meaning
import knowledge
from knowledge import Fact
import random, threading, collections, re, sys
from time import sleep
from select import select
# global variables for communication across threads
inp = ""                    # last line typed by the user (set by InputThread)
inputf = threading.Event()  # signalled when new input is available
exitf = threading.Event()   # signalled when the program should shut down
#list of topic names
topicList = []
topicDict = {}              # topic name -> page object (filled by Manager)
def output(*strings):
    """User-facing output: print *strings* space-separated on one line."""
    for s in strings:
        print str(s),
    print
def debug(*strings):
    """Debug logging, disabled: the bare ``return`` short-circuits it."""
    return
    for s in strings:
        print str(s),
    print
def checkin(input):
    """Input hook; currently the identity function."""
    return input
"""-----------------------------------------------------------------------------
Behavior - superclass for all behaviors.
This superclass used to contain fields for transitions, but now user-initiated
transitions are handled differently.
present() - dummy function which all derived Behaviors redefine. Necessary(?)
__init__() is undefined because only empty Behaviors should ever be created(?).
-----------------------------------------------------------------------------"""
class Behavior(object):
    """Root of the behavior hierarchy.

    Concrete behaviors override present() to render themselves; the base
    implementation is a deliberate no-op placeholder.
    """

    def present(self):
        """Render this behavior as text; the base class renders nothing."""
        return None

    def __repr__(self):
        return "Behavior()"
"""-----------------------------------------------------------------------------
Transition - the behavior in which the agent asks the user whether they would
like to continue learning about the current subject or transition to another.
commonality - a string describing the relationship between fromFact and
newFact.
-----------------------------------------------------------------------------"""
class Transition(Behavior):
    # NOTE: class-level defaults shared by all instances until
    # decider.choose() rebinds them as instance attributes.
    newFact = FactSpec()    # fact to move to (on the new page)
    fromFact = FactSpec()   # fact we are leaving
    commonality = ""        # how fromFact and newFact relate ("time"/"location")
    def present(self):
        return present.transition(self)
    def __repr__(self):
        return "Transition()"
"""-----------------------------------------------------------------------------
Query - Behavior in which the agent asks the user whether they know a certain
fact.
-----------------------------------------------------------------------------"""
class Query(Behavior):
    # Class-level default; rebound per instance by decider.choose().
    fact = FactSpec()
    def present(self):
        return present.query(self)
    def __repr__(self):
        return "Query()"
"""-----------------------------------------------------------------------------
DeclareSingle - Behavior in which the agent states a single fact.
-----------------------------------------------------------------------------"""
class DeclareSingle(Behavior):
    # Class-level default; rebound per instance by decider.choose().
    fact = FactSpec()
    def present(self):
        return present.declareSingle(self)
    def __repr__(self):
        return "DeclareSingle()"
"""-----------------------------------------------------------------------------
DeclareTriple - Behavior in which the agent states exactly three facts.
May change to state n facts in future.
facts is a list of exactly 3 FactSpec objects to be presented.
commonality is a string(type subject to change) indicating how the facts are
related.
-----------------------------------------------------------------------------"""
class DeclareTriple(Behavior):
    """Behavior in which the agent states exactly three facts."""
    # Class-level default kept for backward compatibility.  Without the
    # __init__ below, decider.choose() writing behavior.facts[i] mutated a
    # single list shared by EVERY DeclareTriple instance.
    facts = [FactSpec(), FactSpec(), FactSpec()]

    def __init__(self):
        # Per-instance slots so presented facts never leak between behaviors.
        self.facts = [FactSpec(), FactSpec(), FactSpec()]

    def present(self):
        return present.declareTriple(self)

    def __repr__(self):
        return "DeclareTriple()"
"""-----------------------------------------------------------------------------
State - class representing state for use by manage.py and decider.py
variables primarily used by decider.py:
factsPresented - list of FactSpecs
pagesVisited - list of strings(?)
currentPage - string(?)
newPage - string set by decider on Transition behaviors(?)
variables used by manage.py in choosing behaviors:
lastTransition/Query/Declare - number of Behaviors chosen since last Transition
/Query/Declare
pageQueries = # of queries since last transition
knownQueries = # of queries known since last transition
p = list of probabilities associated with each behaviors. Association is
[Transition, Query, DeclareSingle, DeclareTriple]. Always sum to 100.
-----------------------------------------------------------------------------"""
class State(object):
    # Class-level defaults; the mutable ones are re-created per instance
    # in __init__ so State objects do not share containers.
    factsPresented = list()   # FactSpecs shown so far
    pagesVisited = list()     # pages already covered this session
    currentPage = ""
    newPage = ""              # set by decider on Transition behaviors
    pageList = None           # all non-empty pages (set in Manager.converse)
    factList = None           # all known facts
    lastTransition = 0        # behaviors since last Transition
    lastQuery = 0             # behaviors since last Query
    lastDeclare = 0           # behaviors since last Declare
    p = [0, 50, 40, 10]       # weights: [Transition, Query, DeclareSingle, DeclareTriple]
    pageQueries = 0           # queries since last transition
    knownQueries = 0          # queries the user answered "yes" to
    def __init__(self):
        # Shadow the shared class-level containers with per-instance ones.
        self.factsPresented = []
        self.pagesVisited = []
        self.p = [0, 50, 40, 10]
"""-----------------------------------------------------------------------------
pickBehavior - returns Behavior object chosen randomly using list of prob.'s
Equations imply some basics about behavior:
-Topic must be discussed for at least 5 "turns." Changing too quickly, even if
it happens at low probability is easiest way to make bot look stupid.
-Currently first Behavior on new subject is always a Query. Might try:
-- first term = min(100 - x^2.5, 0) which has effect of making the "rapidly
assess knowledge after subject change" idea more pronounced
-- something like 25cos(x/pi) + 25 so there is ebb-and-flow to assess/teach
-DeclareSingle is always 4x more likely than DeclareTriple
-Probability of Transition will be positive after 25 facts regardless of how
much user knows. Probably won't be a problem.
Probabilities may seem low, but they are iterated upon, so I am guesstimating
that they might be okay. Need to experiment.
-----------------------------------------------------------------------------"""
def pickBehavior(s):
    """Randomly pick the next Behavior.

    Temporary logic: the first turn after a transition never transitions
    again; otherwise all four behaviors are equally likely.  (An
    unreachable, commented-out weighted-probability scheme based on s.p
    used to sit below the returns; removed as dead code.)
    """
    blist = [Transition(), DeclareSingle(), DeclareTriple(), Query()]
    if s.lastTransition > 0:
        return random.choice(blist)
    else:
        return random.choice(blist[1:])
"""-----------------------------------------------------------------------------
updateState - takes state, behavior, and a string returned by understand and
changes all elements of state which manage.py should ever change except
currentPage, which is changed in checkTransition().
-----------------------------------------------------------------------------"""
def updateState(state, b, s):
    """Bump the per-behavior-type counters after a turn.

    *b* is the behavior just presented, *s* the user's parsed response
    ("quit" triggers shutdown; "yes" to a Query counts as known).
    """
    if s == "quit":
        exitf.set()
        return
    # Age every counter, then zero the one matching this behavior.
    state.lastTransition += 1
    state.lastQuery += 1
    state.lastDeclare += 1
    if isinstance(b, Transition):
        state.lastTransition = 0
    elif isinstance(b, Query):
        state.lastQuery = 0
        state.pageQueries += 1
        if s == "yes":
            state.knownQueries += 1
    else:
        state.lastDeclare = 0
# returns true if user has typed anything, doesn't work in windows
# haven't tested or used yet
def poll():
    """Return True if the user has typed anything (non-blocking; Unix only).

    Fix: select() takes three *lists* of waitables plus a timeout; the
    original ``select(sys.stdin, None, None)`` raised TypeError and would
    have blocked forever even if it hadn't.
    """
    ready, _, _ = select([sys.stdin], [], [], 0)
    return bool(ready)
"""-----------------------------------------------------------------------------
nag - waits on the user different amount of time depending on behavior.
Very easy improvement is to add a bunch of different strings
Could also add things like 10% chance to say "Isn't that interesting?" or
something on Declare behaviors after a few seconds of no input.
-----------------------------------------------------------------------------"""
def nag(b, inputThread):
    """Wait on the user for a behavior-dependent amount of time,
    re-prompting where input is mandatory.

    ``inputThread`` is accepted but unused; the function synchronizes via
    the global ``inputf`` event, which it clears before returning.
    """
    global inputf
    if isinstance(b, Transition):
        inputf.wait(30)
        if not inputf.isSet():
            output("I guess not..")
    if isinstance(b, Query):
        inputf.wait(20)
        # currently input is mandatory for queries. I don't have an opinion as
        # to whether it should be or not.
        while not inputf.isSet():
            output("Are you there? ", b.present())
            inputf.wait(25)
    if isinstance(b, DeclareSingle):
        inputf.wait(20)
    if isinstance(b, DeclareTriple):
        inputf.wait(35)
    inputf.clear()
    return
"""-----------------------------------------------------------------------------
Spell-checking functions - see http://norvig.com/spell-correct.html to see how
they work. correct() returns its input or a valid word within edit distance of 2
Very easy to increase to edit distance of 3, but might slow down program a lot.
Only checks edit distance if word is sufficiently long. Otherwise pants = Dante
and poop = Pope :/
-----------------------------------------------------------------------------"""
alphabet = 'abcdefghijklmnopqrstuvwxyz'  # replacement/insertion set for word tokens
digits = "0123456789s"  # for date-like tokens; the 's' allows decades ("1550s")
NWORDS = {}  # word -> frequency model; filled by Manager.__init__ via train()
def words(text): return re.findall('[a-z]+[a-z]+[a-z]+', text.lower())
def train(features):
    """Build a word-frequency model from *features*.

    Every word (seen or not) starts with count 1, i.e. add-one smoothing.
    """
    model = collections.defaultdict(lambda: 1)
    for feature in features:
        model[feature] += 1
    return model
def edits1(word):
    """All strings one edit away from *word* over the letter alphabet
    (deletions, adjacent transpositions, replacements, insertions)."""
    results = set()
    for i in range(len(word) + 1):
        head, tail = word[:i], word[i:]
        if tail:
            results.add(head + tail[1:])                       # delete
        if len(tail) > 1:
            results.add(head + tail[1] + tail[0] + tail[2:])   # transpose
        for ch in alphabet:
            if tail:
                results.add(head + ch + tail[1:])              # replace
            results.add(head + ch + tail)                      # insert
    return results
def dedits1(word):
    """Like edits1 but over the digit alphabet (plus 's' for decades);
    used for correcting date-like tokens."""
    results = set()
    for i in range(len(word) + 1):
        head, tail = word[:i], word[i:]
        if tail:
            results.add(head + tail[1:])                       # delete
        if len(tail) > 1:
            results.add(head + tail[1] + tail[0] + tail[2:])   # transpose
        for ch in digits:
            if tail:
                results.add(head + ch + tail[1:])              # replace
            results.add(head + ch + tail)                      # insert
    return results
def known_edits2(word):
    """Every known word exactly two edits away from *word*."""
    found = set()
    for once in edits1(word):
        for twice in edits1(once):
            if twice in NWORDS:
                found.add(twice)
    return found
def known(words): return set(w for w in words if w in NWORDS)
def correct(word):
    """Return *word*, or the most frequent known word within edit distance
    1 (distance 2 for words longer than 5 letters).

    Date-like/short tokens are corrected over the digit alphabet instead.
    Fixes: locals renamed (the original shadowed the ``max`` builtin), and
    the edit-distance-2 fallback is now reachable -- previously
    ``candidates or known_edits2(word)`` never fired because candidates
    always contained at least [word].
    """
    if word.isalpha() and len(word) > 4:
        candidates = known([word]) or known(edits1(word))
        if not candidates and len(word) > 5:
            candidates = known_edits2(word)
        candidates = candidates or [word]
    else:
        candidates = known([word]) or known(dedits1(word)) or [word]
    best_count = 1
    best_word = word
    for w in candidates:
        if NWORDS[w] > best_count:
            best_word = w
            best_count = NWORDS[w]
    return best_word
"""-----------------------------------------------------------------------------
pickTopic - returns topic if there is one that is similar enough. It might be
better to search through facts or both topic names and facts. This method
only searches through topic names.
I am unsure how this module will end up accessing Page objects. Hopefully
there is a list of them somewhere that I can access.
Could be simplified greatly, probably.
-----------------------------------------------------------------------------"""
def pickTopic(meaning):
    """Return (topic, noun_phrase) for the known topic best matching the
    user's utterance, or (None, None) when nothing matches.

    meaning[1:] holds candidate noun-phrase word vectors; each is
    spell-corrected before being scored against every topic name.
    bestVector is only read when bestSim >= 1, which guarantees it was
    assigned in the loop.
    """
    global topicList
    bestMatch = topicList[0]
    bestSim = 0
    for vector in meaning[1:]:
        cvector = [correct(w) for w in vector]
        for topic in topicList:
            sim = calcSim(cvector, topic)
            if sim > bestSim:
                bestSim = sim
                bestMatch = topic
                bestVector = vector
    if bestSim >= 1:
        return bestMatch, bestVector
    return None, None
"""-----------------------------------------------------------------------------
calcSim - helper function that returns the similarity between two vectors of
strings. The similarity is simply the number of words in the vector within
an edit distance of 2 of any word in the topic name.
-----------------------------------------------------------------------------"""
def calcSim(vector, topic):
    """Count how many entries of *vector* occur as whole words in *topic*
    (case-insensitive).

    NOTE: mutates *vector* in place, rewriting each entry as a lower-cased
    regex wrapped in \\b anchors -- callers rely on passing a scratch copy.
    """
    topic = topic.lower()
    matches = 0
    for i, word in enumerate(vector):
        vector[i] = r"\b" + word.lower() + r"\b"
        if re.search(vector[i], topic) is not None:
            matches += 1
    return matches
def goodPage(page):
    """A page is teachable when it is non-empty and holds at least one
    fact with presentable content."""
    if page.empty:
        return False
    return any(not isEmptyFact(fact) for fact in page.facts)
"""-----------------------------------------------------------------------------
greet - greets the user with two randomly selected subjects. Currently these
can't be years because we don't deal with the decade special case.
-----------------------------------------------------------------------------"""
def greet():
    """Open the conversation by offering two distinct, teachable topics,
    then wait for the user's reply.

    Fixes: removed a leftover hard-coded debug print
    (``print goodPage(topicDict["The Last Supper"])`` -- a KeyError bomb),
    and the second topic is now re-drawn until it is BOTH teachable and
    distinct (the original ``and`` loop could exit with an empty page).
    """
    topic1 = random.choice(topicList)
    while not goodPage(topicDict[topic1]):
        topic1 = random.choice(topicList)
    topic2 = random.choice(topicList)
    while topic2 == topic1 or not goodPage(topicDict[topic2]):
        topic2 = random.choice(topicList)
    facts = FactSpec(Fact(topic1), None, topic2)
    output(present.intro(facts))
    inputf.wait(40)
    while not inputf.isSet():
        output("Please tell me what you'd like to learn about.")
        inputf.wait(45)
    inputf.clear()
"""-----------------------------------------------------------------------------
checkTransition - asks the user if he wants to transition to a certain subject
and determines the response.
-----------------------------------------------------------------------------"""
def checkTransition(meaning, state):
    """Offer to switch to the topic the user seems to be asking about and
    act on the answer.

    When the user's noun phrase is only a loose match (no long substring
    of the topic name), confirmation is demanded first; an exact-ish match
    switches immediately.

    Fixes: the "no" reply used ``"...".state...`` (attribute-access typo
    for a comma, raising AttributeError), and ``exitf.setf()`` is now the
    real ``exitf.set()``.
    """
    if meaning[0] == "quit":
        exitf.set()
    substrf = False
    if len(meaning) < 2:
        return
    newTopic, nounPhrase = pickTopic(meaning)
    if newTopic is None or not goodPage(topicDict[newTopic]):
        return
    for w in nounPhrase:
        if w.lower() in newTopic.lower() and len(w) > 3:
            substrf = True
    if not substrf:
        facts = FactSpec(Fact(newTopic))
        # ask and demand response
        output(present.confirm(facts))
        inputf.wait(30)
        while not inputf.isSet():
            output("Please answer.", present.confirm(facts))
            inputf.wait(25)
        inputf.clear()
        # analyze user's response
        meaning = get_meaning(inp)
        if meaning[0] == "yes":
            output("Okay. I will tell you about " + newTopic + ".")
            state.lastTransition = 0
            state.pageQueries = 0
            state.knownQueries = 0
            state.currentPage = topicDict[newTopic]
        if meaning[0] == "no":
            output("Then let's continue talking about",
                   state.currentPage.url.replace("_", " ") + ".")
        if meaning[0] == "unknown":
            output("I don't understand. Let's just talk about",
                   state.currentPage.url.replace("_", " ") + ".")
        if meaning[0] == "quit":
            exitf.set()
            sleep(1)
    else:
        state.currentPage = topicDict[newTopic]
class InputThread(threading.Thread):
    """Daemon thread that blocks on raw_input and publishes each line.

    Each line is stored in the global ``inp`` and announced via the
    ``inputf`` event; EOF (Ctrl-D) requests shutdown through ``exitf``.
    """
    def run(self):
        global inp
        while True:
            try: inp = checkin(raw_input())
            except EOFError:
                exitf.set()
            inputf.set()
"""-----------------------------------------------------------------------------
Manager - Manages Behaviors, contains and updates state, gets input.
converse - contains primary program loop and initialization
-----------------------------------------------------------------------------"""
class Manager:
    """Top-level driver: loads the knowledge base, owns the input thread,
    and runs the conversation loop."""
    def __init__(self):
        global NWORDS
        global topicList
        global inputThread
        self.facts, self.pages = knowledge.load_facts()
        self.dates = filter(lambda d: isinstance(d, knowledge.Date), self.facts)
        self.facts = filter(lambda d: isinstance(d, knowledge.Fact), self.facts)
        # Create topicList
        for p in self.pages:
            topicList.append(p.url.replace('_', ' '))
            topicDict[p.url.replace('_', ' ')] = p
        # Spelling model trained on the page titles themselves.
        NWORDS = train(words('\n'.join(p.url for p in self.pages)))
        inputThread = InputThread()
        inputThread.setDaemon(True)
        inputThread.start()
    def converse(self):
        """Main loop: pick a behavior, fill it, present it, read the
        user's reply, update state, and honor requested transitions."""
        state = State()
        state.currentPage = self.pages[0]
        state.pageList = filter(lambda p: not p.empty, self.pages)
        state.factList = self.facts
        greet()
        checkTransition(get_meaning(inp), state)
        while not exitf.isSet():
            b = pickBehavior(state)
            debug(type(b))
            try:
                choose(state, b)
            except ValueError:
                # decider found no presentable fact on this page
                pageProblem(b, state)
                continue
            output(b.present())
            nag(b, inputThread)
            meaning = get_meaning(inp)
            updateState(state, b, meaning[0])
            if isinstance(b, Transition) and meaning[0] == "yes":
                state.pagesVisited.append(state.newPage)
                state.currentPage = state.newPage
                state.newPage = None
            checkTransition(meaning, state)
        print present.bye()
def _findFreshPage(state):
    """Randomly pick an unvisited, non-empty page holding at least one
    presentable fact; None after 100 failed draws."""
    rand = random.choice(state.pageList)
    tries = 0
    while True:
        if rand in state.pagesVisited or rand.empty:
            rand = random.choice(state.pageList)
            tries = tries + 1
            if tries > 100:
                return None
        else:
            for fact in rand.facts:
                if not isEmptyFact(fact):
                    return rand
            rand = random.choice(state.pageList)
            tries = tries + 1
            if tries > 100:
                return None

def pageProblem(behavior, state):
    """Recover when the decider cannot find a presentable fact: jump to a
    fresh page, or shut down when none can be found.

    Refactored: both branches of the original duplicated the entire page
    search verbatim; it now lives in _findFreshPage.  Only the
    user-facing message differs by behavior type.
    """
    current = state.currentPage
    rand = _findFreshPage(state)
    if rand is None:
        exitf.set()
        return
    state.currentPage = rand
    state.pagesVisited.append(rand)
    if type(behavior) == Transition:
        print("Let's change the subject to " + str(rand))
    else:
        print("Actually, I don't know anything more about " + \
              str(current) + ". Let's talk about " + str(rand))
| Python |
import pickle, os, sys, re
import xml.dom.minidom, htmlentitydefs
import nltk
# Section names in data/pages.list: which to harvest, which hold dates,
# and which to skip entirely.
CATEGORIES = (
    "General",
    "People",
    "Objects",
)
DATES = (
    "Dates",
)
EXCLUDE = (
    "Bad Cases",
    "Finished",
)
def debug(*strings):
    """Print *strings* space-separated on one line (always enabled here)."""
    for s in strings:
        print s,
    print
# Read data/pages.list: a '--Section--' header starts a category; every
# other line is a page name appended to the current category.
fin = open(os.path.join("data", "pages.list"), 'r')
categories = {}
current = None
for line in fin:
    line = line.strip()
    if line[:2] == '--' and line[-2:] == '--':
        current = line[2:-2]
        if current not in categories:
            categories[current] = []
    else:
        # NOTE(review): blank lines also land here and are appended as
        # empty page names -- confirm pages.list has no blank lines.
        categories[current].append(line)
fin.close()
# Path understood by nltk.data.load; resolves inside the NLTK data dir.
POS_TAGGER = "taggers/maxent_treebank_pos_tagger/english.pickle"
def getDivChildren(node):
    """Return the direct <div> element children of a minidom *node*.

    Text nodes lack a tagName, hence the hasattr guard.
    """
    divs = []
    for child in node.childNodes:
        if hasattr(child, 'tagName') and child.tagName == "div":
            divs.append(child)
    return divs
def stripSups(node):
    """Recursively remove every <sup> element beneath *node* (footnote
    markers like [1]).

    Bug fix: the original iterated node.childNodes while calling
    removeChild(), which skips the sibling following each removed node,
    so adjacent <sup> elements survived.  Iterate a snapshot instead.
    """
    for child in list(node.childNodes):
        if child.nodeName == u'sup':
            node.removeChild(child)
        else:
            stripSups(child)
def chopTail(thing, where):
    """Truncate *thing* just before the LAST occurrence of *where*.

    Each search round deliberately skips the first len(where) characters
    of the remaining text, so an occurrence at the very start of *thing*
    does not trigger truncation.  Returns *thing* unchanged when no
    qualifying occurrence exists.
    """
    cut = 0
    remainder = thing
    while where in remainder[len(where):]:
        offset = remainder[len(where):].index(where) + len(where)
        cut += offset
        remainder = remainder[offset:]
    if cut > 0:
        return thing[:cut]
    return thing
def retag(sentence):
    """Flatten a chunked sentence into a plain (token, tag) list.

    Named-entity subtrees become a single ("joined words", "ENT-<type>")
    pair; parentheses are normalized to self-tagged tokens; everything
    else passes through untouched.
    """
    result = []
    for item in sentence:
        if isinstance(item, nltk.tree.Tree):
            entity_text = ' '.join(word for word, tag in item.leaves())
            result.append((entity_text, "ENT-" + item.node))
        elif item[0] == '(':
            result.append(('(', '('))
        elif item[0] == ')':
            result.append((')', ')'))
        else:
            result.append(item)
    return result
# Load the POS tagger and the feature grammar once at import time; the
# tag-label list is kept for reference when writing grammar rules.
tagger = nltk.data.load(POS_TAGGER)
labels = tagger.classifier().labels()
# Labels are defined online at:
# http://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html
fin = open(os.path.join('data', 'basic.fcfg'), 'r')
grammar = nltk.grammar.parse_fcfg(fin.read())
fin.close()
parser = nltk.parse.earleychart.FeatureEarleyChartParser(grammar)
def strip_matched(start, end, data):
    """Delete every span running from *start* through the next *end*
    (inclusive) from *data*.

    Raises ValueError (str.index semantics) if a *start* has no
    following *end*, matching the original behavior.
    """
    while True:
        first = data.find(start)
        if first == -1:
            return data
        last = first + data[first:].index(end) + len(end)
        data = data[:first] + data[last:]
def get_clean_body(data):
    """Isolate the <body> of an HTML page and scrub scripts, forms,
    images, non-breaking spaces, and unclosed <br> tags."""
    body = data[data.index("<body "):data.index("</body>")] + "</body>"
    cleaned = "<html>" + body + "</html>"
    for opener, closer in (("<script", "</script>"),
                           ("<form", "</form>"),
                           ("<img", ">")):
        cleaned = strip_matched(opener, closer, cleaned)
    cleaned = cleaned.replace('&nbsp;', ' ')
    cleaned = re.sub('<br[^>]*>', '<br/>', cleaned)
    return cleaned
def cleanup_bs(html):
    """Turn block-level closers into sentence breaks, decode HTML
    entities, and strip the remaining markup.

    Bug fix: the numeric-entity pattern used a capturing group, so
    findall() returned only the digit strings ("233" instead of "&#233;");
    the subsequent replace() then mangled bare digit runs in the text and
    c[2:-1] sliced the wrong characters.  Match the whole entity instead.
    """
    html = html.replace('</div>', '.')
    html = html.replace('</h2>', '.')
    html = html.replace('</h3>', '.')
    html = html.replace('</td>', '.')
    html = html.replace('</p>', '.')
    html = html.replace('</li>', '.')
    # Whole entities like '&#233;'; c[2:-1] below is the digit run.
    codepoints = re.findall(r'&#\d+;', html)
    entities = re.findall(r'&[^;]+;', html)
    for c in codepoints:
        html = html.replace(c, unichr(int(c[2:-1])))
    for e in set(entities):
        if e[1:-1] in htmlentitydefs.name2codepoint:
            codepoint = htmlentitydefs.name2codepoint[e[1:-1]]
            replacement = unichr(codepoint).encode('ascii', 'ignore')
            html = html.replace(e, replacement)
    result = nltk.util.clean_html(html)
    result = result.replace('[ edit ]', '.')
    result = result.replace('[ change ]', '.')
    return result
def process(*pages):
    """For each raw page file: clean the HTML, extract the main content,
    tag/chunk/parse every sentence, and pickle the result to
    data/parsed/<page>.pkl."""
    for page in pages:
        fin = open(os.path.join('data', 'raw', page), 'r')
        data = fin.read()
        fin.close()
        # Optional one-line 'CATEGORY:<name>' header written by the scraper.
        if data[:9] == "CATEGORY:":
            category = data[:data.index('\n')][9:]
            data = data[data.index('\n') + 1:]
        data = get_clean_body(data)
        debug('Parsing:', page)
        doc = xml.dom.minidom.parseString(data)
        title = doc.getElementsByTagName('h1')[0].firstChild.nodeValue
        debug('Title:', title)
        body = doc.getElementsByTagName('body')[0]
        # First div of the first div of <body> is the article content.
        maindiv = getDivChildren(body)[0]
        content = getDivChildren(maindiv)[0]
        stripSups(content)
        html = content.toxml().encode('ascii', 'ignore')
        doc.unlink()
        result = cleanup_bs(html)
        result = chopTail(result, 'Other pages')
        result = chopTail(result, 'References')
        while '..' in result:
            result = result.replace('..', '.')
        sents = [nltk.tokenize.word_tokenize(s.strip())
                 for s in nltk.tokenize.sent_tokenize(result) if len(s) > 3]
        # (useful) HACK!
        # Pull out "YYYY, event" / "YYYY - YYYY, event" sentences as dated
        # entries instead of free text.
        dates = []
        for i, s in enumerate(sents):
            if len(s) < 4:
                continue
            if re.match(r"\d+s?", s[0]):
                if s[1] == ',':
                    date = { 'when': s[0] }
                    event = s[2:]
                elif s[1] == '-' and re.match(r"\d+s?", s[2]) and s[3] == ',':
                    date = {
                        'start': s[0],
                        'end': s[2],
                    }
                    event = s[4:]
                else:
                    continue
                # NOTE(review): deleting from sents while enumerating it
                # skips the sentence that follows each extracted date --
                # confirm this is acceptable before relying on it.
                del sents[i]
                dates.append((date, event))
        for i, listed in enumerate(dates):
            date, event = listed
            tagged = tagger.tag(event)
            chunked = nltk.chunk.ne_chunk(tagged)
            nametagged = retag(chunked)
            tags = []
            words = []
            for word, tag in nametagged:
                words.append(word)
                tags.append(tag)
            tree = parser.parse(tags)
            dates[i] = {
                'date': date,
                'words': words,
                'tags': tags,
                'tree': tree,
            }
            # DEBUG
            #debug(date)
            #debug(words)
            #debug(tags)
            #debug(parsed)
            # /DEBUG
        # /HACK
        # Batch pipeline for the remaining free-text sentences.
        tagged = tagger.batch_tag(sents)
        chunked = nltk.chunk.batch_ne_chunk(tagged)
        for i, s in enumerate(chunked):
            chunked[i] = retag(s)
        tags = []
        words = []
        for sent in chunked:
            words.append([])
            tags.append([])
            for word, tag in sent:
                words[-1].append(word)
                tags[-1].append(tag)
        # DEBUG
        #while True:
        #    I = int(raw_input("Sentence (max=%d):" % len(tags)))
        #    debug(tags[I])
        #    debug(words[I])
        #    debug(parser.parse(tags[I]))
        #exit(1)
        # /DEBUG
        trees = parser.batch_parse(tags)
        items = []
        for i, sentence in enumerate(words):
            items.append({
                'words':sentence,
                'tags':tags[i],
                'tree':trees[i],
            })
        # DEBUG
        debug("Total sentences:", len(items))
        debug("Parsed sentences:", len([i for i in items if i['tree'] != None]))
        # /DEBUG
        fout = open(os.path.join('data', 'parsed', page + '.pkl'), 'w')
        pickle.dump({
            'title': title,
            'sentences': items,
            'listed': dates,
            'births': None,
            'deaths': None,
        }, fout)
        fout.close()
def process_dates(*dates):
    """Parse raw Wikipedia "date" pages (e.g. "January 1") and pickle the
    extracted events, births and deaths.

    For each page name, the raw HTML is read from data/raw/<page>, cleaned,
    and split on the "Events" / "Births" / "Deaths" section anchors.  Event
    sentences are tokenized, tagged, chunked and parsed; births/deaths are
    split into (date, name, description) triples.  The result is pickled
    to data/parsed/<page>.pkl.
    """
    # (useful) HACK!
    for page in dates:
        with open(os.path.join('data', 'raw', page), 'r') as fin:
            data = fin.read()
        # An optional first line "CATEGORY:<name>" is stripped off.
        if data[:9] == "CATEGORY:":
            category = data[:data.index('\n')][9:]
            data = data[data.index('\n') + 1:]
        data = get_clean_body(data)
        debug('Parsing:', page)
        doc = xml.dom.minidom.parseString(data)
        title = doc.getElementsByTagName('h1')[0].firstChild.nodeValue
        debug('Title:', title)
        body = doc.getElementsByTagName('body')[0]
        maindiv = getDivChildren(body)[0]
        content = getDivChildren(maindiv)[0]
        stripSups(content)
        html = content.toxml().encode('ascii', 'ignore')
        doc.unlink()
        # Character offsets of the section anchors; ilast marks the end of
        # the usable page body.  (ideaths renamed from "id", which
        # shadowed the builtin.)
        ie = -1
        ib = -1
        ideaths = -1
        ilast = -1
        if 'id="Events"' in html:
            ie = html.index('id="Events"')
        if 'id="Births"' in html:
            ib = html.index('id="Births"')
        if 'id="Deaths"' in html:
            ideaths = html.index('id="Deaths"')
        if '<div class="printfooter">' in html:
            ilast = html.index('<div class="printfooter">')
        if '<div class="notice plainlinks">' in html:
            ilast = html.index('<div class="notice plainlinks">')
        if ib < 0:
            ib = ilast
        if ideaths < 0:
            ideaths = ilast
        events = html[ie:ib]
        births = html[ib:ideaths]
        deaths = html[ideaths:ilast]
        date = {
            'when': title
        }
        # BUG FIX: these were previously bound only inside the section
        # conditionals below, so pages missing a section crashed with
        # NameError at dump time (and the 'listed' key could end up
        # holding the *argument tuple* instead of a result list).
        listed = []
        blist = []
        dlist = []
        if events:
            if ib != ilast:
                events = events[events.index('</span>') + 7:events.rindex('<span')]
            else:
                events = events[events.index('</span>') + 7:]
            events = cleanup_bs(events)
            while '..' in events:
                events = events.replace('..', '.')
            ev_sents = [nltk.tokenize.word_tokenize(s.strip())
                        for s in nltk.tokenize.sent_tokenize(events) if len(s) > 3]
            for s in ev_sents:
                if len(s) < 3:
                    continue
                listed.append((date, s))
            # Tag/chunk/parse each event sentence.
            for i, pair in enumerate(listed):
                d, event = pair
                tagged = tagger.tag(event)
                chunked = nltk.chunk.ne_chunk(tagged)
                nametagged = retag(chunked)
                tags = []
                words = []
                for word, tag in nametagged:
                    words.append(word)
                    tags.append(tag)
                tree = parser.parse(tags)
                listed[i] = {
                    'date': d,
                    'words': words,
                    'tags': tags,
                    'tree': tree,
                }
        if births:
            if ideaths != ilast:
                births = births[births.index('</span>') + 7:births.rindex('<span')]
            else:
                births = births[births.index('</span>') + 7:]
            births = cleanup_bs(births)
            # BUG FIX: the parentheses must be escaped to strip "(...)"
            # spans; the original pattern r'([^)]*?)' was a capture group
            # that lazily matched empty strings — a no-op.
            births = re.sub(r'\([^)]*?\)', '', births)
            while '..' in births:
                births = births.replace('..', '.')
            br_sents = [nltk.tokenize.word_tokenize(s.strip())
                        for s in nltk.tokenize.sent_tokenize(births) if len(s) > 3]
            for s in br_sents:
                # Strip trailing full-stop tokens (guard against a
                # sentence that is nothing but dots).
                while s and s[-1] == '.':
                    s = s[:-1]
                if not s:
                    continue
                if ',' in s:
                    name = ' '.join(s[:s.index(',')])
                    desc = ' '.join(s[s.index(',') + 1:])
                else:
                    name = ' '.join(s)
                    desc = None
                blist.append((date, name, desc))
        if deaths:
            deaths = deaths[deaths.index('</span>') + 7:]
            deaths = cleanup_bs(deaths)
            # BUG FIX: escaped parentheses, as for the births section.
            deaths = re.sub(r'\([^)]*?\)', '', deaths)
            deaths = chopTail(deaths, 'Other pages')
            deaths = chopTail(deaths, 'References')
            while '..' in deaths:
                deaths = deaths.replace('..', '.')
            dth_sents = [nltk.tokenize.word_tokenize(s.strip())
                         for s in nltk.tokenize.sent_tokenize(deaths) if len(s) > 3]
            for s in dth_sents:
                while s and s[-1] == '.':
                    s = s[:-1]
                if not s:
                    continue
                if ',' in s:
                    name = ' '.join(s[:s.index(',')])
                    desc = ' '.join(s[s.index(',') + 1:])
                else:
                    name = ' '.join(s)
                    desc = None
                dlist.append((date, name, desc))
        # 'wb': pickle data is binary; text mode corrupts it on Windows.
        with open(os.path.join('data', 'parsed', page + '.pkl'), 'wb') as fout:
            pickle.dump({
                'title': title,
                'sentences': [],
                'listed': listed,
                'births': blist,
                'deaths': dlist,
            }, fout)
    # /HACK
def get(page):
    """Load and return the pickled parse result for *page*.

    Reads data/parsed/<page>.pkl, the file written by process() /
    process_dates(), and returns the unpickled dictionary.
    """
    # 'rb': pickle data is binary; the original 'r' breaks on Windows.
    with open(os.path.join('data', 'parsed', page + '.pkl'), 'rb') as fin:
        return pickle.load(fin)
if __name__ == "__main__":
    # Expand the category tables into flat page lists.
    all_pages = [pg for c in CATEGORIES if c in categories
                 for pg in categories[c]]
    date_pages = [pg for c in DATES if c in categories
                  for pg in categories[c]]
    exclude = [pg for c in EXCLUDE if c in categories
               for pg in categories[c]]
    args = sys.argv[1:]
    if args:
        # Explicit page names on the command line: process only those.
        process(*[p for p in args if p in all_pages])
        process_dates(*[p for p in args if p in date_pages])
    else:
        # No arguments: process every non-hidden raw page that is not
        # excluded.
        candidates = [p for p in os.listdir(os.path.join('data', 'raw'))
                      if p[0] != '.']
        process(*[p for p in candidates
                  if p in all_pages and p not in exclude])
        process_dates(*[p for p in candidates
                        if p in date_pages and p not in exclude])
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from server import *
if __name__ == "__main__":
    # Run the proxy until the user hits Ctrl-C, then close the
    # listening socket cleanly.
    try:
        proxy = Server()
        proxy.start()
    except KeyboardInterrupt:
        proxy.stop()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket
from cache import *
from connection import *
class Server:
    """
    The proxy server.

    Waits for incoming connections; as soon as a client connects, a
    Connection object is created to handle its request.
    """
    def __init__(self, port = 8080):
        """
        Constructor.
        Initialises the listening socket and the shared cache.  By
        default the proxy listens on port 8080.
        """
        # Only the family is given; the socket type defaults to
        # SOCK_STREAM (TCP).
        self.sock = socket.socket(socket.AF_INET)
        # SO_REUSEADDR lets the proxy rebind immediately after a restart.
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind(("", port))
        self.sock.listen(5)
        self.cache = Cache()
    def start(self):
        """
        Start the proxy: accept clients forever, handing each accepted
        connection to a Connection worker thread that answers it.
        """
        while True:
            sock_client, ip_client = self.sock.accept()
            print "Connexion de", ip_client, "au serveur."
            connection = Connection(sock_client, ip_client, self.cache)
            connection.start()
    def stop(self):
        """
        Stop the proxy by closing the listening socket.
        """
        self.sock.close()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket
import select
import datetime
from threading import Thread
BUFFER_SIZE = 8192
class Connection(Thread):
    """
    One proxied client request, handled on its own thread.

    Manages the client-proxy and proxy-server connections.  Each time a
    client connects to the proxy, a thread is started to carry out the
    requested operation.  The result of a new request is stored in the
    cache.  When the same request is seen again, the proxy asks the
    origin server whether the result has changed since: if it has, the
    request is forwarded, the cache is updated and the new result sent
    to the client; otherwise the cached result is returned directly,
    without going through the server, which saves bandwidth.
    """
    def __init__(self, sock_client, ip_client, cache):
        """
        Constructor.
        Initialises the thread and the receive buffer, and records the
        client socket/address and the shared cache.
        """
        Thread.__init__(self)
        self.client = sock_client
        self.ip_client = ip_client
        self.buffer = ""
        self.cache = cache
    def run(self):
        """
        Overrides Thread.run.  This method:
        - reads the request line from the client,
        - checks that the request method is valid,
        - checks whether the request is already cached,
        - if cached, asks the server whether it was modified:
          - if modified, updates the cache and sends the new result,
          - otherwise replays the cached result to the client,
        - if not cached, forwards the request to the server, caches the
          result and sends it to the client,
        - finally closes the client and server connections.
        """
        self.method, self.hname, self.path, self.prot = self.get_header()
        print "Requête de", self.ip_client, ": \n"
        if self.method in ("OPTIONS", "GET", "HEAD", "POST", "PUT",
            "DELETE", "TRACE"):
            self.connect(self.hname)
            incache, content = self.cache.find(self.method, self.hname,
                self.path)
            if incache:
                if self.modified(self.method,self.hname,self.path,
                    self.prot, content):
                    self.client.send(self.buffer)
                    self.cache.update(self.method, self.hname, self.path,
                        self.date, self.buffer)
                    self.buffer = ""
                else:
                    # content[4] is the cached response body (see Cache.add).
                    self.client.send(content[4])
            else:
                print "Page pas en cache. \n"
                self.send_request()
            self.client.close()
            self.host.close()
        else:
            self.client.close()
    def get_header(self):
        """
        Extract the request line from the message sent by the client.
        Returns the method, the domain name of the server to contact,
        the file path on the server, and the protocol used.
        """
        # Read until at least one complete line is buffered.
        while True:
            self.buffer = self.buffer + self.client.recv(BUFFER_SIZE)
            crlf = self.buffer.find('\n')
            if crlf != -1:
                break
        print self.buffer
        header = self.buffer[:crlf+1].split()
        method = header[0]
        # [7:] strips a leading "http://" — assumes the client sends an
        # absolute-URL (proxy-style) request line; TODO confirm.
        tmp = header[1][7:]
        plimit = tmp.find("/")
        hname = tmp[:plimit]
        path = tmp[plimit:]
        prot = header[2]
        self.buffer = self.buffer[crlf+1:]
        return (method,hname,path,prot)
    def connect(self, hostname):
        """
        Connect the proxy to the remote server on port 80.
        Takes the domain name of the host to contact.
        """
        # NOTE(review): getaddrinfo() entries are (family, type, proto,
        # canonname, sockaddr), so "sock_type" actually holds the address
        # *family*; socket.socket(family) still works because the socket
        # type defaults to SOCK_STREAM.
        (sock_type,_,_,_,ip_host) = socket.getaddrinfo(self.hname, 80)[0]
        self.host = socket.socket(sock_type)
        self.host.connect(ip_host)
        print "Connexion avec", hostname, "établie. \n"
    def modified(self, method, hname, path, prot, content):
        """
        Ask the server whether a cached page has been modified since it
        was last fetched.
        Takes the method, the domain name of the host to contact, the
        file path on the server, the protocol used, and the cached entry
        for this request.
        Returns True if the page was modified (leaving the fresh
        response in self.buffer and its date in self.date), False
        otherwise.
        """
        # Re-send the client's remaining headers with an
        # If-Modified-Since header appended; content[3] is the cached
        # date (see Cache.add).
        buff = self.buffer[:len(self.buffer)-2]
        request = (method + " " + path + " " + prot
            + "\r\n" + buff + "If-Modified-Since: " + content[3]
            + "\r\n\r\n")
        self.host.send(request)
        response = ""
        # Poll in 3-second slices; give up after 20 consecutive idle polls.
        timeout = 20
        i = 0
        while True:
            i = i + 1
            (recv,_,error) = select.select([self.host],[],[self.host],3)
            if error:
                break
            if recv:
                temp = recv[0].recv(BUFFER_SIZE)
                if temp:
                    response = response + temp
                    i = 0
            if i == timeout:
                break
        index = response.find("\n")
        rsp_header = response[:index]
        code = rsp_header.split()[1]
        if code == "304":
            # 304 Not Modified: the cached copy is still valid.
            print "Page", hname + path, "en cache (non modifiée). \n"
            self.buffer = ""
            return False
        else:
            print "Page", hname + path, "en cache (modifiée). \n"
            print "Nouvelle version: \n"
            print response
            self.date = self.get_date(response)
            self.buffer = response
            return True
    def send_request(self):
        """
        Forward the client's request to the server and relay the result
        to the client (via get_response()).
        """
        request = ('%s %s %s\n'%(self.method, self.path,
            self.prot)+self.buffer)
        self.host.send(request)
        self.buffer = ''
        self.get_response()
    def get_response(self):
        """
        Collect the server's response, streaming it to the client as it
        arrives; once complete, the cache is updated with the response.
        """
        # Poll in 3-second slices; give up after 20 consecutive idle polls.
        timeout = 20
        i = 0
        data = ""
        while True:
            i = i + 1
            (recv,_,error) = select.select([self.host], [], [self.host],3)
            if error:
                break
            if recv:
                temp = recv[0].recv(BUFFER_SIZE)
                if temp:
                    data = data + temp
                    self.client.send(temp)
                    i = 0
            if i == timeout:
                break
        print "Réponse à", self.method, self.hname + self.path, self.prot, "\n"
        print data
        self.date = self.get_date(data)
        self.cache.add(self.method, self.hname, self.path, self.date, data)
    def get_date(self, header):
        """
        Return the value of the response's Last-Modified header, or the
        current GMT time when the header is absent.
        """
        i = header.find("\r\n\r\n")
        tab = header[:i].split("\r\n")[1:]
        for line in tab:
            sline = line.split(": ")
            if sline[0] == "Last-Modified":
                return sline[1]
        now = datetime.datetime.now()
        return now.strftime("%a, %d %b %Y %H:%M:%S GMT")
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class Cache:
"""
Cette classe représente le cache du proxy.
Elle bufferise toutes les requêtes passant par celui-ci.
"""
def __init__(self):
"""
Constructeur.
Initialise le buffer.
"""
self.pages = []
def add(self, method, hname, path, date, data):
"""
Cette fonction permet d'ajouter une requête et son résultat dans
le cache.
Elle prend en paramètre la méthode, le nom de domaine de l'hôte à
contacter, le chemin du fichier dans l'arborescence du serveur,
l'instant auquel la requête a été effectuée et le résultat de la
requête.
"""
print "Résultat de", method, hname + path, "ajouté en cache.\n"
tpl = [method, hname, path, date, data]
self.pages.append(tpl)
def find(self, method, hname, path):
"""
Cette fonction permet de chercher une requête dans le cache.
Elle prend en paramètre la méthode, le nom de domaine de l'hôte
à contacter et le chemin du fichier dans l'arborescence du serveur.
Elle retourne True si la requête est en cache, False sinon.
"""
for elem in self.pages:
a = elem[0]
b = elem[1]
c = elem[2]
if a == method and b == hname and c == path:
return (True, elem)
return False, list()
def update(self, method, hname, path, date, data):
"""
Cette fonction permet de mettre à jour le cache.
Elle prend en paramètre la méthode, le nom de domaine de l'hôte à
contacter, le chemin du fichier dans l'arborescence du serveur,
l'instant auquel la requête a été effectuée et le résultat de la
requête.
Les champs date et data de l'entrée (method, hname, path) seront ainsi
mis à jour.
"""
print "Résultat de", method, hname + path, "mis à jour en cache. \n"
for elem in self.pages:
a = elem[0]
b = elem[1]
c = elem[2]
if a == method and b == hname and c == path:
elem[3] = date
elem[4] = data
def __str__(self):
"""
Cette fonction retourne une représentation du cache sous
forme de chaine de caractères.
"""
i = 0
result = ""
for e in self.pages:
result = result + str(i) + ") "
result = result + e[0] + " " + e[1] + e[2] + " (" + e[3] + ")\n"
i = i + 1
return result
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from server import *
if __name__ == "__main__":
    # Run the proxy until the user hits Ctrl-C, then close the
    # listening socket cleanly.
    try:
        proxy = Server()
        proxy.start()
    except KeyboardInterrupt:
        proxy.stop()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket
import select
import datetime
from threading import Thread
BUFFER_SIZE = 8192
class Connection(Thread):
    """
    One proxied client request, handled on its own thread.

    Manages the client-proxy and proxy-server connections.  Each time a
    client connects to the proxy, a thread is started to carry out the
    requested operation.  The result of a new request is stored in the
    cache.  When the same request is seen again, the proxy asks the
    origin server whether the result has changed since: if it has, the
    request is forwarded, the cache is updated and the new result sent
    to the client; otherwise the cached result is returned directly,
    without going through the server, which saves bandwidth.
    """
    def __init__(self, sock_client, ip_client, cache):
        """
        Constructor.
        Initialises the thread and the receive buffer, and records the
        client socket/address and the shared cache.
        """
        Thread.__init__(self)
        self.client = sock_client
        self.ip_client = ip_client
        self.buffer = ""
        self.cache = cache
    def run(self):
        """
        Overrides Thread.run.  This method:
        - reads the request line from the client,
        - checks that the request method is valid,
        - checks whether the request is already cached,
        - if cached, asks the server whether it was modified:
          - if modified, updates the cache and sends the new result,
          - otherwise replays the cached result to the client,
        - if not cached, forwards the request to the server, caches the
          result and sends it to the client,
        - finally closes the client and server connections.
        """
        self.method, self.hname, self.path, self.prot = self.get_header()
        print "Requête de", self.ip_client, ": \n"
        if self.method in ("OPTIONS", "GET", "HEAD", "POST", "PUT",
            "DELETE", "TRACE"):
            self.connect(self.hname)
            incache, content = self.cache.find(self.method, self.hname,
                self.path)
            if incache:
                if self.modified(self.method,self.hname,self.path,
                    self.prot, content):
                    self.client.send(self.buffer)
                    self.cache.update(self.method, self.hname, self.path,
                        self.date, self.buffer)
                    self.buffer = ""
                else:
                    # content[4] is the cached response body (see Cache.add).
                    self.client.send(content[4])
            else:
                print "Page pas en cache. \n"
                self.send_request()
            self.client.close()
            self.host.close()
        else:
            self.client.close()
    def get_header(self):
        """
        Extract the request line from the message sent by the client.
        Returns the method, the domain name of the server to contact,
        the file path on the server, and the protocol used.
        """
        # Read until at least one complete line is buffered.
        while True:
            self.buffer = self.buffer + self.client.recv(BUFFER_SIZE)
            crlf = self.buffer.find('\n')
            if crlf != -1:
                break
        print self.buffer
        header = self.buffer[:crlf+1].split()
        method = header[0]
        # [7:] strips a leading "http://" — assumes the client sends an
        # absolute-URL (proxy-style) request line; TODO confirm.
        tmp = header[1][7:]
        plimit = tmp.find("/")
        hname = tmp[:plimit]
        path = tmp[plimit:]
        prot = header[2]
        self.buffer = self.buffer[crlf+1:]
        return (method,hname,path,prot)
    def connect(self, hostname):
        """
        Connect the proxy to the remote server on port 80.
        Takes the domain name of the host to contact.
        """
        # NOTE(review): getaddrinfo() entries are (family, type, proto,
        # canonname, sockaddr), so "sock_type" actually holds the address
        # *family*; socket.socket(family) still works because the socket
        # type defaults to SOCK_STREAM.
        (sock_type,_,_,_,ip_host) = socket.getaddrinfo(self.hname, 80)[0]
        self.host = socket.socket(sock_type)
        self.host.connect(ip_host)
        print "Connexion avec", hostname, "établie. \n"
    def modified(self, method, hname, path, prot, content):
        """
        Ask the server whether a cached page has been modified since it
        was last fetched.
        Takes the method, the domain name of the host to contact, the
        file path on the server, the protocol used, and the cached entry
        for this request.
        Returns True if the page was modified (leaving the fresh
        response in self.buffer and its date in self.date), False
        otherwise.
        """
        # Re-send the client's remaining headers with an
        # If-Modified-Since header appended; content[3] is the cached
        # date (see Cache.add).
        buff = self.buffer[:len(self.buffer)-2]
        request = (method + " " + path + " " + prot
            + "\r\n" + buff + "If-Modified-Since: " + content[3]
            + "\r\n\r\n")
        self.host.send(request)
        response = ""
        # Poll in 3-second slices; give up after 20 consecutive idle polls.
        timeout = 20
        i = 0
        while True:
            i = i + 1
            (recv,_,error) = select.select([self.host],[],[self.host],3)
            if error:
                break
            if recv:
                temp = recv[0].recv(BUFFER_SIZE)
                if temp:
                    response = response + temp
                    i = 0
            if i == timeout:
                break
        index = response.find("\n")
        rsp_header = response[:index]
        code = rsp_header.split()[1]
        if code == "304":
            # 304 Not Modified: the cached copy is still valid.
            print "Page", hname + path, "en cache (non modifiée). \n"
            self.buffer = ""
            return False
        else:
            print "Page", hname + path, "en cache (modifiée). \n"
            print "Nouvelle version: \n"
            print response
            self.date = self.get_date(response)
            self.buffer = response
            return True
    def send_request(self):
        """
        Forward the client's request to the server and relay the result
        to the client (via get_response()).
        """
        request = ('%s %s %s\n'%(self.method, self.path,
            self.prot)+self.buffer)
        self.host.send(request)
        self.buffer = ''
        self.get_response()
    def get_response(self):
        """
        Collect the server's response, streaming it to the client as it
        arrives; once complete, the cache is updated with the response.
        """
        # Poll in 3-second slices; give up after 20 consecutive idle polls.
        timeout = 20
        i = 0
        data = ""
        while True:
            i = i + 1
            (recv,_,error) = select.select([self.host], [], [self.host],3)
            if error:
                break
            if recv:
                temp = recv[0].recv(BUFFER_SIZE)
                if temp:
                    data = data + temp
                    self.client.send(temp)
                    i = 0
            if i == timeout:
                break
        print "Réponse à", self.method, self.hname + self.path, self.prot, "\n"
        print data
        self.date = self.get_date(data)
        self.cache.add(self.method, self.hname, self.path, self.date, data)
    def get_date(self, header):
        """
        Return the value of the response's Last-Modified header, or the
        current GMT time when the header is absent.
        """
        i = header.find("\r\n\r\n")
        tab = header[:i].split("\r\n")[1:]
        for line in tab:
            sline = line.split(": ")
            if sline[0] == "Last-Modified":
                return sline[1]
        now = datetime.datetime.now()
        return now.strftime("%a, %d %b %Y %H:%M:%S GMT")
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket
from cache import *
from connection import *
class Server:
    """
    The proxy server.

    Waits for incoming connections; as soon as a client connects, a
    Connection object is created to handle its request.
    """
    def __init__(self, port = 8080):
        """
        Constructor.
        Initialises the listening socket and the shared cache.  By
        default the proxy listens on port 8080.
        """
        # Only the family is given; the socket type defaults to
        # SOCK_STREAM (TCP).
        self.sock = socket.socket(socket.AF_INET)
        # SO_REUSEADDR lets the proxy rebind immediately after a restart.
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind(("", port))
        self.sock.listen(5)
        self.cache = Cache()
    def start(self):
        """
        Start the proxy: accept clients forever, handing each accepted
        connection to a Connection worker thread that answers it.
        """
        while True:
            sock_client, ip_client = self.sock.accept()
            print "Connexion de", ip_client, "au serveur."
            connection = Connection(sock_client, ip_client, self.cache)
            connection.start()
    def stop(self):
        """
        Stop the proxy by closing the listening socket.
        """
        self.sock.close()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class Cache:
"""
Cette classe représente le cache du proxy.
Elle bufferise toutes les requêtes passant par celui-ci.
"""
def __init__(self):
"""
Constructeur.
Initialise le buffer.
"""
self.pages = []
def add(self, method, hname, path, date, data):
"""
Cette fonction permet d'ajouter une requête et son résultat dans
le cache.
Elle prend en paramètre la méthode, le nom de domaine de l'hôte à
contacter, le chemin du fichier dans l'arborescence du serveur,
l'instant auquel la requête a été effectuée et le résultat de la
requête.
"""
print "Résultat de", method, hname + path, "ajouté en cache.\n"
tpl = [method, hname, path, date, data]
self.pages.append(tpl)
def find(self, method, hname, path):
"""
Cette fonction permet de chercher une requête dans le cache.
Elle prend en paramètre la méthode, le nom de domaine de l'hôte
à contacter et le chemin du fichier dans l'arborescence du serveur.
Elle retourne True si la requête est en cache, False sinon.
"""
for elem in self.pages:
a = elem[0]
b = elem[1]
c = elem[2]
if a == method and b == hname and c == path:
return (True, elem)
return False, list()
def update(self, method, hname, path, date, data):
"""
Cette fonction permet de mettre à jour le cache.
Elle prend en paramètre la méthode, le nom de domaine de l'hôte à
contacter, le chemin du fichier dans l'arborescence du serveur,
l'instant auquel la requête a été effectuée et le résultat de la
requête.
Les champs date et data de l'entrée (method, hname, path) seront ainsi
mis à jour.
"""
print "Résultat de", method, hname + path, "mis à jour en cache. \n"
for elem in self.pages:
a = elem[0]
b = elem[1]
c = elem[2]
if a == method and b == hname and c == path:
elem[3] = date
elem[4] = data
def __str__(self):
"""
Cette fonction retourne une représentation du cache sous
forme de chaine de caractères.
"""
i = 0
result = ""
for e in self.pages:
result = result + str(i) + ") "
result = result + e[0] + " " + e[1] + e[2] + " (" + e[3] + ")\n"
i = i + 1
return result
| Python |
"""FlashTreePruner.py
by Peter Balogh (palexanderbalogh@yahoo.com)
August 1, 2007
designed to remove all unnecessary files and folders
from an exported source code tree of ActionScript files.
First: ONLY USE THIS ON AN EXPORTED TREE. Never use this on your actual project files, since IT DELETES LOTS OF THINGS.
Use Eclipse's context menu to do a team->export on your tree, pick a location safely far from your working project files, and so forth.
Move this script to the folder that contains your exported tree. In my case, I exported my tree to the desktop -- so I put this script on my Desktop too.
Open a terminal window and cd to where the python script (and your tree) can be found. In my case, Desktop.
Invoke the script like so:
python FlashTreePruner.py com com/organic/new_template
Here are the four arguments of this command line:
1) python -- well, you use the python interpreter to execute the script, so the first arg is that executable.
2) the name of this script, obviously.
3) the top of the tree -- really, the name of the top directory. In my case, "com".
4) the folder inside the tree that contains your relevant project files. In the above example, I used a project called "com/organic/new_template" -- that folder contains some loose AS files and some folders containing the rest of my AS files.
The script will (rather verbosely but very quickly) walk through your AS files and make a list of all other files that are needed -- and it will walk through *those* files, too, making note of any dependencies.
At the end of the day, it will go through your entire tree (argument 3) and weed out any and all files that have no link to your project -- and it will prune out empty directories as well.
"""
# note -- we use os.path.normcase
# to automatically convert the slashes from / to \ in windows
import sys, os
allfiles = []
needed_files = []
def walkfun(list_to_append, current_directory, file_list):
    """os.path.walk callback: collect every non-directory entry.

    Appends the full path of each plain file in *file_list* (entries of
    *current_directory*) to *list_to_append*, printing each one.
    (The Python-2-only ``print x`` statement was converted to the
    single-argument ``print(x)`` form, which behaves identically on
    Python 2 and is also valid Python 3.)
    """
    for entry in file_list:
        full_path = os.path.join(current_directory, entry)
        if not os.path.isdir(full_path):
            print("for file " + full_path)
            list_to_append.append(full_path)
"""
for foldername in sys.argv[ 1: ]:
foldersize = 0
folderxmlstring = ""
os.path.walk( foldername, walkfun, foldername ) # recursively dig into each folder
folderxmlstring = "<folder name='" + foldername + "' size='" + str( foldersize ) + "' >" + folderxmlstring + "</folder>"
print "folderxmlstring=", folderxmlstring
totalxmlstring += folderxmlstring
totalsize += foldersize
"""
# argv[1]: the top directory of the exported tree (e.g. "com").
pathtop = sys.argv[ 1 ]
"""
The walk function helps you find all files in a directory tree. It takes a directory name, a callback function, and a data object that is passed on to the callback.
"""
# First pass: every file anywhere under the tree.
os.path.walk( pathtop, walkfun, allfiles )
print "allfiles = ", allfiles
print "of length ", len(allfiles)
# argv[2]: the project folder inside the tree whose files are the roots
# of the dependency search.
projecttop = sys.argv[ 2 ]
os.path.walk( projecttop, walkfun, needed_files )
print "neededfiles = ", needed_files
print "of length ", len( needed_files )
# convert file-path slashes to AS-style dots
# and remove the .as suffix
# so that we're comparing AS apples to apples
# i.e., AS would say com.utils.event.EventFacade.broadcast()
# so format our filenames thusly.
dotted_allfiles = [ os.path.normcase( unneeded_file.replace("/", ".").replace(".as", "")) for unneeded_file in allfiles if unneeded_file.count(".as") > 0]
dotted_neededfiles = [ os.path.normcase( needed_file.replace("/", ".").replace(".as", "")) for needed_file in needed_files if needed_file.count(".as") > 0]
print "dotted_allfiles = ", dotted_allfiles
print "dotted_neededfiles = ", dotted_neededfiles
firstfilename = dotted_neededfiles[ 0 ]
print "firstfilename = " + firstfilename
# Remember the starting sizes so the summary printed after the scan can
# show how many files moved between the two lists.
dnf_length_old = len( dotted_neededfiles )
daf_length_old = len( dotted_allfiles )
# two ways to compare each line of code:
# first, if it's an import, is the import's description
# matched by the dotted_file version of the file inside allfiles?
# i.e., do any of the dotted files' names contain the import description?
# second, if it *isn't* an import,
# does it contain the name of a dotted file?
def examinefile(dottedfilename):
    """Scan one ActionScript file (named in dotted form) for dependencies.

    Opens the corresponding ``.as`` file and, for every ``import``
    statement or fully-qualified reference found, moves the matching
    entries from the module-global ``dotted_allfiles`` (candidates for
    deletion) into ``dotted_neededfiles`` (kept).
    """
    print("*****EXAMINING " + dottedfilename)
    # Map the dotted name back to a file path: com.utils.Foo -> com/utils/Foo.as
    somefilename = os.path.normcase(dottedfilename.replace(".", "/") + ".as")
    with open(somefilename, "r") as f:
        print("You opened " + f.name)
        file_lines_list = f.readlines()
    # If line endings didn't match the platform, the whole file may come
    # back as a single blob: split manually on \r, then \n.
    # (Guarding on == 1 instead of < 2 also avoids an IndexError on an
    # empty file.)
    if len(file_lines_list) == 1:
        file_lines_list = file_lines_list[0].split("\r")
        if len(file_lines_list) < 2:
            file_lines_list = file_lines_list[0].split("\n")
    for line in file_lines_list:
        # Remove leading whitespace.
        line = line.lstrip()
        if line.count("import") > 0:
            if line.find("import") == 0:
                # It's an import statement; extract its target, handling
                # both "import com.utils.*" and "import com.utils.Foo;".
                import_desc = line.split("import")[1].lstrip()
                if import_desc.find("*") > -1:
                    import_desc = import_desc.split("*")[0]
                else:
                    import_desc = import_desc.split(";")[0]
                # Move every candidate whose dotted name contains the
                # import description over to the needed list.
                # BUG FIX: iterate over a snapshot — the original removed
                # items from dotted_allfiles while iterating over it,
                # which silently skips the element after each match.
                for dotted_file in list(dotted_allfiles):
                    if dotted_file.count(import_desc) > 0:
                        dotted_allfiles.remove(dotted_file)
                        dotted_neededfiles.append(dotted_file)
        else:
            # Not an import line: it may still reference a class by its
            # fully-qualified path, e.g. com.utils.events.EventFacade.broadcast().
            # BUG FIX: snapshot iteration here too (see above).
            for dotted_file in list(dotted_allfiles):
                if line.count(dotted_file) > 0:
                    print("HEY! dotted_file is " + dotted_file + " in " + line)
                    dotted_allfiles.remove(dotted_file)
                    dotted_neededfiles.append(dotted_file)
# Worklist scan: examinefile() appends newly discovered dependencies to
# dotted_neededfiles while this loop runs; since iterating a list picks
# up items appended during iteration, transitive dependencies get
# examined too.
for dotted_neededfile in dotted_neededfiles:
    examinefile( dotted_neededfile )
# Report how many files moved from the "unneeded" to the "needed" list.
print "length of dotted_neededfiles went from ", dnf_length_old
print "to ", len( dotted_neededfiles )
dotted_neededfiles.sort()
for f in dotted_neededfiles:
    print f
print "length of dotted_allfiles went from ", daf_length_old
print "to ", len( dotted_allfiles )
dotted_allfiles.sort()
for f in dotted_allfiles:
    print f
# Convert the surviving (unneeded) dotted names back into file paths;
# these are the files the kill pass below will delete.
# NOTE(review): "file" shadows the builtin of the same name.
allfiles = [ os.path.normcase( file.replace(".", "/") + ".as" ) for file in dotted_allfiles]
def killfunction(list_to_append, current_directory, file_list):
    """os.path.walk callback that prunes the tree.

    Deletes every plain file whose full path appears in *list_to_append*
    (the kill list passed to os.path.walk), and attempts to remove
    directories that have become empty.

    BUG FIX: the original ignored its *list_to_append* parameter and read
    the module-global ``allfiles`` instead; it only worked because the
    caller happened to pass that same global.  The bare ``except:`` around
    os.removedirs() was narrowed to OSError (the error it actually raises
    for non-empty directories).
    """
    for entry in file_list:
        full_path = os.path.join(current_directory, entry)
        if not os.path.isdir(full_path):
            if full_path in list_to_append:
                print("killing file " + full_path)
                os.remove(full_path)
        else:
            # It's a directory, so try to remove it (fails if non-empty).
            try:
                os.removedirs(full_path)
            except OSError:
                print("Can't remove non-empty dir " + full_path)
# First pass: delete the unneeded files and any directories that are
# already empty (walk visits parents before children, so freshly emptied
# folders survive this pass).
os.path.walk( pathtop, killfunction, allfiles )
# that emptied the folders; now delete 'em
os.path.walk( pathtop, killfunction, allfiles )
| Python |
#!/usr/bin/env python
import logging
import os
import re
import time
import platform
from optparse import OptionParser
from subprocess import Popen, PIPE, STDOUT
from xmlrpclib import ServerProxy, Error
# -- createDaemon() code from: http://code.activestate.com/recipes/278731/
# Default daemon parameters.
# File mode creation mask of the daemon.
UMASK = 0
# Default working directory for the daemon.
WORKDIR = "/"
# Default maximum for the number of available file descriptors.
MAXFD = 1024
# The standard I/O file descriptors are redirected to /dev/null by default.
# hasattr() guard: very old Python versions lack os.devnull.
if (hasattr(os, "devnull")):
    REDIRECT_TO = os.devnull
else:
    REDIRECT_TO = "/dev/null"
# Remember the launch directory before the daemon chdirs to WORKDIR —
# presumably used by code further down; confirm.
CWD = os.getcwd()
def createDaemon():
    """Detach a process from the controlling terminal and run it in the
    background as a daemon.

    Classic double-fork daemonization (ActiveState recipe 278731): the
    grandchild ends up re-parented to init, in its own session with no
    controlling terminal, cwd set to WORKDIR, umask set to UMASK, and every
    file descriptor closed with stdin/stdout/stderr re-opened on REDIRECT_TO.
    Returns 0 in the daemonized (grandchild) process; the two intermediate
    parents call os._exit() and never return.
    """
    try:
        # First fork: returns control to the shell and guarantees the child
        # is not a process group leader, which os.setsid() below requires.
        pid = os.fork()
    except OSError, e:
        raise Exception, "%s [%d]" % (e.strerror, e.errno)
    if (pid == 0):  # The first child.
        # Become leader of a new session/process group, detached from any
        # controlling terminal.
        os.setsid()
        # Ignoring SIGHUP before the second fork is unnecessary here: on the
        # first child's death SIGHUP is only sent to STOPPED processes in the
        # orphaned group, and the second child is never stopped.
        try:
            # Second fork: prevents zombies (init reaps the grandchild) and
            # guarantees the daemon is not a session leader, so it can never
            # re-acquire a controlling terminal (System V behavior).
            pid = os.fork()  # Fork a second child.
        except OSError, e:
            raise Exception, "%s [%d]" % (e.strerror, e.errno)
        if (pid == 0):  # The second child.
            # The inherited cwd may be a mounted filesystem; move to WORKDIR
            # ("/") so the daemon never blocks an unmount.
            os.chdir(WORKDIR)
            # Don't inherit the parent's file mode creation mask.
            os.umask(UMASK)
        else:
            # Parents use _exit() (not exit()) so atexit hooks don't run and
            # stdio buffers aren't flushed twice.
            os._exit(0)  # Exit parent (the first child) of the second child.
    else:
        os._exit(0)  # Exit parent of the first child.
    # Close every inherited file descriptor, up to the process's fd limit
    # (or MAXFD if the limit is reported as unbounded).
    import resource  # Resource usage information.
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if (maxfd == resource.RLIM_INFINITY):
        maxfd = MAXFD
    # Iterate through and close all file descriptors.
    for fd in range(0, maxfd):
        try:
            os.close(fd)
        except OSError:  # ERROR, fd wasn't open to begin with (ignored)
            pass
    # Re-open fds 0/1/2 on REDIRECT_TO (normally /dev/null) so stray reads
    # and writes on the standard streams have no effect.  open() returns the
    # lowest free descriptor, which is 0 (stdin) since everything was closed.
    os.open(REDIRECT_TO, os.O_RDWR)  # standard input (0)
    # Duplicate standard input to standard output and standard error.
    os.dup2(0, 1)  # standard output (1)
    os.dup2(0, 2)  # standard error (2)
    return(0)
# -- End of createDaemon() code from: http://code.activestate.com/recipes/278731/
class FCSH(object):
    """
    FCSH wrapper.
    Communicate with a fcsh process through a pipe, and transparently take
    advantage of fcsh "cache" of compilation steps.
    """
    # Prompt string fcsh prints when it is ready for the next command.
    PROMPT = '\n(fcsh)'
    # Line fcsh prints when it assigns an id to a new compile target.
    TARGET_ID_RE = re.compile('fcsh: Assigned ([0-9]+) as the compile target id')
    def __init__(self):
        # Spawn fcsh from the Flex SDK pointed to by FLEX_HOME.
        if sys.platform.startswith("win"):
            self.fcsh = Popen('%FLEX_HOME%\\bin\\fcsh', shell=True,
                              stdin=PIPE, stdout=PIPE, stderr=STDOUT)
        else:
            # LC_ALL=C keeps fcsh output unlocalized so PROMPT and
            # TARGET_ID_RE keep matching.
            self.fcsh = Popen('LC_ALL=C "$FLEX_HOME"/bin/fcsh', shell=True,
                              close_fds=True, cwd=CWD,
                              stdin=PIPE, stdout=PIPE, stderr=STDOUT)
        # Maps command string -> fcsh compile target id.
        self.command_ids = {}
        # Swallow the startup banner up to the first prompt.
        self.read_to_prompt()
    def read_to_prompt(self):
        """
        Reads fcsh output until the prompt is detected, and returns the
        collected output (prompt included).
        """
        # Accumulate single characters in a list and join once: avoids the
        # quadratic behavior of repeated string concatenation.
        chunks = []
        ch = self.fcsh.stdout.read(1)
        while ch:
            chunks.append(ch)
            # Each chunk is one character, so joining the last len(PROMPT)
            # chunks is equivalent to output.endswith(PROMPT).
            if ''.join(chunks[-len(self.PROMPT):]) == self.PROMPT:
                break
            ch = self.fcsh.stdout.read(1)
        output = ''.join(chunks)
        logging.debug("Found fcsh prompt, read %s", output)
        return output
    def run_command(self, cmd):
        """
        Pass the command to fcsh. Automatically adds '\n' to the end of ``cmd``.
        Also remembers the "compilation target id" of every passed command, to
        take advantage of fcsh 'cache'. This means that if the ``'mxmlc foo.mxml'``
        command is issued twice, the second time it actually executes
        ``"compile 1"`` (assuming that fcsh assigned 1 as the compilation id the
        first time the command was issued).
        The process described above is completely handled inside ``run_command``.
        The client code doesn't have to do anything special.
        """
        logging.debug("Running fcsh cmd: %s", cmd)
        target_id = self.command_ids.get(cmd)
        if target_id is not None:
            logging.debug("Found pre-existing id: %s", target_id)
            # Re-issue via the cached target id so fcsh does an incremental build.
            self.fcsh.stdin.write('compile %s\n' % target_id)
        else:
            self.fcsh.stdin.write(cmd + "\n")
        output = self.read_to_prompt()
        # If the command didn't have an id, fcsh should have assigned one now.
        if cmd not in self.command_ids:
            match = self.TARGET_ID_RE.search(output)
            if match:
                self.command_ids[cmd] = match.group(1)
                logging.debug("Recording generated id: %s",
                              self.command_ids[cmd])
        return output
PORT = 2345
def configure_server_logging():
    """Route all DEBUG-and-above records to /tmp/fcshd.log, truncating it."""
    log_settings = dict(
        level=logging.DEBUG,
        format='%(asctime)s %(levelname)s %(message)s',
        filename='/tmp/fcshd.log',
        filemode='w',
    )
    logging.basicConfig(**log_settings)
def run_server(as_daemon=True):
    """
    Optionally daemonizes the process and starts an XML-RPC server to drive the
    FCSH wrapper.

    The server listens on localhost:PORT, exposes run_command() (proxied to a
    single shared FCSH instance) and exit(), and never returns
    (serve_forever).
    """
    if as_daemon:
        retCode = createDaemon()
        # Logging must be configured *after* daemonizing: createDaemon()
        # closed every inherited file descriptor.
        configure_server_logging()
        # Log the identity of the daemonized process for debugging.
        procParams = """
    return code = %s
    process ID = %s
    parent process ID = %s
    process group ID = %s
    session ID = %s
    user ID = %s
    effective user ID = %s
    real group ID = %s
    effective group ID = %s
    """ % (retCode, os.getpid(), os.getppid(), os.getpgrp(), os.getsid(0),
           os.getuid(), os.geteuid(), os.getgid(), os.getegid())
        logging.info(procParams + "\n")
    else:
        configure_server_logging()
    fcsh = FCSH()
    logging.debug("FCSH initialized\n")
    from SimpleXMLRPCServer import SimpleXMLRPCServer
    server = SimpleXMLRPCServer(("localhost", PORT))
    server.register_introspection_functions()
    server.register_function(lambda cmd: fcsh.run_command(cmd), 'run_command')
    # exit() hard-kills the daemon; the RPC call itself never gets a reply
    # (see stop_server, which treats the resulting fault as expected).
    server.register_function(lambda: os._exit(0), 'exit')
    server.serve_forever()
def fcsh_server_proxy():
    """Return an XML-RPC client proxy for the local fcshd server."""
    url = "http://localhost:%d" % PORT
    return ServerProxy(url)
def run_command(cmd):
    """Forward cmd to the fcshd server, print its output, and return an exit
    status: 0 if the output looks like a successful compile, 1 otherwise.

    On POSIX, if the server isn't reachable it is started automatically and
    the command retried once; on Windows the user is told to start it.
    """
    import socket
    server = fcsh_server_proxy()
    try:
        output = server.run_command(cmd)
    except socket.error:
        # Server not running / not reachable.
        if sys.platform.startswith("win"):
            print "Please start the server process in a different prompt using"
            print "fcshd.py --start-server"
            return 1
        start_server()
        output = server.run_command(cmd)
    except Error, v:
        # NOTE(review): this prints the Error *class* object in addition to
        # the fault instance v -- probably only v was intended.
        print "XML-RPC Error:", Error, v
        return 1
    print output.encode('iso-8859-1') # Looks like flex outputs latin-1 sometimes
    # Check if compilation worked: a successful build reports the produced
    # .swf/.swc and its size.
    if re.search(r'\.sw[cf] \([0-9]+ bytes\)', output):
        return 0
    else:
        return 1
def start_server(as_daemon=True):
    """Start the fcshd server.

    With as_daemon=True the server is started in a forked child (which then
    daemonizes itself via run_server) and this function returns after a fixed
    grace period; with as_daemon=False it blocks until Ctrl-C.
    """
    print "Starting the server, please wait..."
    if as_daemon:
        if os.fork() == 0:
            # Child: daemonize and serve; run_server never returns normally.
            run_server(as_daemon=True)
            os._exit(0)  # Defensive: never fall through into the parent's code.
        time.sleep(2) # Give time to child to start up the server
        print "OK."
    else:
        print "Running fcshd.py server in the foreground. Press Ctrl-C to exit"
        try:
            run_server(as_daemon=False)
        except KeyboardInterrupt:
            print
            print "Ctrl-C detected, exiting..."
def stop_server():
server = fcsh_server_proxy()
try:
server.exit()
except Error:
# The exit() method in the server never returns. Thus,
# it always trigger a RPC error.
pass
# But we can check that the server is down:
try:
server.run_command("dummy")
except:
pass
else:
print "Couldn't stop the server"
def parse_options(args):
    """Parse the command line.

    ``args`` is the full argv (callers pass sys.argv, so the program name
    comes back among the positional arguments).  Returns the
    (options, args) pair from OptionParser.parse_args.
    """
    parser = OptionParser()
    parser.add_option('--stop-server', action="store_true", dest="stop",
                      default=False, help="Stops the FCSH server and exit")
    parser.add_option('--start-server', action="store_true", dest="start",
                      default=False, help="Starts the FCSH server and exit")
    parser.add_option('--foreground', action="store_true", dest="foreground",
                      default=platform.system() == 'Windows',
                      # BUG FIX: the adjacent literals used to concatenate to
                      # "This isthe default ..."; keep the trailing space.
                      help="Starts the FCSH server in the foreground. This is "
                           "the default behavior in Windows")
    return parser.parse_args(args)
def main(args):
    """Entry point: dispatch to start/stop server or forward a fcsh command.

    Returns a process exit status: 1 on failure, run_command's status for the
    command branch, None (treated as 0 by sys.exit) for start/stop.
    """
    if 'FLEX_HOME' not in os.environ:
        print """
Please set the FLEX_HOME environment variable pointing to the location of
the FLEX SDK"""
        return 1
    options, args = parse_options(args)
    if options.start:
        start_server(as_daemon=not options.foreground)
    elif options.stop:
        stop_server()
    else:
        # Everything after the program name is the fcsh command line.
        command = " ".join(args[1:]).strip()
        if not command:
            command = "help"
        return run_command(command)
if __name__ == "__main__":
    # Script entry point: exit with the status main() returns.
    import sys
    sys.exit(main(sys.argv))
| Python |
#!/usr/bin/env python
import logging
import os
import platform
import re
import sys
import time
from optparse import OptionParser
from subprocess import Popen, PIPE, STDOUT
from xmlrpclib import ServerProxy, Error
# -- createDaemon() code from: http://code.activestate.com/recipes/278731/
# Default daemon parameters.
# File mode creation mask of the daemon.
UMASK = 0
# Default working directory for the daemon.
WORKDIR = "/"
# Default maximum for the number of available file descriptors.
MAXFD = 1024
# The standard I/O file descriptors are redirected to /dev/null by default.
if (hasattr(os, "devnull")):
REDIRECT_TO = os.devnull
else:
REDIRECT_TO = "/dev/null"
CWD = os.getcwd()
def createDaemon():
    """Detach a process from the controlling terminal and run it in the
    background as a daemon.

    Classic double-fork daemonization (ActiveState recipe 278731): the
    grandchild ends up re-parented to init, in its own session with no
    controlling terminal, cwd set to WORKDIR, umask set to UMASK, and every
    file descriptor closed with stdin/stdout/stderr re-opened on REDIRECT_TO.
    Returns 0 in the daemonized (grandchild) process; the two intermediate
    parents call os._exit() and never return.
    """
    try:
        # First fork: returns control to the shell and guarantees the child
        # is not a process group leader, which os.setsid() below requires.
        pid = os.fork()
    except OSError, e:
        raise Exception, "%s [%d]" % (e.strerror, e.errno)
    if (pid == 0):  # The first child.
        # Become leader of a new session/process group, detached from any
        # controlling terminal.
        os.setsid()
        # Ignoring SIGHUP before the second fork is unnecessary here: on the
        # first child's death SIGHUP is only sent to STOPPED processes in the
        # orphaned group, and the second child is never stopped.
        try:
            # Second fork: prevents zombies (init reaps the grandchild) and
            # guarantees the daemon is not a session leader, so it can never
            # re-acquire a controlling terminal (System V behavior).
            pid = os.fork()  # Fork a second child.
        except OSError, e:
            raise Exception, "%s [%d]" % (e.strerror, e.errno)
        if (pid == 0):  # The second child.
            # The inherited cwd may be a mounted filesystem; move to WORKDIR
            # ("/") so the daemon never blocks an unmount.
            os.chdir(WORKDIR)
            # Don't inherit the parent's file mode creation mask.
            os.umask(UMASK)
        else:
            # Parents use _exit() (not exit()) so atexit hooks don't run and
            # stdio buffers aren't flushed twice.
            os._exit(0)  # Exit parent (the first child) of the second child.
    else:
        os._exit(0)  # Exit parent of the first child.
    # Close every inherited file descriptor, up to the process's fd limit
    # (or MAXFD if the limit is reported as unbounded).
    import resource  # Resource usage information.
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if (maxfd == resource.RLIM_INFINITY):
        maxfd = MAXFD
    # Iterate through and close all file descriptors.
    for fd in range(0, maxfd):
        try:
            os.close(fd)
        except OSError:  # ERROR, fd wasn't open to begin with (ignored)
            pass
    # Re-open fds 0/1/2 on REDIRECT_TO (normally /dev/null) so stray reads
    # and writes on the standard streams have no effect.  open() returns the
    # lowest free descriptor, which is 0 (stdin) since everything was closed.
    os.open(REDIRECT_TO, os.O_RDWR)  # standard input (0)
    # Duplicate standard input to standard output and standard error.
    os.dup2(0, 1)  # standard output (1)
    os.dup2(0, 2)  # standard error (2)
    return(0)
# -- End of createDaemon() code from: http://code.activestate.com/recipes/278731/
class FCSH(object):
    """
    FCSH wrapper.
    Communicate with a fcsh process through a pipe, and transparently take
    advantage of fcsh "cache" of compilation steps.
    """
    # Prompt string fcsh prints when it is ready for the next command.
    PROMPT = '\n(fcsh)'
    # Line fcsh prints when it assigns an id to a new compile target.
    TARGET_ID_RE = re.compile('fcsh: Assigned ([0-9]+) as the compile target id')
    def __init__(self):
        # Spawn fcsh from the Flex SDK pointed to by FLEX_HOME.
        if sys.platform.startswith("win"):
            self.fcsh = Popen('%FLEX_HOME%\\bin\\fcsh', shell=True,
                              stdin=PIPE, stdout=PIPE, stderr=STDOUT)
        else:
            # LC_ALL=C keeps fcsh output unlocalized so PROMPT and
            # TARGET_ID_RE keep matching.
            self.fcsh = Popen('LC_ALL=C "$FLEX_HOME"/bin/fcsh', shell=True,
                              close_fds=True, cwd=CWD,
                              stdin=PIPE, stdout=PIPE, stderr=STDOUT)
        # Maps command string -> fcsh compile target id.
        self.command_ids = {}
        # Swallow the startup banner up to the first prompt.
        self.read_to_prompt()
    def read_to_prompt(self):
        """
        Reads fcsh output until the prompt is detected, and returns the
        collected output (prompt included).
        """
        # Accumulate single characters in a list and join once: avoids the
        # quadratic behavior of repeated string concatenation.
        chunks = []
        ch = self.fcsh.stdout.read(1)
        while ch:
            chunks.append(ch)
            # Each chunk is one character, so joining the last len(PROMPT)
            # chunks is equivalent to output.endswith(PROMPT).
            if ''.join(chunks[-len(self.PROMPT):]) == self.PROMPT:
                break
            ch = self.fcsh.stdout.read(1)
        output = ''.join(chunks)
        logging.debug("Found fcsh prompt, read %s", output)
        return output
    def run_command(self, cmd):
        """
        Pass the command to fcsh. Automatically adds '\n' to the end of ``cmd``.
        Also remembers the "compilation target id" of every passed command, to
        take advantage of fcsh 'cache'. This means that if the ``'mxmlc foo.mxml'``
        command is issued twice, the second time it actually executes
        ``"compile 1"`` (assuming that fcsh assigned 1 as the compilation id the
        first time the command was issued).
        The process described above is completely handled inside ``run_command``.
        The client code doesn't have to do anything special.
        """
        logging.debug("Running fcsh cmd: %s", cmd)
        target_id = self.command_ids.get(cmd)
        if target_id is not None:
            logging.debug("Found pre-existing id: %s", target_id)
            # Re-issue via the cached target id so fcsh does an incremental build.
            self.fcsh.stdin.write('compile %s\n' % target_id)
        else:
            self.fcsh.stdin.write(cmd + "\n")
        output = self.read_to_prompt()
        # If the command didn't have an id, fcsh should have assigned one now.
        if cmd not in self.command_ids:
            match = self.TARGET_ID_RE.search(output)
            if match:
                self.command_ids[cmd] = match.group(1)
                logging.debug("Recording generated id: %s",
                              self.command_ids[cmd])
        return output
PORT = 2345
def configure_server_logging():
    """Route all DEBUG-and-above records to /tmp/fcshd.log, truncating it."""
    log_settings = dict(
        level=logging.DEBUG,
        format='%(asctime)s %(levelname)s %(message)s',
        filename='/tmp/fcshd.log',
        filemode='w',
    )
    logging.basicConfig(**log_settings)
def run_server(as_daemon=True):
    """
    Optionally daemonizes the process and starts an XML-RPC server to drive the
    FCSH wrapper.

    The server listens on localhost:PORT, exposes run_command() (proxied to a
    single shared FCSH instance) and exit(), and never returns
    (serve_forever).
    """
    if as_daemon:
        retCode = createDaemon()
        # Logging must be configured *after* daemonizing: createDaemon()
        # closed every inherited file descriptor.
        configure_server_logging()
        # Log the identity of the daemonized process for debugging.
        procParams = """
    return code = %s
    process ID = %s
    parent process ID = %s
    process group ID = %s
    session ID = %s
    user ID = %s
    effective user ID = %s
    real group ID = %s
    effective group ID = %s
    """ % (retCode, os.getpid(), os.getppid(), os.getpgrp(), os.getsid(0),
           os.getuid(), os.geteuid(), os.getgid(), os.getegid())
        logging.info(procParams + "\n")
    else:
        configure_server_logging()
    fcsh = FCSH()
    logging.debug("FCSH initialized\n")
    from SimpleXMLRPCServer import SimpleXMLRPCServer
    server = SimpleXMLRPCServer(("localhost", PORT))
    server.register_introspection_functions()
    server.register_function(lambda cmd: fcsh.run_command(cmd), 'run_command')
    # exit() hard-kills the daemon; the RPC call itself never gets a reply
    # (see stop_server, which treats the resulting fault as expected).
    server.register_function(lambda: os._exit(0), 'exit')
    server.serve_forever()
def fcsh_server_proxy():
    """Return an XML-RPC client proxy for the local fcshd server."""
    url = "http://localhost:%d" % PORT
    return ServerProxy(url)
def run_command(cmd):
    """Forward cmd to the fcshd server, print its output, and return an exit
    status: 0 if the output looks like a successful compile, 1 otherwise.

    On POSIX, if the server isn't reachable it is started automatically and
    the command retried once; on Windows the user is told to start it.
    """
    import socket
    server = fcsh_server_proxy()
    try:
        output = server.run_command(cmd)
    except socket.error:
        # Server not running / not reachable.
        if sys.platform.startswith("win"):
            print "Please start the server process in a different prompt using"
            print "fcshd.py --start-server"
            return 1
        start_server()
        output = server.run_command(cmd)
    except Error, v:
        # NOTE(review): this prints the Error *class* object in addition to
        # the fault instance v -- probably only v was intended.
        print "XML-RPC Error:", Error, v
        return 1
    print output.encode('iso-8859-1') # Looks like flex outputs latin-1 sometimes
    # Check if compilation worked: a successful build reports the produced
    # .swf/.swc and its size.
    if re.search(r'\.sw[cf] \([0-9]+ bytes\)', output):
        return 0
    else:
        return 1
def start_server(as_daemon=True):
    """Start the fcshd server.

    With as_daemon=True the server is started in a forked child (which then
    daemonizes itself via run_server) and this function returns after a fixed
    grace period; with as_daemon=False it blocks until Ctrl-C.
    """
    print "Starting the server, please wait..."
    if as_daemon:
        if os.fork() == 0:
            # Child: daemonize and serve; run_server never returns normally.
            run_server(as_daemon=True)
            os._exit(0)  # Defensive: never fall through into the parent's code.
        time.sleep(2) # Give time to child to start up the server
        print "OK."
    else:
        print "Running fcshd.py server in the foreground. Press Ctrl-C to exit"
        try:
            run_server(as_daemon=False)
        except KeyboardInterrupt:
            print
            print "Ctrl-C detected, exiting..."
def stop_server():
server = fcsh_server_proxy()
try:
server.exit()
except Error:
# The exit() method in the server never returns. Thus,
# it always trigger a RPC error.
pass
# But we can check that the server is down:
try:
server.run_command("dummy")
except:
pass
else:
print "Couldn't stop the server"
def parse_options(args):
    """Parse the command line.

    ``args`` is the full argv (callers pass sys.argv, so the program name
    comes back among the positional arguments).  Returns the
    (options, args) pair from OptionParser.parse_args.
    """
    parser = OptionParser()
    parser.add_option('--stop-server', action="store_true", dest="stop",
                      default=False, help="Stops the FCSH server and exit")
    parser.add_option('--start-server', action="store_true", dest="start",
                      default=False, help="Starts the FCSH server and exit")
    parser.add_option('--foreground', action="store_true", dest="foreground",
                      default=platform.system() == 'Windows',
                      # BUG FIX: the adjacent literals used to concatenate to
                      # "This isthe default ..."; keep the trailing space.
                      help="Starts the FCSH server in the foreground. This is "
                           "the default behavior in Windows")
    return parser.parse_args(args)
def main(args):
    """Entry point: dispatch to start/stop server or forward a fcsh command.

    Returns a process exit status: 1 on failure, run_command's status for the
    command branch, None (treated as 0 by sys.exit) for start/stop.
    """
    if 'FLEX_HOME' not in os.environ:
        print """
Please set the FLEX_HOME environment variable pointing to the location of
the FLEX SDK"""
        return 1
    options, args = parse_options(args)
    if options.start:
        start_server(as_daemon=not options.foreground)
    elif options.stop:
        stop_server()
    else:
        # Everything after the program name is the fcsh command line.
        command = " ".join(args[1:]).strip()
        if not command:
            command = "help"
        return run_command(command)
if __name__ == "__main__":
    # Script entry point: exit with the status main() returns.
    import sys
    sys.exit(main(sys.argv))
| Python |
#!/usr/bin/env python
# Copyright (C) 2011 Victor Semionov <vsemionov@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of the contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import wx
from flea_wdr import *
app_name = "FLEA"
app_version = "0.1-dev"
copyright = "Copyright (C) 2011 Victor Semionov <vsemionov@gmail.com>"
rights = "All rights reserved."
# WDR: classes
class FuelingDialog(wx.Dialog):
    """Modal dialog for entering one fueling record.

    The widget layout comes from the wxDesigner-generated FuelingDialogFunc
    (star-imported from flea_wdr).  The "WDR:" comments are tool markers
    used for wxDesigner round-tripping; keep them in place.
    """
    def __init__(self, parent, id, title,
        pos = wx.DefaultPosition, size = wx.DefaultSize,
        style = wx.DEFAULT_DIALOG_STYLE ):
        wx.Dialog.__init__(self, parent, id, title, pos, size, style)
        # WDR: dialog function FuelingDialogFunc for FuelingDialog
        FuelingDialogFunc( self, True )
        # WDR: handler declarations for FuelingDialog
        wx.EVT_BUTTON(self, wx.ID_OK, self.OnOk)
    # WDR: methods for FuelingDialog
    # Validation/transfer hooks: currently accept everything unconditionally.
    def Validate(self, win):
        return True
    def TransferDataToWindow(self):
        return True
    def TransferDataFromWindow(self):
        return True
    # WDR: handler implementations for FuelingDialog
    def OnOk(self, event):
        # Let the default wx.ID_OK handling validate and close the dialog.
        event.Skip(True)
class EntriesDialog(wx.Dialog):
    """Generic modal list-management dialog (used for "Vehicles" and "Fuels").

    Layout is built by the wxDesigner-generated EntriesDialogFunc
    (star-imported from flea_wdr); the "WDR:" comments are tool markers and
    should be kept in place.
    """
    def __init__(self, parent, id, title,
        pos = wx.DefaultPosition, size = wx.DefaultSize,
        style = wx.DEFAULT_DIALOG_STYLE ):
        wx.Dialog.__init__(self, parent, id, title, pos, size, style)
        # WDR: dialog function EntriesDialogFunc for EntriesDialog
        EntriesDialogFunc( self, True )
        # WDR: handler declarations for EntriesDialog
        wx.EVT_BUTTON(self, ID_ADD, self.OnAdd)
        wx.EVT_BUTTON(self, ID_EDIT, self.OnEdit)
        wx.EVT_BUTTON(self, ID_DELETE, self.OnDelete)
    # WDR: methods for EntriesDialog
    # WDR: handler implementations for EntriesDialog
    # Button handlers: not implemented yet (TODO).
    def OnAdd(self, event):
        pass
    def OnEdit(self, event):
        pass
    def OnDelete(self, event):
        pass
class FLEAFrame(wx.Frame):
    """Main application window: menu/tool/status bars plus a fueling list.

    Layout helpers (MyMenuBarFunc, MyToolBarFunc) and the ID_* constants are
    star-imported from the wxDesigner-generated flea_wdr module; the "WDR:"
    comments are tool markers and should be kept in place.
    """
    def __init__(self, parent, id, title,
        pos = wx.DefaultPosition, size = wx.DefaultSize,
        style = wx.DEFAULT_FRAME_STYLE ):
        wx.Frame.__init__(self, parent, id, title, pos, size, style)
        self.CreateMyMenuBar()
        self.CreateMyToolBar()
        self.CreateStatusBar(1)
        self.SetStatusText("Welcome!")
        # insert main window here
        # Reset both toolbar filter choices to their first entry.
        toolbar = self.GetToolBar()
        toolbar.FindControl(ID_FILTER_VEHICLES).Selection = 0
        toolbar.FindControl(ID_FILTER_FUELS).Selection = 0
        # Report-mode list showing one row per fueling record.
        fuelings = wx.ListCtrl(self, style=wx.LC_REPORT)
        fuelings.InsertColumn(0, "Vehicle")
        fuelings.InsertColumn(1, "Fuel")
        fuelings.InsertColumn(2, "Quantity")
        fuelings.InsertColumn(3, "Odometer")
        fuelings.InsertColumn(4, "Date")
        self.fuelings = fuelings
        # WDR: handler declarations for FLEAFrame
        wx.EVT_TOOL(self, ID_ADD_FUELING, self.OnAdd)
        wx.EVT_CHOICE(self, ID_FILTER_VEHICLES, self.OnFilterVehicles)
        wx.EVT_CHOICE(self, ID_FILTER_FUELS, self.OnFilterFuels)
        wx.EVT_MENU(self, ID_VEHICLES, self.OnVehicles)
        # NOTE(review): this also binds OnFuels to ID_VEHICLES, so "Vehicles"
        # gets two handlers and the "Fuels" menu item never reaches OnFuels.
        # Presumably this should use the Fuels menu id from flea_wdr
        # (e.g. ID_FUELS) -- verify against the generated module.
        wx.EVT_MENU(self, ID_VEHICLES, self.OnFuels)
        wx.EVT_MENU(self, ID_ADD_FUELING, self.OnAdd)
        wx.EVT_MENU(self, ID_EDIT_FUELING, self.OnEdit)
        wx.EVT_MENU(self, ID_DELETE_FUELING, self.OnDelete)
        wx.EVT_MENU(self, wx.ID_ABOUT, self.OnAbout)
        wx.EVT_MENU(self, wx.ID_EXIT, self.OnQuit)
        wx.EVT_CLOSE(self, self.OnCloseWindow)
        wx.EVT_SIZE(self, self.OnSize)
        wx.EVT_UPDATE_UI(self, -1, self.OnUpdateUI)
    # WDR: methods for FLEAFrame
    def CreateMyMenuBar(self):
        # Install the wxDesigner-generated menu bar.
        self.SetMenuBar( MyMenuBarFunc() )
    def CreateMyToolBar(self):
        # Build and populate the wxDesigner-generated tool bar.
        tb = self.CreateToolBar(wx.TB_HORIZONTAL|wx.NO_BORDER)
        MyToolBarFunc( tb )
    # WDR: handler implementations for FLEAFrame
    # Filter handlers: not implemented yet (TODO).
    def OnFilterVehicles(self, event):
        pass
    def OnFilterFuels(self, event):
        pass
    def OnVehicles(self, event):
        # Open the vehicles management dialog modally.
        dlg = EntriesDialog(self, -1, "Vehicles")
        dlg.CenterOnParent()
        dlg.ShowModal()
        dlg.Destroy()
    def OnFuels(self, event):
        # Open the fuels management dialog modally.
        dlg = EntriesDialog(self, -1, "Fuels")
        dlg.CenterOnParent()
        dlg.ShowModal()
        dlg.Destroy()
    def OnAdd(self, event):
        # Open the "Add Fueling" dialog modally (toolbar and menu both land here).
        dlg = FuelingDialog(self, -1, "Add Fueling")
        dlg.CenterOnParent()
        dlg.ShowModal()
        dlg.Destroy()
    # Edit/delete handlers: not implemented yet (TODO).
    def OnEdit(self, event):
        pass
    def OnDelete(self, event):
        pass
    def OnAbout(self, event):
        # Simple about box built from the module-level metadata constants.
        dialog = wx.MessageDialog(self,
            "Welcome to {} {}\n\n{}\n{}".format(app_name, app_version, copyright, rights),
            "About {}".format(app_name),
            wx.OK|wx.ICON_INFORMATION)
        dialog.CenterOnParent()
        dialog.ShowModal()
        dialog.Destroy()
    def OnQuit(self, event):
        self.Close(True)
    def OnCloseWindow(self, event):
        self.Destroy()
    def OnSize(self, event):
        event.Skip(True)
    def OnUpdateUI(self, event):
        event.Skip(True)
#----------------------------------------------------------------------------
class FLEA(wx.App):
    """wx application object: creates, centers and shows the main frame."""
    def OnInit(self):
        wx.InitAllImageHandlers()
        frame = FLEAFrame(None, -1, app_name, size=[800,600])
        frame.Center()
        frame.Show(True)
        return True
#----------------------------------------------------------------------------
app = FLEA(True)
app.MainLoop()
| Python |
#!/usr/bin/env python
# Copyright (C) 2011 Victor Semionov <vsemionov@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of the contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import wx
from flea_wdr import *
app_name = "FLEA"
app_version = "0.1-dev"
copyright = "Copyright (C) 2011 Victor Semionov <vsemionov@gmail.com>"
rights = "All rights reserved."
# WDR: classes
class FuelingDialog(wx.Dialog):
    """Modal dialog for entering one fueling record.

    The widget layout comes from the wxDesigner-generated FuelingDialogFunc
    (star-imported from flea_wdr).  The "WDR:" comments are tool markers
    used for wxDesigner round-tripping; keep them in place.
    """
    def __init__(self, parent, id, title,
        pos = wx.DefaultPosition, size = wx.DefaultSize,
        style = wx.DEFAULT_DIALOG_STYLE ):
        wx.Dialog.__init__(self, parent, id, title, pos, size, style)
        # WDR: dialog function FuelingDialogFunc for FuelingDialog
        FuelingDialogFunc( self, True )
        # WDR: handler declarations for FuelingDialog
        wx.EVT_BUTTON(self, wx.ID_OK, self.OnOk)
    # WDR: methods for FuelingDialog
    # Validation/transfer hooks: currently accept everything unconditionally.
    def Validate(self, win):
        return True
    def TransferDataToWindow(self):
        return True
    def TransferDataFromWindow(self):
        return True
    # WDR: handler implementations for FuelingDialog
    def OnOk(self, event):
        # Let the default wx.ID_OK handling validate and close the dialog.
        event.Skip(True)
class EntriesDialog(wx.Dialog):
    """Generic modal list-management dialog (used for "Vehicles" and "Fuels").

    Layout is built by the wxDesigner-generated EntriesDialogFunc
    (star-imported from flea_wdr); the "WDR:" comments are tool markers and
    should be kept in place.
    """
    def __init__(self, parent, id, title,
        pos = wx.DefaultPosition, size = wx.DefaultSize,
        style = wx.DEFAULT_DIALOG_STYLE ):
        wx.Dialog.__init__(self, parent, id, title, pos, size, style)
        # WDR: dialog function EntriesDialogFunc for EntriesDialog
        EntriesDialogFunc( self, True )
        # WDR: handler declarations for EntriesDialog
        wx.EVT_BUTTON(self, ID_ADD, self.OnAdd)
        wx.EVT_BUTTON(self, ID_EDIT, self.OnEdit)
        wx.EVT_BUTTON(self, ID_DELETE, self.OnDelete)
    # WDR: methods for EntriesDialog
    # WDR: handler implementations for EntriesDialog
    # Button handlers: not implemented yet (TODO).
    def OnAdd(self, event):
        pass
    def OnEdit(self, event):
        pass
    def OnDelete(self, event):
        pass
class FLEAFrame(wx.Frame):
    """Main application window.

    Hosts the menu bar, a toolbar with vehicle/fuel filter choices, a status
    bar, and a report-style list control showing the recorded fuelings.
    """

    def __init__(self, parent, id, title,
                 pos=wx.DefaultPosition, size=wx.DefaultSize,
                 style=wx.DEFAULT_FRAME_STYLE):
        wx.Frame.__init__(self, parent, id, title, pos, size, style)
        self.CreateMyMenuBar()
        self.CreateMyToolBar()
        self.CreateStatusBar(1)
        self.SetStatusText("Welcome!")
        # Reset both toolbar filter choices to their first entry.
        toolbar = self.GetToolBar()
        toolbar.FindControl(ID_FILTER_VEHICLES).Selection = 0
        toolbar.FindControl(ID_FILTER_FUELS).Selection = 0
        # Main widget: the list of fuelings.
        fuelings = wx.ListCtrl(self, style=wx.LC_REPORT)
        fuelings.InsertColumn(0, "Vehicle")
        fuelings.InsertColumn(1, "Fuel")
        fuelings.InsertColumn(2, "Quantity")
        fuelings.InsertColumn(3, "Odometer")
        fuelings.InsertColumn(4, "Date")
        self.fuelings = fuelings
        # WDR: handler declarations for FLEAFrame
        wx.EVT_TOOL(self, ID_ADD_FUELING, self.OnAdd)
        wx.EVT_CHOICE(self, ID_FILTER_VEHICLES, self.OnFilterVehicles)
        wx.EVT_CHOICE(self, ID_FILTER_FUELS, self.OnFilterFuels)
        wx.EVT_MENU(self, ID_VEHICLES, self.OnVehicles)
        # BUG FIX: OnFuels was previously bound to ID_VEHICLES (a duplicate
        # of the line above), so the Fuels menu item never reached OnFuels.
        # Bind it to ID_FUELS instead (assumed to be exported by flea_wdr --
        # TODO confirm against the generated file).
        wx.EVT_MENU(self, ID_FUELS, self.OnFuels)
        wx.EVT_MENU(self, ID_ADD_FUELING, self.OnAdd)
        wx.EVT_MENU(self, ID_EDIT_FUELING, self.OnEdit)
        wx.EVT_MENU(self, ID_DELETE_FUELING, self.OnDelete)
        wx.EVT_MENU(self, wx.ID_ABOUT, self.OnAbout)
        wx.EVT_MENU(self, wx.ID_EXIT, self.OnQuit)
        wx.EVT_CLOSE(self, self.OnCloseWindow)
        wx.EVT_SIZE(self, self.OnSize)
        wx.EVT_UPDATE_UI(self, -1, self.OnUpdateUI)

    # WDR: methods for FLEAFrame
    def CreateMyMenuBar(self):
        """Install the wxDesigner-generated menu bar."""
        self.SetMenuBar(MyMenuBarFunc())

    def CreateMyToolBar(self):
        """Create and populate the wxDesigner-generated tool bar."""
        tb = self.CreateToolBar(wx.TB_HORIZONTAL | wx.NO_BORDER)
        MyToolBarFunc(tb)

    # WDR: handler implementations for FLEAFrame
    def OnFilterVehicles(self, event):
        # TODO: filter the fueling list by the selected vehicle.
        pass

    def OnFilterFuels(self, event):
        # TODO: filter the fueling list by the selected fuel.
        pass

    def OnVehicles(self, event):
        """Show the modal vehicle management dialog."""
        dlg = EntriesDialog(self, -1, "Vehicles")
        dlg.CenterOnParent()
        dlg.ShowModal()
        dlg.Destroy()

    def OnFuels(self, event):
        """Show the modal fuel management dialog."""
        dlg = EntriesDialog(self, -1, "Fuels")
        dlg.CenterOnParent()
        dlg.ShowModal()
        dlg.Destroy()

    def OnAdd(self, event):
        """Show the modal dialog for adding a fueling record."""
        dlg = FuelingDialog(self, -1, "Add Fueling")
        dlg.CenterOnParent()
        dlg.ShowModal()
        dlg.Destroy()

    def OnEdit(self, event):
        # TODO: edit the selected fueling record.
        pass

    def OnDelete(self, event):
        # TODO: delete the selected fueling record.
        pass

    def OnAbout(self, event):
        """Show the About message box with name, version and copyright."""
        dialog = wx.MessageDialog(self,
            "Welcome to {} {}\n\n{}\n{}".format(app_name, app_version, copyright, rights),
            "About {}".format(app_name),
            wx.OK | wx.ICON_INFORMATION)
        dialog.CenterOnParent()
        dialog.ShowModal()
        dialog.Destroy()

    def OnQuit(self, event):
        """Close the frame (triggers OnCloseWindow)."""
        self.Close(True)

    def OnCloseWindow(self, event):
        self.Destroy()

    def OnSize(self, event):
        event.Skip(True)

    def OnUpdateUI(self, event):
        event.Skip(True)
#----------------------------------------------------------------------------
class FLEA(wx.App):
    """wx application object; builds and shows the main frame on startup."""

    def OnInit(self):
        wx.InitAllImageHandlers()
        main_frame = FLEAFrame(None, -1, app_name, size=[800, 600])
        main_frame.Center()
        main_frame.Show(True)
        return True
#----------------------------------------------------------------------------
def main():
    """Create the wx application object and enter the event loop."""
    app = FLEA(True)
    app.MainLoop()

# IMPROVEMENT: guard the entry point so importing this module no longer
# starts the GUI event loop as a side effect; running it as a script behaves
# exactly as before.
if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'afshar@google.com (Ali Afshar)'
import os
import httplib2
import sessions
from google.appengine.ext import db
from google.appengine.ext.webapp import template
from apiclient.discovery import build_from_document
from apiclient.http import MediaUpload
from oauth2client import client
from oauth2client.appengine import CredentialsProperty
from oauth2client.appengine import StorageByKeyName
from oauth2client.appengine import simplejson as json
APIS_BASE = 'https://www.googleapis.com'

# OAuth 2.0 scopes required: per-file Drive access plus the user's email
# address and basic profile.
ALL_SCOPES = ('https://www.googleapis.com/auth/drive.file '
              'https://www.googleapis.com/auth/userinfo.email '
              'https://www.googleapis.com/auth/userinfo.profile')

# Names of the query parameters used by the OAuth 2.0 / Drive flows.
CODE_PARAMETER = 'code'
STATE_PARAMETER = 'state'

# Read the session secret and the API discovery documents once at import
# time.  BUG FIX: use 'with' so the file objects are closed deterministically
# instead of relying on garbage collection of the open(...).read() pattern.
with open('session.secret') as _f:
  SESSION_SECRET = _f.read()
with open('drive.json') as _f:
  DRIVE_DISCOVERY_DOC = _f.read()
with open('users.json') as _f:
  USERS_DISCOVERY_DOC = _f.read()
class Credentials(db.Model):
  """Datastore entity for storing OAuth2.0 credentials."""
  # Serialized OAuth 2.0 credentials; read and written via StorageByKeyName,
  # keyed on the Google user id (see GetCodeCredentials/GetSessionCredentials).
  credentials = CredentialsProperty()
def CreateOAuthFlow(request):
  """Build an OAuth2.0 flow controller for the given request.

  Args:
    request: HTTP request to create OAuth2.0 flow for.
  Returns:
    OAuth2.0 Flow instance suitable for performing OAuth2.0.
  """
  oauth_flow = client.flow_from_clientsecrets('client-debug.json', scope='')
  # Derive the redirect URI from the request URL (query string and trailing
  # slash removed) so the app works unchanged on alternative hosts.
  base_url = request.url.split('?', 1)[0]
  oauth_flow.redirect_uri = base_url.rstrip('/')
  return oauth_flow
def GetCodeCredentials(request):
  """Create OAuth2.0 credentials by exchanging an authorization code.

  Args:
    request: HTTP request used for extracting an authorization code.
  Returns:
    OAuth2.0 credentials suitable for authorizing clients, or None when the
    request carries no authorization code.
  """
  code = request.get(CODE_PARAMETER)
  if not code:
    return None
  # Exchange the code for credentials (step 2 of the web server flow).
  oauth_flow = CreateOAuthFlow(request)
  creds = oauth_flow.step2_exchange(code)
  # Look up the user's id so the credentials can be keyed to the user.
  users_service = CreateService(USERS_DISCOVERY_DOC, creds)
  userid = users_service.userinfo().get().execute().get('id')
  # Remember the user in the cookie session and persist the credentials.
  request.session.set_secure_cookie(name='userid', value=userid)
  StorageByKeyName(Credentials, userid, 'credentials').put(creds)
  return creds
def GetSessionCredentials(request):
  """Load stored OAuth2.0 credentials for the current cookie session.

  Args:
    request: HTTP request to use session from.
  Returns:
    Valid OAuth2.0 credentials, or None when the session has no user id or
    the stored credentials are missing/invalid.
  """
  userid = request.session.get_secure_cookie(name='userid')
  if not userid:
    return None
  stored = StorageByKeyName(Credentials, userid, 'credentials').get()
  if stored and not stored.invalid:
    return stored
  return None
def CreateService(discovery_doc, creds):
  """Build an authorized Google API service from a discovery document.

  Args:
    discovery_doc: Discovery doc used to configure service.
    creds: Credentials used to authorize service.
  Returns:
    Authorized Google API service.
  """
  # Authorize a fresh Http instance with the supplied credentials before
  # handing it to the service builder.
  authorized_http = httplib2.Http()
  creds.authorize(authorized_http)
  return build_from_document(discovery_doc, APIS_BASE, http=authorized_http)
def RedirectAuth(handler):
  """Send the handler's client to the OAuth2.0 authorization page.

  Args:
    handler: webapp.RequestHandler to redirect.
  """
  flow = CreateOAuthFlow(handler.request)
  # This redirect does not originate from the Drive UI (which sets the scopes
  # listed in the API Console automatically), so set them manually.
  flow.scope = ALL_SCOPES
  handler.redirect(flow.step1_get_authorize_url(flow.redirect_uri))
def CreateDrive(handler):
  """Create a fully authorized drive service for this handler.

  Args:
    handler: RequestHandler from which drive service is generated.
  Returns:
    Authorized drive service, or None (after redirecting the client to the
    authorization page) when no credentials are available.
  """
  request = handler.request
  # Attach a cookie-based session so the credential helpers can use it.
  request.session = sessions.LilCookies(handler, SESSION_SECRET)
  creds = GetCodeCredentials(request) or GetSessionCredentials(request)
  if not creds:
    RedirectAuth(handler)
    return None
  return CreateService(DRIVE_DISCOVERY_DOC, creds)
def ServiceEnabled(view):
  """Decorator injecting an authorized drive service into an HTML handler.

  Args:
    view: HTTP request handler method.
  Returns:
    Decorated handler which accepts the service as a parameter.
  """
  def ServiceDecoratedView(handler, view=view):
    drive = CreateDrive(handler)
    body = view(handler, drive)
    handler.response.headers['Content-Type'] = 'text/html'
    handler.response.out.write(body)
  return ServiceDecoratedView
def ServiceEnabledJson(view):
  """Decorator injecting an authorized drive service into a JSON handler.

  The request body, when present, is decoded from JSON and passed to the
  view; the view's return value is encoded back to JSON for the response.

  Args:
    view: HTTP request handler method.
  Returns:
    Decorated handler which accepts the service as a parameter.
  """
  def ServiceDecoratedView(handler, view=view):
    drive = CreateDrive(handler)
    data = json.loads(handler.request.body) if handler.request.body else None
    payload = json.dumps(view(handler, drive, data))
    handler.response.headers['Content-Type'] = 'application/json'
    handler.response.out.write(payload)
  return ServiceDecoratedView
class DriveState(object):
  """State passed from the Drive UI: an action plus optional file ids."""

  def __init__(self, state):
    self.ParseState(state)

  @classmethod
  def FromRequest(cls, request):
    """Create a Drive State instance from an HTTP request.

    Args:
      cls: Type this class method is called against.
      request: HTTP request.
    """
    return DriveState(request.get(STATE_PARAMETER))

  def ParseState(self, state):
    """Dispatch to the JSON or plain parser based on the state's shape.

    Args:
      state: State parameter to parse.
    """
    if state.startswith('{'):
      self.ParseJsonState(state)
    else:
      self.ParsePlainState(state)

  def ParseJsonState(self, state):
    """Parse a JSON-encoded state parameter.

    Args:
      state: State parameter to parse.
    """
    decoded = json.loads(state)
    self.action = decoded['action']
    # Normalize ids to plain strings.
    self.ids = [str(item) for item in decoded.get('ids', [])]

  def ParsePlainState(self, state):
    """Parse a state parameter that is a plain resource id or missing.

    Args:
      state: State parameter to parse.
    """
    if state:
      self.action = 'open'
      self.ids = [state]
    else:
      self.action = 'create'
      self.ids = []
class MediaInMemoryUpload(MediaUpload):
  """MediaUpload serving a string of bytes held in memory.

  Construct one and pass it as the media_body parameter of an API method,
  for example for a service that accepts plain text.
  """

  def __init__(self, body, mimetype='application/octet-stream',
               chunksize=256*1024, resumable=False):
    """Create a new MediaBytesUpload.

    Args:
      body: string, Bytes of body content.
      mimetype: string, Mime-type of the file or default of
          'application/octet-stream'.
      chunksize: int, File will be uploaded in chunks of this many bytes.
          Only used if resumable=True.
      resumable: bool, True if this is a resumable upload. False means
          upload in a single request.
    """
    self._body = body
    self._mimetype = mimetype
    self._resumable = resumable
    self._chunksize = chunksize

  def chunksize(self):
    """Return the chunk size (bytes) used for resumable uploads."""
    return self._chunksize

  def mimetype(self):
    """Return the mime type of the body."""
    return self._mimetype

  def size(self):
    """Return the total size of the body in bytes."""
    return len(self._body)

  def resumable(self):
    """Return True when this upload is resumable."""
    return self._resumable

  def getbytes(self, begin, length):
    """Return up to *length* bytes of the body starting at *begin*.

    Args:
      begin: int, offset from beginning of file.
      length: int, number of bytes to read, starting at begin.
    Returns:
      A string of bytes read; may be shorter than length if EOF was
      reached first.
    """
    end = begin + length
    return self._body[begin:end]
def RenderTemplate(name, **context):
  """Render a named template in a context.

  Thin wrapper over the App Engine template module.

  Args:
    name: Template name.
    context: Keyword arguments to render as template variables.
  Returns:
    The rendered template as a string.
  """
  return template.render(name, context)
| Python |
#!/usr/bin/python
#
# Copyright (C) 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'afshar@google.com (Ali Afshar)'
# Add the library location to the path
import sys
sys.path.insert(0, 'lib')
import os
import httplib2
import sessions
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
from google.appengine.ext.webapp import template
from apiclient.discovery import build
from apiclient.http import MediaUpload
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
from oauth2client.client import AccessTokenRefreshError
from oauth2client.appengine import CredentialsProperty
from oauth2client.appengine import StorageByKeyName
from oauth2client.appengine import simplejson as json
# OAuth 2.0 scopes this application requests: per-file Drive access plus the
# user's email address and basic profile (used by the oauth2/userinfo calls).
ALL_SCOPES = ('https://www.googleapis.com/auth/drive.file '
              'https://www.googleapis.com/auth/userinfo.email '
              'https://www.googleapis.com/auth/userinfo.profile')
def SibPath(name):
  """Return the path of a file that sits next to this module.

  Args:
    name: Name of sibling file.
  Returns:
    Path to sibling file.
  """
  here = os.path.dirname(__file__)
  return os.path.join(here, name)
# Load the secret that is used for client side sessions.
# Create one of these for yourself with, for example:
# python -c "import os; print os.urandom(64)" > session-secret
# BUG FIX: use 'with' so the file objects are closed deterministically
# instead of relying on garbage collection of the open(...).read() pattern.
with open(SibPath('session.secret')) as _secret_file:
  SESSION_SECRET = _secret_file.read()
# The static single-page UI served by MainPage.RenderTemplate.
with open(SibPath('index.html')) as _index_file:
  INDEX_HTML = _index_file.read()
class Credentials(db.Model):
  """Datastore entity for storing OAuth2.0 credentials.

  The CredentialsProperty is provided by the Google API Python Client, and is
  used by the Storage classes to store OAuth 2.0 credentials in the data store."""
  # Keyed by Google user id via StorageByKeyName (see BaseDriveHandler).
  credentials = CredentialsProperty()
def CreateService(service, version, creds):
  """Load and authorize a Google API service.

  Args:
    service: Service name (e.g 'drive', 'oauth2').
    version: Service version (e.g 'v1').
    creds: Credentials used to authorize service.
  Returns:
    Authorized Google API service.
  """
  # Authorize a fresh Http instance with the supplied credentials; every
  # request made through it will carry the access token.
  authorized_http = httplib2.Http()
  creds.authorize(authorized_http)
  # Build the service from its discovery document.
  return build(service, version, http=authorized_http)
class DriveState(object):
  """State provided by Drive: the requested action and any file ids."""

  def __init__(self, state):
    """Parse and load the JSON state parameter, if any.

    Args:
      state: State query parameter as a string.
    """
    if not state:
      # No state at all means the "create new file" flow.
      self.action = 'create'
      self.ids = []
    else:
      decoded = json.loads(state)
      self.action = decoded['action']
      # Normalize ids to plain strings.
      self.ids = [str(item) for item in decoded.get('ids', [])]

  @classmethod
  def FromRequest(cls, request):
    """Create a Drive State instance from an HTTP request.

    Args:
      cls: Type this class method is called against.
      request: HTTP request.
    """
    return DriveState(request.get('state'))
class BaseDriveHandler(webapp.RequestHandler):
  """Base request handler for drive applications.

  Adds Authorization support for Drive: helpers to run the OAuth 2.0 web
  server flow, persist credentials in the datastore keyed by user id, and
  build authorized API service clients.
  """

  def CreateOAuthFlow(self):
    """Create OAuth2.0 flow controller.

    This controller can be used to perform all parts of the OAuth 2.0 dance
    including exchanging an Authorization code.

    Returns:
      OAuth2.0 Flow instance suitable for performing OAuth2.0.
    """
    flow = flow_from_clientsecrets('client_secrets.json', scope='')
    # Dynamically set the redirect_uri based on the request URL. This is
    # extremely convenient for debugging to an alternative host without
    # manually setting the redirect URI.
    flow.redirect_uri = self.request.url.split('?', 1)[0].rsplit('/', 1)[0]
    return flow

  def GetCodeCredentials(self):
    """Create OAuth 2.0 credentials by extracting a code and performing OAuth2.0.

    The authorization code is extracted from the URI parameters. If it is
    absent, None is returned immediately. Otherwise, if it is present, it is
    used to perform step 2 of the OAuth 2.0 web server flow.

    Once a token is received, the user information is fetched from the
    userinfo service and stored in the session. The token is saved in the
    datastore against the user ID received from the userinfo service.

    Returns:
      OAuth2.0 credentials suitable for authorizing clients or None if
      Authorization could not take place.
    """
    # Other frameworks use different API to get a query parameter.
    code = self.request.get('code')
    if not code:
      # returns None to indicate that no code was passed from Google Drive.
      return None
    # Auth flow is a controller that is loaded with the client information,
    # including client_id, client_secret, redirect_uri etc
    oauth_flow = self.CreateOAuthFlow()
    # Perform the exchange of the code. If there is a failure with exchanging
    # the code, return None.
    try:
      creds = oauth_flow.step2_exchange(code)
    except FlowExchangeError:
      return None
    # Create an API service that can use the userinfo API. Authorize it with
    # our credentials that we gained from the code exchange.
    users_service = CreateService('oauth2', 'v2', creds)
    # Make a call against the userinfo service to retrieve the user's
    # information. In this case we are interested in the user's "id" field.
    userid = users_service.userinfo().get().execute().get('id')
    # Store the user id in the user's cookie-based session.
    session = sessions.LilCookies(self, SESSION_SECRET)
    session.set_secure_cookie(name='userid', value=userid)
    # Store the credentials in the data store using the userid as the key.
    StorageByKeyName(Credentials, userid, 'credentials').put(creds)
    return creds

  def GetSessionCredentials(self):
    """Get OAuth 2.0 credentials for an HTTP session.

    If the user has a user id stored in their cookie session, extract that
    value and use it to load that user's credentials from the data store.

    Returns:
      OAuth2.0 credentials suitable for authorizing clients, or None.
    """
    # Try to load the user id from the session
    session = sessions.LilCookies(self, SESSION_SECRET)
    userid = session.get_secure_cookie(name='userid')
    if not userid:
      # return None to indicate that no credentials could be loaded from the
      # session.
      return None
    # Load the credentials from the data store, using the userid as a key.
    creds = StorageByKeyName(Credentials, userid, 'credentials').get()
    # if the credentials are invalid, return None to indicate that the
    # credentials cannot be used.
    if creds and creds.invalid:
      return None
    return creds

  def RedirectAuth(self):
    """Redirect a handler to an authorization page.

    Used when a handler fails to fetch credentials suitable for making Drive
    API requests. The request is redirected to an OAuth 2.0 authorization
    approval page and on approval, are returned to application.
    """
    flow = self.CreateOAuthFlow()
    # Manually add the required scopes. Since this redirect does not originate
    # from the Google Drive UI, which authomatically sets the scopes that are
    # listed in the API Console.
    flow.scope = ALL_SCOPES
    # Create the redirect URI by performing step 1 of the OAuth 2.0 web server
    # flow.
    uri = flow.step1_get_authorize_url(flow.redirect_uri)
    # Perform the redirect.
    self.redirect(uri)

  def RespondJSON(self, data):
    """Generate a JSON response and return it to the client.

    Args:
      data: The data that will be converted to JSON to return.
    """
    self.response.headers['Content-Type'] = 'application/json'
    self.response.out.write(json.dumps(data))

  def CreateAuthorizedService(self, service, version):
    """Create an authorized service instance.

    The service can only ever retrieve the credentials from the session.

    Args:
      service: Service name (e.g 'drive', 'oauth2').
      version: Service version (e.g 'v1').
    Returns:
      Authorized service, or None after redirecting to the authorization
      flow when no credentials are available.
    """
    # For the service, the session holds the credentials
    creds = self.GetSessionCredentials()
    if creds:
      # If the session contains credentials, use them to create a Drive
      # service instance.
      return CreateService(service, version, creds)
    else:
      # If no credentials could be loaded from the session, redirect the user
      # to the authorization page.
      self.RedirectAuth()

  def CreateDrive(self):
    """Create a drive client instance."""
    return self.CreateAuthorizedService('drive', 'v2')

  def CreateUserInfo(self):
    """Create a user info client instance."""
    return self.CreateAuthorizedService('oauth2', 'v2')
class MainPage(BaseDriveHandler):
  """Web handler for the main page.

  Handles requests and returns the user interface for Open With and Create
  cases. Responsible for parsing the state provided from the Drive UI and
  acting appropriately.
  """

  def get(self):
    """Handle GET for Create New and Open With.

    This creates an authorized client, and checks whether a resource id has
    been passed or not. If a resource ID has been passed, this is the Open
    With use-case, otherwise it is the Create New use-case.
    """
    # Generate a state instance for the request; this includes the action and
    # the file id(s) that have been sent from the Drive user interface.
    drive_state = DriveState.FromRequest(self.request)
    if drive_state.action == 'open' and len(drive_state.ids) > 0:
      # Open With: hand off to the client-side editor, forwarding any OAuth
      # authorization code so the client can complete the flow.
      code = self.request.get('code')
      if code:
        code = '?code=%s' % code
      self.redirect('/#edit/%s%s' % (drive_state.ids[0], code))
      return
    # Fetch the credentials by extracting an OAuth 2.0 authorization code
    # from the request URL. If the code is not present, redirect to the
    # OAuth 2.0 authorization URL.
    creds = self.GetCodeCredentials()
    if not creds:
      return self.RedirectAuth()
    # CLEANUP: a leftover 'client_id = self.CreateOAuthFlow()...' computation
    # was removed here; its result was never used and it re-read the client
    # secrets file on every request.
    self.RenderTemplate()

  def RenderTemplate(self):
    """Serve the static single-page UI loaded at import time."""
    self.response.headers['Content-Type'] = 'text/html'
    self.response.out.write(INDEX_HTML)
class ServiceHandler(BaseDriveHandler):
  """Web handler for the service to read and write to Drive.

  POST creates a file, GET fetches a file's metadata and content, PUT
  updates a file's metadata and (optionally) content.
  """

  def post(self):
    """Called when HTTP POST requests are received by the web application.

    The POST body is JSON which is deserialized and used as values to create
    a new file in Drive. The authorization access token for this action is
    retreived from the data store.
    """
    # Create a Drive service
    service = self.CreateDrive()
    if service is None:
      # CreateDrive already redirected to the authorization page.
      return
    # Load the data that has been posted as JSON
    data = self.RequestJSON()
    # Create a new file data structure.
    resource = {
        'title': data['title'],
        'description': data['description'],
        'mimeType': data['mimeType'],
    }
    try:
      # Make an insert request to create a new file. A MediaInMemoryUpload
      # instance is used to upload the file body.
      resource = service.files().insert(
          body=resource,
          media_body=MediaInMemoryUpload(
              data.get('content', ''),
              data['mimeType'],
              resumable=True)
      ).execute()
      # Respond with the new file id as JSON.
      self.RespondJSON(resource['id'])
    except AccessTokenRefreshError:
      # In cases where the access token has expired and cannot be refreshed
      # (e.g. manual token revoking) redirect the user to the authorization
      # page to authorize.
      self.RedirectAuth()

  def get(self):
    """Called when HTTP GET requests are received by the web application.

    Use the query parameter file_id to fetch the required file's metadata
    then content and return it as a JSON object.

    Since DrEdit deals with text files, it is safe to dump the content
    directly into JSON, but this is not the case with binary files, where
    something like Base64 encoding is more appropriate.
    """
    # Create a Drive service
    service = self.CreateDrive()
    if service is None:
      return
    try:
      # Requests are expected to pass the file_id query parameter.
      file_id = self.request.get('file_id')
      if file_id:
        # Fetch the file metadata by making the service.files().get method of
        # the Drive API.
        f = service.files().get(fileId=file_id).execute()
        downloadUrl = f.get('downloadUrl')
        # If a download URL is provided in the file metadata, use it to make
        # an authorized request to fetch the file ontent. Set this content in
        # the data to return as the 'content' field. If there is no
        # downloadUrl, just set empty content.
        if downloadUrl:
          # NOTE(review): uses the service's private _http attribute to reuse
          # the authorized Http instance.
          resp, f['content'] = service._http.request(downloadUrl)
        else:
          f['content'] = ''
      else:
        f = None
      # Generate a JSON response with the file data and return to the client.
      self.RespondJSON(f)
    except AccessTokenRefreshError:
      # Catch AccessTokenRefreshError which occurs when the API client
      # library fails to refresh a token. This occurs, for example, when a
      # refresh token is revoked. When this happens the user is redirected to
      # the Authorization URL.
      self.RedirectAuth()

  def put(self):
    """Called when HTTP PUT requests are received by the web application.

    The PUT body is JSON which is deserialized and used as values to update
    a file in Drive. The authorization access token for this action is
    retreived from the data store.
    """
    # Create a Drive service
    service = self.CreateDrive()
    if service is None:
      return
    # Load the data that has been posted as JSON
    data = self.RequestJSON()
    try:
      # Create a new file data structure.
      content = data.get('content')
      # Remove 'content' from the metadata body; it is uploaded as media.
      if 'content' in data:
        data.pop('content')
      if content is not None:
        # Make an update request to update the file. A MediaInMemoryUpload
        # instance is used to upload the file body. Because of a limitation,
        # this request must be made in two parts, the first to update the
        # metadata, and the second to update the body.
        resource = service.files().update(
            fileId=data['resource_id'],
            newRevision=self.request.get('newRevision', False),
            body=data,
            media_body=MediaInMemoryUpload(
                content, data['mimeType'], resumable=True)
        ).execute()
      else:
        # Only update the metadata, a patch request is prefered but not yet
        # supported on Google App Engine; see
        # http://code.google.com/p/googleappengine/issues/detail?id=6316.
        resource = service.files().update(
            fileId=data['resource_id'],
            newRevision=self.request.get('newRevision', False),
            body=data).execute()
      # Respond with the new file id as JSON.
      self.RespondJSON(resource['id'])
    except AccessTokenRefreshError:
      # In cases where the access token has expired and cannot be refreshed
      # (e.g. manual token revoking) redirect the user to the authorization
      # page to authorize.
      self.RedirectAuth()

  def RequestJSON(self):
    """Load the request body as JSON.

    Returns:
      Request body loaded as JSON or None if there is no request body.
    """
    if self.request.body:
      return json.loads(self.request.body)
class UserHandler(BaseDriveHandler):
  """Web handler returning the authenticated user's profile as JSON."""

  def get(self):
    """Called when HTTP GET requests are received by the web application."""
    userinfo_service = self.CreateUserInfo()
    if userinfo_service is None:
      # CreateUserInfo already redirected to the authorization page.
      return
    try:
      profile = userinfo_service.userinfo().get().execute()
      self.RespondJSON(profile)
    except AccessTokenRefreshError:
      # The stored token could not be refreshed (e.g. it was revoked), so
      # send the user back through the authorization flow.
      self.RedirectAuth()
class AboutHandler(BaseDriveHandler):
  """Web handler returning the user's Drive "about" information as JSON."""

  def get(self):
    """Called when HTTP GET requests are received by the web application."""
    drive_service = self.CreateDrive()
    if drive_service is None:
      # CreateDrive already redirected to the authorization page.
      return
    try:
      about_info = drive_service.about().get().execute()
      self.RespondJSON(about_info)
    except AccessTokenRefreshError:
      # The stored token could not be refreshed (e.g. it was revoked), so
      # send the user back through the authorization flow.
      self.RedirectAuth()
class MediaInMemoryUpload(MediaUpload):
  """MediaUpload serving a string of bytes held in memory.

  Construct one and pass it as the media_body parameter of an API method,
  for example for a service that accepts plain text.
  """

  def __init__(self, body, mimetype='application/octet-stream',
               chunksize=256*1024, resumable=False):
    """Create a new MediaBytesUpload.

    Args:
      body: string, Bytes of body content.
      mimetype: string, Mime-type of the file or default of
          'application/octet-stream'.
      chunksize: int, File will be uploaded in chunks of this many bytes.
          Only used if resumable=True.
      resumable: bool, True if this is a resumable upload. False means
          upload in a single request.
    """
    self._body = body
    self._mimetype = mimetype
    self._resumable = resumable
    self._chunksize = chunksize

  def chunksize(self):
    """Return the chunk size (bytes) used for resumable uploads."""
    return self._chunksize

  def mimetype(self):
    """Return the mime type of the body."""
    return self._mimetype

  def size(self):
    """Return the total size of the body in bytes."""
    return len(self._body)

  def resumable(self):
    """Return True when this upload is resumable."""
    return self._resumable

  def getbytes(self, begin, length):
    """Return up to *length* bytes of the body starting at *begin*.

    Args:
      begin: int, offset from beginning of file.
      length: int, number of bytes to read, starting at begin.
    Returns:
      A string of bytes read; may be shorter than length if EOF was
      reached first.
    """
    end = begin + length
    return self._body[begin:end]
# Create an WSGI application suitable for running on App Engine.
# Routes: '/' main UI, '/svc' file read/write service, '/about' Drive about
# info, '/user' user profile.
application = webapp.WSGIApplication(
    [('/', MainPage), ('/svc', ServiceHandler), ('/about', AboutHandler),
     ('/user', UserHandler)],
    # XXX Set to False in production.
    debug=True
    )
def main():
  """Main entry point for executing a request with this handler."""
  run_wsgi_app(application)

# CGI-style entry point used by the App Engine Python runtime.
if __name__ == "__main__":
  main()
| Python |
#!/usr/bin/env python
#
# Copyright (c) 2002, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ---
# Author: Chad Lester
# Design and style contributions by:
# Amit Patel, Bogdan Cocosel, Daniel Dulitz, Eric Tiedemann,
# Eric Veach, Laurence Gonsalves, Matthew Springer
# Code reorganized a bit by Craig Silverstein
"""This module is used to define and parse command line flags.
This module defines a *distributed* flag-definition policy: rather than
an application having to define all flags in or near main(), each python
module defines flags that are useful to it. When one python module
imports another, it gains access to the other's flags. (This is
implemented by having all modules share a common, global registry object
containing all the flag information.)
Flags are defined through the use of one of the DEFINE_xxx functions.
The specific function used determines how the flag is parsed, checked,
and optionally type-converted, when it's seen on the command line.
IMPLEMENTATION: DEFINE_* creates a 'Flag' object and registers it with a
'FlagValues' object (typically the global FlagValues FLAGS, defined
here). The 'FlagValues' object can scan the command line arguments and
pass flag arguments to the corresponding 'Flag' objects for
value-checking and type conversion. The converted flag values are
available as attributes of the 'FlagValues' object.
Code can access the flag through a FlagValues object, for instance
gflags.FLAGS.myflag. Typically, the __main__ module passes the command
line arguments to gflags.FLAGS for parsing.
At bottom, this module calls getopt(), so getopt functionality is
supported, including short- and long-style flags, and the use of -- to
terminate flags.
Methods defined by the flag module will throw 'FlagsError' exceptions.
The exception argument will be a human-readable string.
FLAG TYPES: This is a list of the DEFINE_*'s that you can do. All flags
take a name, default value, help-string, and optional 'short' name
(one-letter name). Some flags have other arguments, which are described
with the flag.
DEFINE_string: takes any input, and interprets it as a string.
DEFINE_bool or
DEFINE_boolean: typically does not take an argument: say --myflag to
set FLAGS.myflag to true, or --nomyflag to set
FLAGS.myflag to false. Alternately, you can say
--myflag=true or --myflag=t or --myflag=1 or
--myflag=false or --myflag=f or --myflag=0
DEFINE_float: takes an input and interprets it as a floating point
number. Takes optional args lower_bound and upper_bound;
if the number specified on the command line is out of
range, it will raise a FlagError.
DEFINE_integer: takes an input and interprets it as an integer. Takes
optional args lower_bound and upper_bound as for floats.
DEFINE_enum: takes a list of strings which represents legal values. If
the command-line value is not in this list, raise a flag
error. Otherwise, assign to FLAGS.flag as a string.
DEFINE_list: Takes a comma-separated list of strings on the commandline.
Stores them in a python list object.
DEFINE_spaceseplist: Takes a space-separated list of strings on the
commandline. Stores them in a python list object.
Example: --myspacesepflag "foo bar baz"
DEFINE_multistring: The same as DEFINE_string, except the flag can be
specified more than once on the commandline. The
result is a python list object (list of strings),
even if the flag is only on the command line once.
DEFINE_multi_int: The same as DEFINE_integer, except the flag can be
specified more than once on the commandline. The
result is a python list object (list of ints), even if
the flag is only on the command line once.
SPECIAL FLAGS: There are a few flags that have special meaning:
--help prints a list of all the flags in a human-readable fashion
--helpshort prints a list of all key flags (see below).
--helpxml prints a list of all flags, in XML format. DO NOT parse
the output of --help and --helpshort. Instead, parse
the output of --helpxml. For more info, see
"OUTPUT FOR --helpxml" below.
--flagfile=foo read flags from file foo.
--undefok=f1,f2 ignore unrecognized option errors for f1,f2.
For boolean flags, you should use --undefok=boolflag, and
--boolflag and --noboolflag will be accepted. Do not use
--undefok=noboolflag.
-- as in getopt(), terminates flag-processing
FLAGS VALIDATORS: If your program:
- requires flag X to be specified
- needs flag Y to match a regular expression
- or requires any more general constraint to be satisfied
then validators are for you!
Each validator represents a constraint over one flag, which is enforced
starting from the initial parsing of the flags and until the program
terminates.
Also, lower_bound and upper_bound for numerical flags are enforced using flag
validators.
Howto:
If you want to enforce a constraint over one flag, use
gflags.RegisterValidator(flag_name,
checker,
message='Flag validation failed',
flag_values=FLAGS)
After flag values are initially parsed, and after any change to the specified
flag, method checker(flag_value) will be executed. If constraint is not
satisfied, an IllegalFlagValue exception will be raised. See
RegisterValidator's docstring for a detailed explanation on how to construct
your own checker.
EXAMPLE USAGE:
FLAGS = gflags.FLAGS
gflags.DEFINE_integer('my_version', 0, 'Version number.')
gflags.DEFINE_string('filename', None, 'Input file name', short_name='f')
gflags.RegisterValidator('my_version',
lambda value: value % 2 == 0,
message='--my_version must be divisible by 2')
gflags.MarkFlagAsRequired('filename')
NOTE ON --flagfile:
Flags may be loaded from text files in addition to being specified on
the commandline.
Any flags you don't feel like typing, throw them in a file, one flag per
line, for instance:
--myflag=myvalue
--nomyboolean_flag
You then specify your file with the special flag '--flagfile=somefile'.
You CAN recursively nest flagfile= tokens OR use multiple files on the
command line. Lines beginning with a single hash '#' or a double slash
'//' are comments in your flagfile.
Any flagfile=<file> will be interpreted as having a relative path from
the current working directory rather than from the place the file was
included from:
myPythonScript.py --flagfile=config/somefile.cfg
If somefile.cfg includes further --flagfile= directives, these will be
referenced relative to the original CWD, not from the directory the
including flagfile was found in!
The caveat applies to people who are including a series of nested files
in a different dir than they are executing out of. Relative path names
are always from CWD, not from the directory of the parent include
flagfile. We do now support '~' expanded directory names.
Absolute path names ALWAYS work!
EXAMPLE USAGE:
FLAGS = gflags.FLAGS
# Flag names are globally defined! So in general, we need to be
# careful to pick names that are unlikely to be used by other libraries.
# If there is a conflict, we'll get an error at import time.
gflags.DEFINE_string('name', 'Mr. President', 'your name')
gflags.DEFINE_integer('age', None, 'your age in years', lower_bound=0)
gflags.DEFINE_boolean('debug', False, 'produces debugging output')
gflags.DEFINE_enum('gender', 'male', ['male', 'female'], 'your gender')
def main(argv):
try:
argv = FLAGS(argv) # parse flags
except gflags.FlagsError, e:
print '%s\\nUsage: %s ARGS\\n%s' % (e, sys.argv[0], FLAGS)
sys.exit(1)
if FLAGS.debug: print 'non-flag arguments:', argv
print 'Happy Birthday', FLAGS.name
if FLAGS.age is not None:
print 'You are a %d year old %s' % (FLAGS.age, FLAGS.gender)
if __name__ == '__main__':
main(sys.argv)
KEY FLAGS:
As we already explained, each module gains access to all flags defined
by all the other modules it transitively imports. In the case of
non-trivial scripts, this means a lot of flags ... For documentation
purposes, it is good to identify the flags that are key (i.e., really
important) to a module. Clearly, the concept of "key flag" is a
subjective one. When trying to determine whether a flag is key to a
module or not, assume that you are trying to explain your module to a
potential user: which flags would you really like to mention first?
We'll describe shortly how to declare which flags are key to a module.
For the moment, assume we know the set of key flags for each module.
Then, if you use the app.py module, you can use the --helpshort flag to
print only the help for the flags that are key to the main module, in a
human-readable format.
NOTE: If you need to parse the flag help, do NOT use the output of
--help / --helpshort. That output is meant for human consumption, and
may be changed in the future. Instead, use --helpxml; flags that are
key for the main module are marked there with a <key>yes</key> element.
The set of key flags for a module M is composed of:
1. Flags defined by module M by calling a DEFINE_* function.
2. Flags that module M explicitly declares as key by using the function
DECLARE_key_flag(<flag_name>)
3. Key flags of other modules that M specifies by using the function
ADOPT_module_key_flags(<other_module>)
This is a "bulk" declaration of key flags: each flag that is key for
<other_module> becomes key for the current module too.
Notice that if you do not use the functions described at points 2 and 3
above, then --helpshort prints information only about the flags defined
by the main module of our script. In many cases, this behavior is good
enough. But if you move part of the main module code (together with the
related flags) into a different module, then it is nice to use
DECLARE_key_flag / ADOPT_module_key_flags and make sure --helpshort
lists all relevant flags (otherwise, your code refactoring may confuse
your users).
Note: each of DECLARE_key_flag / ADOPT_module_key_flags has its own
pluses and minuses: DECLARE_key_flag is more targeted and may lead a
more focused --helpshort documentation. ADOPT_module_key_flags is good
for cases when an entire module is considered key to the current script.
Also, it does not require updates to client scripts when a new flag is
added to the module.
EXAMPLE USAGE 2 (WITH KEY FLAGS):
Consider an application that contains the following three files (two
auxiliary modules and a main module)
File libfoo.py:
import gflags
gflags.DEFINE_integer('num_replicas', 3, 'Number of replicas to start')
gflags.DEFINE_boolean('rpc2', True, 'Turn on the usage of RPC2.')
... some code ...
File libbar.py:
import gflags
gflags.DEFINE_string('bar_gfs_path', '/gfs/path',
'Path to the GFS files for libbar.')
gflags.DEFINE_string('email_for_bar_errors', 'bar-team@google.com',
'Email address for bug reports about module libbar.')
gflags.DEFINE_boolean('bar_risky_hack', False,
'Turn on an experimental and buggy optimization.')
... some code ...
File myscript.py:
import gflags
import libfoo
import libbar
gflags.DEFINE_integer('num_iterations', 0, 'Number of iterations.')
# Declare that all flags that are key for libfoo are
# key for this module too.
gflags.ADOPT_module_key_flags(libfoo)
# Declare that the flag --bar_gfs_path (defined in libbar) is key
# for this module.
gflags.DECLARE_key_flag('bar_gfs_path')
... some code ...
When myscript is invoked with the flag --helpshort, the resulting help
message lists information about all the key flags for myscript:
--num_iterations, --num_replicas, --rpc2, and --bar_gfs_path.
Of course, myscript uses all the flags declared by it (in this case,
just --num_replicas) or by any of the modules it transitively imports
(e.g., the modules libfoo, libbar). E.g., it can access the value of
FLAGS.bar_risky_hack, even if --bar_risky_hack is not declared as a key
flag for myscript.
OUTPUT FOR --helpxml:
The --helpxml flag generates output with the following structure:
<?xml version="1.0"?>
<AllFlags>
<program>PROGRAM_BASENAME</program>
<usage>MAIN_MODULE_DOCSTRING</usage>
(<flag>
[<key>yes</key>]
<file>DECLARING_MODULE</file>
<name>FLAG_NAME</name>
<meaning>FLAG_HELP_MESSAGE</meaning>
<default>DEFAULT_FLAG_VALUE</default>
<current>CURRENT_FLAG_VALUE</current>
<type>FLAG_TYPE</type>
[OPTIONAL_ELEMENTS]
</flag>)*
</AllFlags>
Notes:
1. The output is intentionally similar to the output generated by the
C++ command-line flag library. The few differences are due to the
Python flags that do not have a C++ equivalent (at least not yet),
e.g., DEFINE_list.
2. New XML elements may be added in the future.
3. DEFAULT_FLAG_VALUE is in serialized form, i.e., the string you can
pass for this flag on the command-line. E.g., for a flag defined
using DEFINE_list, this field may be foo,bar, not ['foo', 'bar'].
4. CURRENT_FLAG_VALUE is produced using str(). This means that the
string 'false' will be represented in the same way as the boolean
False. Using repr() would have removed this ambiguity and simplified
parsing, but would have broken the compatibility with the C++
command-line flags.
5. OPTIONAL_ELEMENTS describe elements relevant for certain kinds of
flags: lower_bound, upper_bound (for flags that specify bounds),
enum_value (for enum flags), list_separator (for flags that consist of
a list of values, separated by a special token).
6. We do not provide any example here: please use --helpxml instead.
This module requires at least python 2.2.1 to run.
"""
import cgi
import getopt
import os
import re
import string
import struct
import sys
# pylint: disable-msg=C6204
try:
import fcntl
except ImportError:
fcntl = None
try:
# Importing termios will fail on non-unix platforms.
import termios
except ImportError:
termios = None
import gflags_validators
# pylint: enable-msg=C6204
# True when running under pychecker.  Used later (see FlagValues.__setitem__)
# to relax the duplicate-flag check, since pychecker re-imports modules and
# would otherwise re-register the same flags.
_RUNNING_PYCHECKER = 'pychecker.python' in sys.modules
def _GetCallingModuleObjectAndName():
  """Returns the module that's calling into this module.

  We generally use this function to get the name of the module calling a
  DEFINE_foo... function.

  Returns:
    A (module object, module name) pair for the nearest caller whose
    globals are not this module's globals.

  Raises:
    AssertionError: if no calling module could be identified.
  """
  # Walk down the stack to find the first globals dict that's not ours.
  # Fetch each frame once (the original called sys._getframe(depth) twice
  # per iteration) and use the idiomatic 'is not' comparison.
  for depth in range(1, sys.getrecursionlimit()):
    globals_for_frame = sys._getframe(depth).f_globals
    if globals_for_frame is not globals():
      module, module_name = _GetModuleObjectAndName(globals_for_frame)
      if module_name is not None:
        return module, module_name
  raise AssertionError("No module was found")
def _GetCallingModule():
  """Returns the name of the module that's calling into this module."""
  unused_module, module_name = _GetCallingModuleObjectAndName()
  return module_name
def _GetThisModuleObjectAndName():
  """Returns: (module object, module name) for this module."""
  # Our own globals() dict identifies this module inside sys.modules.
  this_module_globals = globals()
  return _GetModuleObjectAndName(this_module_globals)
# module exceptions:
class FlagsError(Exception):
  """The base class for all flags errors."""
class DuplicateFlag(FlagsError):
  """Raised if there is a flag naming conflict."""
class CantOpenFlagFileError(FlagsError):
  """Raised if a --flagfile cannot be opened: missing, bad permissions, etc."""
class DuplicateFlagCannotPropagateNoneToSwig(DuplicateFlag):
  """Special case of DuplicateFlag -- SWIG flag value can't be set to None.

  This can be raised when a duplicate flag is created.  Even if
  allow_override is True, we still abort if the new value is None,
  because it's currently impossible to pass a None default value back
  to SWIG.  See FlagValues.SetDefault for details.
  """
class DuplicateFlagError(DuplicateFlag):
  """A DuplicateFlag whose message cites the conflicting definitions.

  A DuplicateFlagError conveys more information than a DuplicateFlag,
  namely the modules where the conflicting definitions occur.  This
  class was created to avoid breaking external modules which depend on
  the existing DuplicateFlags interface.
  """

  def __init__(self, flagname, flag_values, other_flag_values=None):
    """Create a DuplicateFlagError.

    Args:
      flagname: Name of the flag being redefined.
      flag_values: FlagValues object containing the first definition of
          flagname.
      other_flag_values: If this argument is not None, it should be the
          FlagValues object where the second definition of flagname occurs.
          If it is None, we assume that we're being called when attempting
          to create the flag a second time, and we use the module calling
          this one as the source of the second definition.
    """
    self.flagname = flagname
    unknown = '<unknown>'
    first_module = flag_values.FindModuleDefiningFlag(flagname, default=unknown)
    if other_flag_values is not None:
      second_module = other_flag_values.FindModuleDefiningFlag(
          flagname, default=unknown)
    else:
      # No second registry given: blame the module defining the flag now.
      second_module = _GetCallingModule()
    DuplicateFlag.__init__(
        self,
        "The flag '%s' is defined twice. First from %s, Second from %s" % (
            self.flagname, first_module, second_module))
class IllegalFlagValue(FlagsError):
  """The flag command line argument is illegal."""
class UnrecognizedFlag(FlagsError):
  """Raised if a flag is unrecognized."""
# An UnrecognizedFlagError conveys more information than an UnrecognizedFlag.
# Since there are external modules that create DuplicateFlags, the interface to
# DuplicateFlag shouldn't change. The flagvalue will be assigned the full value
# of the flag and its argument, if any, allowing handling of unrecognized flags
# in an exception handler.
# If flagvalue is the empty string, then this exception is due to a
# reference to a flag that was not already defined.
class UnrecognizedFlagError(UnrecognizedFlag):
  """An UnrecognizedFlag that records the offending flag and its value."""

  def __init__(self, flagname, flagvalue=''):
    # Keep the raw flag text around so exception handlers can inspect it.
    self.flagname = flagname
    self.flagvalue = flagvalue
    message = "Unknown command line flag '%s'" % flagname
    UnrecognizedFlag.__init__(self, message)
# Global variable used by expvar
_exported_flags = {}

_help_width = 80  # Default width of help output; GetHelpWidth() may widen it from the tty.
def GetHelpWidth():
  """Returns: an integer, the width of help lines that is used in TextWrap."""
  # Fall back to the module default when we cannot query a terminal:
  # stdout is not a tty, or the unix-only modules failed to import.
  if not sys.stdout.isatty() or termios is None or fcntl is None:
    return _help_width
  try:
    # Ask the terminal driver for its window size; columns is field 1.
    winsize = fcntl.ioctl(sys.stdout, termios.TIOCGWINSZ, '1234')
    columns = struct.unpack('hh', winsize)[1]
    # Emacs mode returns 0.
    # Here we assume that any value below 40 is unreasonable.
    if columns >= 40:
      return columns
    # Returning an int as default is fine, int(int) just returns the int.
    return int(os.getenv('COLUMNS', _help_width))
  except (TypeError, IOError, struct.error):
    return _help_width
def CutCommonSpacePrefix(text):
  """Removes a common space prefix from the lines of a multiline text.

  If the first line does not start with a space, it is left as it is and
  only the remaining lines are searched for a common space prefix.  This
  is especially useful to turn doc strings into help texts: some people
  start the doc comment right after the apostrophes and align the
  following lines, while others put the apostrophes on a separate line.

  The function also drops trailing empty lines and ignores empty lines
  while calculating the common whitespace prefix.

  Args:
    text: text to work on

  Returns:
    the resulting text
  """
  text_lines = text.splitlines()
  # Drop trailing empty lines.
  while text_lines and not text_lines[-1]:
    text_lines.pop()
  if not text_lines:
    return ''
  # If the first line starts with a space it participates in the common
  # prefix computation; otherwise keep it untouched, off to the side.
  if text_lines[0] and text_lines[0][0].isspace():
    text_first_line = []
  else:
    text_first_line = [text_lines.pop(0)]
  # Length of the common leading whitespace, over content lines only.
  common_prefix = os.path.commonprefix([line for line in text_lines if line])
  space_prefix_len = len(common_prefix) - len(common_prefix.lstrip())
  # If we have a common space prefix, drop it from all non-empty lines.
  # (Comprehension replaces the Python-2-only xrange() index loop.)
  if space_prefix_len:
    text_lines = [line[space_prefix_len:] if line else line
                  for line in text_lines]
  return '\n'.join(text_first_line + text_lines)
def TextWrap(text, length=None, indent='', firstline_indent=None, tabs='    '):
  """Wraps a given text to a maximum line length and returns it.

  We turn lines that only contain whitespace into empty lines.  We keep
  new lines and tabs (e.g., we do not treat tabs as spaces).

  Args:
    text: text to wrap
    length: maximum length of a line, includes indentation
        if this is None then use GetHelpWidth()
    indent: indent for all but first line
    firstline_indent: indent for first line; if None, fall back to indent
    tabs: replacement for tabs

  Returns:
    wrapped text

  Raises:
    FlagsError: if indent not shorter than length
    FlagsError: if firstline_indent not shorter than length
  """
  # Get defaults where callee used None
  if length is None:
    length = GetHelpWidth()
  if indent is None:
    indent = ''
  if len(indent) >= length:
    raise FlagsError('Indent must be shorter than length')
  # In 'line' we hold the current output line, which is started with
  # indent (or firstline_indent if available) and then appended with words.
  if firstline_indent is None:
    firstline_indent = ''
    line = indent
  else:
    line = firstline_indent
    if len(firstline_indent) >= length:
      raise FlagsError('First line indent must be shorter than length')

  # If the callee does not care about tabs we simply convert them to
  # spaces.  If the callee wanted tabs to be single spaces we also do
  # that already here.
  if not tabs or tabs == ' ':
    text = text.replace('\t', ' ')
  else:
    # NOTE(review): tabs_are_whitespace is only bound on this branch; the
    # tab-handling code below only runs when '\t' survives in the text,
    # which is exactly when this branch ran, so no NameError is possible.
    tabs_are_whitespace = not tabs.strip()

  # Matches (leading spaces)(run of tabs)(next non-whitespace word).
  line_regex = re.compile('([ ]*)(\t*)([^ \t]+)', re.MULTILINE)

  # Split the text into lines, and each line into (spaces, tabs, word)
  # triples with the regex above.  Finished lines are collected in result[].
  result = []
  for text_line in text.splitlines():
    # Store result length so we can find out whether processing this input
    # line produced any new output content (see end-of-line handling below).
    old_result_len = len(result)
    # Process next line with line_regex. For optimization we do an rstrip().
    # - process tabs (changes either line or word, see below)
    # - process word (first try to squeeze on line, then wrap or force wrap)
    # Spaces found on the line are ignored; separators get re-added while
    # wrapping as needed.
    for spaces, current_tabs, word in line_regex.findall(text_line.rstrip()):
      # If tabs weren't converted to spaces, handle them now
      if current_tabs:
        # If the last thing we added was a space anyway then drop
        # it. But let's not get rid of the indentation.
        if (((result and line != indent) or
             (not result and line != firstline_indent)) and line[-1] == ' '):
          line = line[:-1]
        # Add the tabs: if the replacement is pure whitespace, append it to
        # the line (a later rstrip() will shorten the line if necessary).
        if tabs_are_whitespace:
          line += tabs * len(current_tabs)
        else:
          # if not all tab replacement is whitespace we prepend it to the word
          word = tabs * len(current_tabs) + word
      # Handle the case where word cannot be squeezed onto current last line
      if len(line) + len(word) > length and len(indent) + len(word) <= length:
        result.append(line.rstrip())
        line = indent + word
        word = ''
        # No space left on line or can we append a separator space?
        if len(line) + 1 >= length:
          result.append(line.rstrip())
          line = indent
        else:
          line += ' '
      # Add word and shorten it up to allowed line length. Restart next
      # line with indent and repeat, or add a space if we're done (word
      # finished). This deals with words that cannot fit on one line
      # (e.g. indent + word longer than allowed line length).
      while len(line) + len(word) >= length:
        line += word
        result.append(line[:length])
        word = line[length:]
        line = indent
      # Default case, simply append the word and a trailing separator space
      if word:
        line += word + ' '
    # End of input line. If we have content we finish the line. If the
    # current line is just the indent but we had content during this
    # original line then we need to add an empty line.
    if (result and line != indent) or (not result and line != firstline_indent):
      result.append(line.rstrip())
    elif len(result) == old_result_len:
      result.append('')
    line = indent
  return '\n'.join(result)
def DocToHelp(doc):
  """Takes a __doc__ string and reformats it as help.

  Strips surrounding whitespace, empties whitespace-only lines, removes
  the common leading indentation, and joins single newlines between
  non-whitespace characters into spaces (double newlines survive as
  paragraph breaks).

  Args:
    doc: the docstring to reformat.

  Returns:
    The reformatted help text.
  """
  # Get rid of starting and ending white space. Using lstrip() or even
  # strip() could drop more than maximum of first line and right space
  # of last line.
  doc = doc.strip()
  # Get rid of all empty lines
  whitespace_only_line = re.compile(r'^[ \t]+$', re.M)
  doc = whitespace_only_line.sub('', doc)
  # Cut out common space at line beginnings
  doc = CutCommonSpacePrefix(doc)
  # Just like this module's comment, comments tend to be aligned somehow.
  # In other words they all start with the same amount of white space
  # 1) keep double new lines
  # 2) keep ws after new lines if not empty line
  # 3) all other new lines shall be changed to a space
  # Solution: Match new lines between non white space and replace with space.
  # BUG FIX: the original called re.sub(pattern, repl, doc, re.M), which
  # passed re.M (== 8) as the *count* argument, silently limiting the
  # substitution to the first 8 newlines.  re.M has no effect on this
  # pattern anyway (no ^ or $), so it is simply dropped.
  doc = re.sub(r'(?<=\S)\n(?=\S)', ' ', doc)
  return doc
def _GetModuleObjectAndName(globals_dict):
  """Returns the module that defines a global environment, and its name.

  Args:
    globals_dict: A dictionary that should correspond to an environment
      providing the values of the globals.

  Returns:
    A pair consisting of (1) module object and (2) module name (a
    string).  Returns (None, None) if the module could not be
    identified.
  """
  # The use of .items() (instead of .iteritems()) is NOT a mistake: if a
  # parallel thread imports a module while we iterate over .iteritems()
  # (not nice, but possible), we get a RuntimeError.  Hence we take the
  # slightly slower but safer snapshot that .items() provides.
  for name, module in sys.modules.items():
    module_globals = getattr(module, '__dict__', None)
    if module_globals is not globals_dict:
      continue
    if name == '__main__':
      # Pick a more informative name for the main module.
      name = sys.argv[0]
    return (module, name)
  return (None, None)
def _GetMainModule():
  """Returns: string, name of the module from which execution started."""
  # First, try the same logic used by _GetCallingModuleObjectAndName(),
  # i.e. map a globals dict back to its module via _GetModuleObjectAndName().
  # The globals of the main module are (normally) the globals of the
  # deepest (oldest) frame on the stack.
  frame = sys._getframe(0)
  while frame.f_back is not None:
    frame = frame.f_back
  unused_module, main_module_name = (
      _GetModuleObjectAndName(frame.f_globals))
  # That strategy fails in some cases (e.g., tools that compute code
  # coverage by redefining, among other things, the main module).  If so,
  # just fall back to sys.argv[0].  We could probably always do that, but
  # it's safest to first try the same logic as the callers above.
  if main_module_name is None:
    main_module_name = sys.argv[0]
  return main_module_name
class FlagValues:
"""Registry of 'Flag' objects.
A 'FlagValues' can then scan command line arguments, passing flag
arguments through to the 'Flag' objects that it owns. It also
provides easy access to the flag values. Typically only one
'FlagValues' object is needed by an application: gflags.FLAGS
This class is heavily overloaded:
'Flag' objects are registered via __setitem__:
FLAGS['longname'] = x # register a new flag
The .value attribute of the registered 'Flag' objects can be accessed
as attributes of this 'FlagValues' object, through __getattr__. Both
the long and short name of the original 'Flag' objects can be used to
access its value:
FLAGS.longname # parsed flag value
FLAGS.x # parsed flag value (short name)
Command line arguments are scanned and passed to the registered 'Flag'
objects through the __call__ method. Unparsed arguments, including
argv[0] (e.g. the program name) are returned.
argv = FLAGS(sys.argv) # scan command line arguments
The original registered Flag objects can be retrieved through the use
of the dictionary-like operator, __getitem__:
x = FLAGS['longname'] # access the registered Flag object
The str() operator of a 'FlagValues' object provides help for all of
the registered 'Flag' objects.
"""
def __init__(self):
# Since everything in this class is so heavily overloaded, the only
# way of defining and using fields is to access __dict__ directly.
# Dictionary: flag name (string) -> Flag object.
self.__dict__['__flags'] = {}
# Dictionary: module name (string) -> list of Flag objects that are defined
# by that module.
self.__dict__['__flags_by_module'] = {}
# Dictionary: module id (int) -> list of Flag objects that are defined by
# that module.
self.__dict__['__flags_by_module_id'] = {}
# Dictionary: module name (string) -> list of Flag objects that are
# key for that module.
self.__dict__['__key_flags_by_module'] = {}
# Set if we should use new style gnu_getopt rather than getopt when parsing
# the args. Only possible with Python 2.3+
self.UseGnuGetOpt(False)
def UseGnuGetOpt(self, use_gnu_getopt=True):
"""Use GNU-style scanning. Allows mixing of flag and non-flag arguments.
See http://docs.python.org/library/getopt.html#getopt.gnu_getopt
Args:
use_gnu_getopt: wether or not to use GNU style scanning.
"""
self.__dict__['__use_gnu_getopt'] = use_gnu_getopt
def IsGnuGetOpt(self):
return self.__dict__['__use_gnu_getopt']
def FlagDict(self):
return self.__dict__['__flags']
def FlagsByModuleDict(self):
"""Returns the dictionary of module_name -> list of defined flags.
Returns:
A dictionary. Its keys are module names (strings). Its values
are lists of Flag objects.
"""
return self.__dict__['__flags_by_module']
def FlagsByModuleIdDict(self):
"""Returns the dictionary of module_id -> list of defined flags.
Returns:
A dictionary. Its keys are module IDs (ints). Its values
are lists of Flag objects.
"""
return self.__dict__['__flags_by_module_id']
def KeyFlagsByModuleDict(self):
"""Returns the dictionary of module_name -> list of key flags.
Returns:
A dictionary. Its keys are module names (strings). Its values
are lists of Flag objects.
"""
return self.__dict__['__key_flags_by_module']
def _RegisterFlagByModule(self, module_name, flag):
"""Records the module that defines a specific flag.
We keep track of which flag is defined by which module so that we
can later sort the flags by module.
Args:
module_name: A string, the name of a Python module.
flag: A Flag object, a flag that is key to the module.
"""
flags_by_module = self.FlagsByModuleDict()
flags_by_module.setdefault(module_name, []).append(flag)
def _RegisterFlagByModuleId(self, module_id, flag):
"""Records the module that defines a specific flag.
Args:
module_id: An int, the ID of the Python module.
flag: A Flag object, a flag that is key to the module.
"""
flags_by_module_id = self.FlagsByModuleIdDict()
flags_by_module_id.setdefault(module_id, []).append(flag)
def _RegisterKeyFlagForModule(self, module_name, flag):
"""Specifies that a flag is a key flag for a module.
Args:
module_name: A string, the name of a Python module.
flag: A Flag object, a flag that is key to the module.
"""
key_flags_by_module = self.KeyFlagsByModuleDict()
# The list of key flags for the module named module_name.
key_flags = key_flags_by_module.setdefault(module_name, [])
# Add flag, but avoid duplicates.
if flag not in key_flags:
key_flags.append(flag)
def _GetFlagsDefinedByModule(self, module):
"""Returns the list of flags defined by a module.
Args:
module: A module object or a module name (a string).
Returns:
A new list of Flag objects. Caller may update this list as he
wishes: none of those changes will affect the internals of this
FlagValue object.
"""
if not isinstance(module, str):
module = module.__name__
return list(self.FlagsByModuleDict().get(module, []))
def _GetKeyFlagsForModule(self, module):
"""Returns the list of key flags for a module.
Args:
module: A module object or a module name (a string)
Returns:
A new list of Flag objects. Caller may update this list as he
wishes: none of those changes will affect the internals of this
FlagValue object.
"""
if not isinstance(module, str):
module = module.__name__
# Any flag is a key flag for the module that defined it. NOTE:
# key_flags is a fresh list: we can update it without affecting the
# internals of this FlagValues object.
key_flags = self._GetFlagsDefinedByModule(module)
# Take into account flags explicitly declared as key for a module.
for flag in self.KeyFlagsByModuleDict().get(module, []):
if flag not in key_flags:
key_flags.append(flag)
return key_flags
def FindModuleDefiningFlag(self, flagname, default=None):
"""Return the name of the module defining this flag, or default.
Args:
flagname: Name of the flag to lookup.
default: Value to return if flagname is not defined. Defaults
to None.
Returns:
The name of the module which registered the flag with this name.
If no such module exists (i.e. no flag with this name exists),
we return default.
"""
for module, flags in self.FlagsByModuleDict().iteritems():
for flag in flags:
if flag.name == flagname or flag.short_name == flagname:
return module
return default
def FindModuleIdDefiningFlag(self, flagname, default=None):
  """Return the ID of the module defining this flag, or default.

  Args:
    flagname: Name of the flag to lookup.
    default: Value to return if flagname is not defined. Defaults
      to None.

  Returns:
    The ID of the module which registered the flag with this name.
    If no such module exists (i.e. no flag with this name exists),
    we return default.
  """
  for module_id, flags in self.FlagsByModuleIdDict().iteritems():
    if any(f.name == flagname or f.short_name == flagname for f in flags):
      return module_id
  return default
def AppendFlagValues(self, flag_values):
  """Appends flags registered in another FlagValues instance.

  Args:
    flag_values: registry to copy from
  """
  for flag_name, flag in flag_values.FlagDict().iteritems():
    # Each flags with shortname appears here twice (once under its
    # normal name, and again with its short name). To prevent
    # problems (DuplicateFlagError) with double flag registration, we
    # perform a check to make sure that the entry we're looking at is
    # for its normal name.
    if flag_name == flag.name:
      try:
        # __setitem__ registers both the long and (if any) short name.
        self[flag_name] = flag
      except DuplicateFlagError:
        # Re-raise with the source registry attached so the error
        # message can point at both FlagValues objects.
        raise DuplicateFlagError(flag_name, self,
                                 other_flag_values=flag_values)
def RemoveFlagValues(self, flag_values):
  """Remove flags that were previously appended from another FlagValues.

  Args:
    flag_values: registry containing flags to remove.
  """
  # Deleting by name unregisters the flag and scrubs the per-module
  # bookkeeping (see __delattr__).
  for name in flag_values.FlagDict():
    self.__delattr__(name)
def __setitem__(self, name, flag):
  """Registers a new flag variable.

  Args:
    name: A string, the flag's long (or short) name.
    flag: A Flag object to register under that name.

  Raises:
    IllegalFlagValue: if flag is not a Flag instance.
    FlagsError: if name is not a non-empty string.
    DuplicateFlagError: if a non-overridable flag with this name (or
      its short name) is already registered.
  """
  fl = self.FlagDict()
  if not isinstance(flag, Flag):
    raise IllegalFlagValue(flag)
  if not isinstance(name, type("")):
    raise FlagsError("Flag name must be a string")
  if len(name) == 0:
    raise FlagsError("Flag name cannot be empty")
  # If running under pychecker, duplicate keys are likely to be
  # defined. Disable check for duplicate keys when pycheck'ing.
  if (name in fl and not flag.allow_override and
      not fl[name].allow_override and not _RUNNING_PYCHECKER):
    module, module_name = _GetCallingModuleObjectAndName()
    if (self.FindModuleDefiningFlag(name) == module_name and
        id(module) != self.FindModuleIdDefiningFlag(name)):
      # If the flag has already been defined by a module with the same name,
      # but a different ID, we can stop here because it indicates that the
      # module is simply being imported a subsequent time.
      return
    raise DuplicateFlagError(name, self)
  short_name = flag.short_name
  if short_name is not None:
    # The short name (if any) is registered in the same dict, pointing
    # at the same Flag object.
    if (short_name in fl and not flag.allow_override and
        not fl[short_name].allow_override and not _RUNNING_PYCHECKER):
      raise DuplicateFlagError(short_name, self)
    fl[short_name] = flag
  fl[name] = flag
  # Record the flag in the module-wide export table as well.
  global _exported_flags
  _exported_flags[name] = flag
def __getitem__(self, name):
  """Retrieves the Flag object for the flag --name."""
  # Raises KeyError if no flag (long or short name) called name exists.
  return self.FlagDict()[name]
def __getattr__(self, name):
  """Retrieves the 'value' attribute of the flag --name.

  Raises:
    AttributeError: if no flag called name is registered.
  """
  flags = self.FlagDict()
  if name in flags:
    return flags[name].value
  raise AttributeError(name)
def __setattr__(self, name, value):
  """Sets the 'value' attribute of the flag --name.

  The flag's validators are re-run after the assignment.
  """
  flag = self.FlagDict()[name]
  flag.value = value
  self._AssertValidators(flag.validators)
  return value
def _AssertAllValidators(self):
  # Collect every validator exactly once across all flags, then verify
  # the whole set in one pass.
  all_validators = set()
  for flag in self.FlagDict().itervalues():
    all_validators.update(flag.validators)
  self._AssertValidators(all_validators)
def _AssertValidators(self, validators):
  """Assert if all validators in the list are satisfied.

  Asserts validators in the order they were created.

  Args:
    validators: Iterable(gflags_validators.Validator), validators to be
      verified
  Raises:
    AttributeError: if validators work with a non-existing flag.
    IllegalFlagValue: if validation fails for at least one validator
  """
  # Sorting by insertion_index makes failure reporting deterministic:
  # the earliest-created failing validator wins.
  for validator in sorted(
      validators, key=lambda validator: validator.insertion_index):
    try:
      validator.Verify(self)
    except gflags_validators.Error, e:
      message = validator.PrintFlagsWithValues(self)
      raise IllegalFlagValue('%s: %s' % (message, str(e)))
def _FlagIsRegistered(self, flag_obj):
  """Checks whether a Flag object is registered under some name.

  Note: this is non trivial: in addition to its normal name, a flag
  may have a short name too. In self.FlagDict(), both the normal and
  the short name are mapped to the same flag object. E.g., calling
  only "del FLAGS.short_name" is not unregistering the corresponding
  Flag object (it is still registered under the longer name).

  Args:
    flag_obj: A Flag object.

  Returns:
    A boolean: True iff flag_obj is registered under some name.
  """
  flag_dict = self.FlagDict()
  # Registered under its long name?
  if flag_dict.get(flag_obj.name, None) == flag_obj:
    return True
  # Registered under its short name?  No other key can map to this
  # object, so a full scan of the dict values is unnecessary.
  short = flag_obj.short_name
  return short is not None and flag_dict.get(short, None) == flag_obj
def __delattr__(self, flag_name):
  """Deletes a previously-defined flag from a flag object.

  This method makes sure we can delete a flag by using

    del flag_values_object.<flag_name>

  E.g.,

    gflags.DEFINE_integer('foo', 1, 'Integer flag.')
    del gflags.FLAGS.foo

  Args:
    flag_name: A string, the name of the flag to be deleted.

  Raises:
    AttributeError: When there is no registered flag named flag_name.
  """
  fl = self.FlagDict()
  if flag_name not in fl:
    raise AttributeError(flag_name)
  # Remove the requested mapping first; the flag may remain registered
  # under its other (short or long) name.
  flag_obj = fl[flag_name]
  del fl[flag_name]
  if not self._FlagIsRegistered(flag_obj):
    # If the Flag object indicated by flag_name is no longer
    # registered (please see the docstring of _FlagIsRegistered), then
    # we delete the occurrences of the flag object in all our internal
    # dictionaries.
    self.__RemoveFlagFromDictByModule(self.FlagsByModuleDict(), flag_obj)
    self.__RemoveFlagFromDictByModule(self.FlagsByModuleIdDict(), flag_obj)
    self.__RemoveFlagFromDictByModule(self.KeyFlagsByModuleDict(), flag_obj)
def __RemoveFlagFromDictByModule(self, flags_by_module_dict, flag_obj):
  """Removes every occurrence of a flag object from a module -> flags map.

  Args:
    flags_by_module_dict: A dictionary that maps module names to lists of
      flags.
    flag_obj: A flag object.
  """
  for unused_name, module_flags in flags_by_module_dict.iteritems():
    # A flag can appear several times in the same module's list, so
    # keep removing until no occurrence is left.
    while flag_obj in module_flags:
      module_flags.remove(flag_obj)
def SetDefault(self, name, value):
  """Changes the default value of the named flag object.

  Args:
    name: A string, the flag name to update.
    value: The new default value.

  Raises:
    AttributeError: if no flag called name is registered.
  """
  flags = self.FlagDict()
  if name not in flags:
    raise AttributeError(name)
  flag = flags[name]
  flag.SetDefault(value)
  # Changing the default also changes the current value, so the flag's
  # validators must be re-checked.
  self._AssertValidators(flag.validators)
def __contains__(self, name):
  """Returns True if name is a value (flag) in the dict."""
  # Matches both long and short names, since both are FlagDict keys.
  return name in self.FlagDict()

# Legacy dict-style alias: FLAGS.has_key(name) == (name in FLAGS).
has_key = __contains__  # a synonym for __contains__()
def __iter__(self):
  # Iterates over all registered flag names, short names included.
  return iter(self.FlagDict())
def __call__(self, argv):
  """Parses flags from argv; stores parsed flags into this FlagValues object.

  All unparsed arguments are returned. Flags are parsed using the GNU
  Program Argument Syntax Conventions, using getopt:

  http://www.gnu.org/software/libc/manual/html_mono/libc.html#Getopt

  Args:
    argv: argument list. Can be of any type that may be converted to a list.

  Returns:
    The list of arguments not parsed as options, including argv[0]

  Raises:
    FlagsError: on any parsing error
  """
  # Support any sequence type that can be converted to a list
  argv = list(argv)
  shortopts = ""
  longopts = []
  fl = self.FlagDict()
  # This pre parses the argv list for --flagfile=<> options.
  argv = argv[:1] + self.ReadFlagsFromFiles(argv[1:], force_gnu=False)
  # Correct the argv to support the google style of passing boolean
  # parameters. Boolean parameters may be passed by using --mybool,
  # --nomybool, --mybool=(true|false|1|0). getopt does not support
  # having options that may or may not have a parameter. We replace
  # instances of the short form --mybool and --nomybool with their
  # full forms: --mybool=(true|false).
  original_argv = list(argv)  # list() makes a copy
  shortest_matches = None
  for name, flag in fl.items():
    if not flag.boolean:
      continue
    if shortest_matches is None:
      # Determine the smallest allowable prefix for all flag names
      # (computed lazily: only needed if at least one boolean exists).
      shortest_matches = self.ShortestUniquePrefixes(fl)
    no_name = 'no' + name
    prefix = shortest_matches[name]
    no_prefix = shortest_matches[no_name]
    # Replace all occurrences of this boolean with extended forms
    for arg_idx in range(1, len(argv)):
      arg = argv[arg_idx]
      if arg.find('=') >= 0: continue
      if arg.startswith('--'+prefix) and ('--'+name).startswith(arg):
        argv[arg_idx] = ('--%s=true' % name)
      elif arg.startswith('--'+no_prefix) and ('--'+no_name).startswith(arg):
        argv[arg_idx] = ('--%s=false' % name)
  # Loop over all of the flags, building up the lists of short options
  # and long options that will be passed to getopt. Short options are
  # specified as a string of letters, each letter followed by a colon
  # if it takes an argument. Long options are stored in an array of
  # strings. Each string ends with an '=' if it takes an argument.
  for name, flag in fl.items():
    longopts.append(name + "=")
    if len(name) == 1:  # one-letter option: allow short flag type also
      shortopts += name
      if not flag.boolean:
        shortopts += ":"
  longopts.append('undefok=')
  undefok_flags = []
  # In case --undefok is specified, loop to pick up unrecognized
  # options one by one.
  unrecognized_opts = []
  args = argv[1:]
  while True:
    try:
      if self.__dict__['__use_gnu_getopt']:
        optlist, unparsed_args = getopt.gnu_getopt(args, shortopts, longopts)
      else:
        optlist, unparsed_args = getopt.getopt(args, shortopts, longopts)
      break
    except getopt.GetoptError, e:
      if not e.opt or e.opt in fl:
        # Not an unrecognized option, re-raise the exception as a FlagsError
        raise FlagsError(e)
      # Remove offender from args and try again
      for arg_index in range(len(args)):
        if ((args[arg_index] == '--' + e.opt) or
            (args[arg_index] == '-' + e.opt) or
            (args[arg_index].startswith('--' + e.opt + '='))):
          unrecognized_opts.append((e.opt, args[arg_index]))
          args = args[0:arg_index] + args[arg_index+1:]
          break
      else:
        # We should have found the option, so we don't expect to get
        # here. We could assert, but raising the original exception
        # might work better.
        raise FlagsError(e)
  for name, arg in optlist:
    if name == '--undefok':
      flag_names = arg.split(',')
      undefok_flags.extend(flag_names)
      # For boolean flags, if --undefok=boolflag is specified, then we should
      # also accept --noboolflag, in addition to --boolflag.
      # Since we don't know the type of the undefok'd flag, this will affect
      # non-boolean flags as well.
      # NOTE: You shouldn't use --undefok=noboolflag, because then we will
      # accept --nonoboolflag here. We are choosing not to do the conversion
      # from noboolflag -> boolflag because of the ambiguity that flag names
      # can start with 'no'.
      undefok_flags.extend('no' + name for name in flag_names)
      continue
    if name.startswith('--'):
      # long option
      name = name[2:]
      short_option = 0
    else:
      # short option
      name = name[1:]
      short_option = 1
    if name in fl:
      flag = fl[name]
      if flag.boolean and short_option: arg = 1
      flag.Parse(arg)
  # If there were unrecognized options, raise an exception unless
  # the options were named via --undefok.
  for opt, value in unrecognized_opts:
    if opt not in undefok_flags:
      raise UnrecognizedFlagError(opt, value)
  if unparsed_args:
    if self.__dict__['__use_gnu_getopt']:
      # if using gnu_getopt just return the program name + remainder of argv.
      ret_val = argv[:1] + unparsed_args
    else:
      # unparsed_args becomes the first non-flag detected by getopt to
      # the end of argv. Because argv may have been modified above,
      # return original_argv for this region.
      ret_val = argv[:1] + original_argv[-len(unparsed_args):]
  else:
    ret_val = argv[:1]
  self._AssertAllValidators()
  return ret_val
def Reset(self):
  """Resets the values to the point before FLAGS(argv) was called."""
  # Unparse() restores each flag's default value and clears .present.
  for flag in self.FlagDict().values():
    flag.Unparse()
def RegisteredFlags(self):
  """Returns: a list of the names and short names of all registered flags."""
  return [name for name in self.FlagDict()]
def FlagValuesDict(self):
  """Returns: a dictionary that maps flag names to flag values.

  Both long and short names appear as keys, exactly as in FlagDict().
  """
  # Iterate the flag registry directly instead of calling FlagDict()
  # once per name: same result, one dict construction, single pass.
  flag_values = {}
  for flag_name, flag in self.FlagDict().items():
    flag_values[flag_name] = flag.value
  return flag_values
def __str__(self):
  """Generates a help string for all known flags."""
  # Delegates to GetHelp() with its default (empty) prefix.
  return self.GetHelp()
def GetHelp(self, prefix=''):
  """Generates a help string for all known flags.

  Args:
    prefix: A string prepended to each generated help line.

  Returns:
    The multi-line help text for every registered flag, grouped by
    defining module when that information is available.
  """
  helplist = []
  flags_by_module = self.FlagsByModuleDict()
  if flags_by_module:
    modules = sorted(flags_by_module)
    # Print the help for the main module first, if possible.
    main_module = _GetMainModule()
    if main_module in modules:
      modules.remove(main_module)
      modules = [main_module] + modules
    for module in modules:
      self.__RenderOurModuleFlags(module, helplist)
    # The special flags (e.g. --flagfile, --undefok) are listed under
    # the 'gflags' heading.
    self.__RenderModuleFlags('gflags',
                             _SPECIAL_FLAGS.FlagDict().values(),
                             helplist)
  else:
    # Just print one long list of flags.
    self.__RenderFlagList(
        self.FlagDict().values() + _SPECIAL_FLAGS.FlagDict().values(),
        helplist, prefix)
  return '\n'.join(helplist)
def __RenderModuleFlags(self, module, flags, output_lines, prefix=""):
  """Appends a module heading plus help for the given flags to output_lines."""
  module_name = module if isinstance(module, str) else module.__name__
  output_lines.append('\n%s%s:' % (prefix, module_name))
  self.__RenderFlagList(flags, output_lines, prefix + " ")
def __RenderOurModuleFlags(self, module, output_lines, prefix=""):
  """Appends help for the flags defined by module, if it defined any."""
  defined = self._GetFlagsDefinedByModule(module)
  if defined:
    self.__RenderModuleFlags(module, defined, output_lines, prefix)
def __RenderOurModuleKeyFlags(self, module, output_lines, prefix=""):
  """Generates a help string for the key flags of a given module.

  Args:
    module: A module object or a module name (a string).
    output_lines: A list of strings. The generated help message
      lines will be appended to this list.
    prefix: A string that is prepended to each generated help line.
  """
  flags = self._GetKeyFlagsForModule(module)
  if flags:
    self.__RenderModuleFlags(module, flags, output_lines, prefix)
def ModuleHelp(self, module):
  """Describe the key flags of a module.

  Args:
    module: A module object or a module name (a string).

  Returns:
    string describing the key flags of a module.
  """
  lines = []
  self.__RenderOurModuleKeyFlags(module, lines)
  return '\n'.join(lines)
def MainModuleHelp(self):
  """Describe the key flags of the main module.

  Returns:
    string describing the key flags of a module.
  """
  # The "main module" is whichever module owns the program entry point.
  return self.ModuleHelp(_GetMainModule())
def __RenderFlagList(self, flaglist, output_lines, prefix=" "):
  # Renders one help entry per live, distinct flag in flaglist (sorted
  # by name) and appends the formatted text to output_lines.
  fl = self.FlagDict()
  special_fl = _SPECIAL_FLAGS.FlagDict()
  flaglist = [(flag.name, flag) for flag in flaglist]
  flaglist.sort()
  flagset = {}
  for (name, flag) in flaglist:
    # It's possible this flag got deleted or overridden since being
    # registered in the per-module flaglist. Check now against the
    # canonical source of current flag information, the FlagDict.
    if fl.get(name, None) != flag and special_fl.get(name, None) != flag:
      # a different flag is using this name now
      continue
    # only print help once
    if flag in flagset: continue
    flagset[flag] = 1
    flaghelp = ""
    if flag.short_name: flaghelp += "-%s," % flag.short_name
    if flag.boolean:
      flaghelp += "--[no]%s" % flag.name + ":"
    else:
      flaghelp += "--%s" % flag.name + ":"
    flaghelp += " "
    if flag.help:
      flaghelp += flag.help
    # Continuation lines are indented one extra space past the prefix.
    flaghelp = TextWrap(flaghelp, indent=prefix+" ",
                        firstline_indent=prefix)
    if flag.default_as_str:
      flaghelp += "\n"
      flaghelp += TextWrap("(default: %s)" % flag.default_as_str,
                           indent=prefix+" ")
    if flag.parser.syntactic_help:
      flaghelp += "\n"
      flaghelp += TextWrap("(%s)" % flag.parser.syntactic_help,
                           indent=prefix+" ")
    output_lines.append(flaghelp)
def get(self, name, default):
  """Returns the value of a flag (if not None) or a default value.

  Args:
    name: A string, the name of a flag.
    default: Default value to use if the flag value is None.

  Returns:
    The flag's value if it is not None, otherwise default.
  """
  value = self.__getattr__(name)
  # Explicit None test: falsy-but-set values such as 0 or "" must be
  # returned as-is, not replaced by the default.
  if value is None:
    return default
  return value
def ShortestUniquePrefixes(self, fl):
  """Returns: dictionary; maps flag names to their shortest unique prefix."""
  # Collect every name (plus the implicit 'no<name>' negation for each
  # boolean flag) and sort so that similar names become neighbours.
  names = []
  for name, flag in fl.items():
    names.append(name)
    if flag.boolean:
      names.append('no%s' % name)
  names.sort()
  # A name's shortest unique prefix is one character past its longest
  # common prefix with either sorted neighbour; the previous
  # neighbour's divergence point is carried forward in prev_idx.
  shortest_matches = {}
  prev_idx = 0
  last = len(names) - 1
  for pos, curr in enumerate(names):
    nxt = names[pos + 1] if pos < last else None
    nxt_len = len(nxt) if nxt is not None else 0
    for curr_idx in range(len(curr)):
      if nxt is None or curr_idx >= nxt_len or curr[curr_idx] != nxt[curr_idx]:
        # curr is longer than nxt, or they diverge at curr_idx.
        shortest_matches[curr] = curr[:max(prev_idx, curr_idx) + 1]
        prev_idx = curr_idx
        break
    else:
      # curr is a (possibly equal) prefix of nxt.
      shortest_matches[curr] = curr
      prev_idx = curr_idx + 1  # nxt will need at least one more char
  return shortest_matches
def __IsFlagFileDirective(self, flag_string):
  """Checks whether flag_string contain a --flagfile=<foo> directive."""
  # Accepts both single- and double-dash spellings, either as a bare
  # token (filename arrives as the next argument) or with '=filename'.
  if isinstance(flag_string, type("")):
    if flag_string in ('--flagfile', '-flagfile'):
      return 1
    if flag_string.startswith(('--flagfile=', '-flagfile=')):
      return 1
  return 0
def ExtractFilename(self, flagfile_str):
  """Returns filename from a flagfile_str of form -[-]flagfile=filename.

  The cases of --flagfile foo and -flagfile foo shouldn't be hitting
  this function, as they are dealt with in the level above this
  function.

  Raises:
    FlagsError: if flagfile_str does not start with a flagfile directive.
  """
  for directive in ('--flagfile=', '-flagfile='):
    if flagfile_str.startswith(directive):
      # Strip surrounding whitespace, then expand a leading '~'.
      return os.path.expanduser(flagfile_str[len(directive):].strip())
  raise FlagsError('Hit illegal --flagfile type: %s' % flagfile_str)
def __GetFlagFileLines(self, filename, parsed_file_list):
  """Returns the useful (!=comments, etc) lines from a file with flags.

  Args:
    filename: A string, the name of the flag file.
    parsed_file_list: A list of the names of the files we have
      already read. MUTATED BY THIS FUNCTION.

  Returns:
    List of strings. See the note below.

  NOTE(springer): This function checks for a nested --flagfile=<foo>
  tag and handles the lower file recursively. It returns a list of
  all the lines that _could_ contain command flags. This is
  EVERYTHING except whitespace lines and comments (lines starting
  with '#' or '//').

  Raises:
    CantOpenFlagFileError: if filename cannot be opened for reading.
  """
  line_list = []  # All line from flagfile.
  flag_line_list = []  # Subset of lines w/o comments, blanks, flagfile= tags.
  try:
    file_obj = open(filename, 'r')
  except IOError, e_msg:
    raise CantOpenFlagFileError('ERROR:: Unable to open flagfile: %s' % e_msg)
  line_list = file_obj.readlines()
  file_obj.close()
  # Record this file before recursing so circular includes are detected.
  parsed_file_list.append(filename)
  # This is where we check each line in the file we just read.
  for line in line_list:
    if line.isspace():
      pass
    # Checks for comment (a line that starts with '#').
    elif line.startswith('#') or line.startswith('//'):
      pass
    # Checks for a nested "--flagfile=<bar>" flag in the current file.
    # If we find one, recursively parse down into that file.
    elif self.__IsFlagFileDirective(line):
      sub_filename = self.ExtractFilename(line)
      # We do a little safety check for reparsing a file we've already done.
      if not sub_filename in parsed_file_list:
        included_flags = self.__GetFlagFileLines(sub_filename,
                                                 parsed_file_list)
        flag_line_list.extend(included_flags)
      else:  # Case of hitting a circularly included file.
        sys.stderr.write('Warning: Hit circular flagfile dependency: %s\n' %
                         (sub_filename,))
    else:
      # Any line that's not a comment or a nested flagfile should get
      # copied into 2nd position. This leaves earlier arguments
      # further back in the list, thus giving them higher priority.
      flag_line_list.append(line.strip())
  return flag_line_list
def ReadFlagsFromFiles(self, argv, force_gnu=True):
  """Processes command line args, but also allow args to be read from file.

  Args:
    argv: A list of strings, usually sys.argv[1:], which may contain one or
      more flagfile directives of the form --flagfile="./filename".
      Note that the name of the program (sys.argv[0]) should be omitted.
    force_gnu: If False, --flagfile parsing obeys normal flag semantics.
      If True, --flagfile parsing instead follows gnu_getopt semantics.
      *** WARNING *** force_gnu=False may become the future default!

  Returns:
    A new list which has the original list combined with what we read
    from any flagfile(s).

  References: Global gflags.FLAG class instance.

  This function should be called before the normal FLAGS(argv) call.
  This function scans the input list for a flag that looks like:
  --flagfile=<somefile>. Then it opens <somefile>, reads all valid key
  and value pairs and inserts them into the input list between the
  first item of the list and any subsequent items in the list.

  Note that your application's flags are still defined the usual way
  using gflags DEFINE_flag() type functions.

  Notes (assuming we're getting a commandline of some sort as our input):
  --> Flags from the command line argv _should_ always take precedence!
  --> A further "--flagfile=<otherfile.cfg>" CAN be nested in a flagfile.
      It will be processed after the parent flag file is done.
  --> For duplicate flags, first one we hit should "win".
  --> In a flagfile, a line beginning with # or // is a comment.
  --> Entirely blank lines _should_ be ignored.
  """
  parsed_file_list = []
  rest_of_args = argv
  new_argv = []
  # Consume argv one token at a time, expanding flagfile directives
  # in place as they are encountered.
  while rest_of_args:
    current_arg = rest_of_args[0]
    rest_of_args = rest_of_args[1:]
    if self.__IsFlagFileDirective(current_arg):
      # This handles the case of -(-)flagfile foo. In this case the
      # next arg really is part of this one.
      if current_arg == '--flagfile' or current_arg == '-flagfile':
        if not rest_of_args:
          raise IllegalFlagValue('--flagfile with no argument')
        flag_filename = os.path.expanduser(rest_of_args[0])
        rest_of_args = rest_of_args[1:]
      else:
        # This handles the case of (-)-flagfile=foo.
        flag_filename = self.ExtractFilename(current_arg)
      new_argv.extend(
          self.__GetFlagFileLines(flag_filename, parsed_file_list))
    else:
      new_argv.append(current_arg)
      # Stop parsing after '--', like getopt and gnu_getopt.
      if current_arg == '--':
        break
      # Stop parsing after a non-flag, like getopt.
      if not current_arg.startswith('-'):
        if not force_gnu and not self.__dict__['__use_gnu_getopt']:
          break
  # Whatever remains after the early break is passed through unchanged.
  if rest_of_args:
    new_argv.extend(rest_of_args)
  return new_argv
def FlagsIntoString(self):
  """Returns a string with the flags assignments from this FlagValues object.

  This function ignores flags whose value is None. Each flag
  assignment is separated by a newline.

  NOTE: MUST mirror the behavior of the C++ CommandlineFlagsIntoString
  from http://code.google.com/p/google-gflags
  """
  # Collect the serialized assignments and join once, instead of
  # quadratic string concatenation in a loop.
  serialized = [flag.Serialize() + '\n'
                for flag in self.FlagDict().values()
                if flag.value is not None]
  return ''.join(serialized)
def AppendFlagsIntoFile(self, filename):
  """Appends all flags assignments from this FlagInfo object to a file.

  Output will be in the format of a flagfile.

  Args:
    filename: A string, path of the file to append to (created if
      missing).

  NOTE: MUST mirror the behavior of the C++ AppendFlagsIntoFile
  from http://code.google.com/p/google-gflags
  """
  out_file = open(filename, 'a')
  try:
    out_file.write(self.FlagsIntoString())
  finally:
    # Close the file even if the write raises, so the descriptor is
    # never leaked.
    out_file.close()
def WriteHelpInXMLFormat(self, outfile=None):
  """Outputs flag documentation in XML format.

  NOTE: We use element names that are consistent with those used by
  the C++ command-line flag library, from
  http://code.google.com/p/google-gflags
  We also use a few new elements (e.g., <key>), but we do not
  interfere / overlap with existing XML elements used by the C++
  library. Please maintain this consistency.

  Args:
    outfile: File object we write to. Default None means sys.stdout.
  """
  outfile = outfile or sys.stdout
  outfile.write('<?xml version=\"1.0\"?>\n')
  outfile.write('<AllFlags>\n')
  indent = ' '
  _WriteSimpleXMLElement(outfile, 'program', os.path.basename(sys.argv[0]),
                         indent)
  # The <usage> element mirrors the main module's docstring, with a
  # generic fallback when none exists.
  usage_doc = sys.modules['__main__'].__doc__
  if not usage_doc:
    usage_doc = '\nUSAGE: %s [flags]\n' % sys.argv[0]
  else:
    usage_doc = usage_doc.replace('%s', sys.argv[0])
  _WriteSimpleXMLElement(outfile, 'usage', usage_doc, indent)
  # Get list of key flags for the main module.
  key_flags = self._GetKeyFlagsForModule(_GetMainModule())
  # Sort flags by declaring module name and next by flag name.
  flags_by_module = self.FlagsByModuleDict()
  all_module_names = list(flags_by_module.keys())
  all_module_names.sort()
  for module_name in all_module_names:
    flag_list = [(f.name, f) for f in flags_by_module[module_name]]
    flag_list.sort()
    for unused_flag_name, flag in flag_list:
      is_key = flag in key_flags
      flag.WriteInfoInXMLFormat(outfile, module_name,
                                is_key=is_key, indent=indent)
  outfile.write('</AllFlags>\n')
  outfile.flush()
def AddValidator(self, validator):
  """Register new flags validator to be checked.

  Args:
    validator: gflags_validators.Validator

  Raises:
    AttributeError: if validators work with a non-existing flag.
  """
  # Attach the validator to every flag it covers so it is re-run
  # whenever any of those flags changes value.
  flag_registry = self.FlagDict()
  for flag_name in validator.GetFlagsNames():
    flag_registry[flag_name].validators.append(validator)
# end of FlagValues definition

# The global FlagValues instance shared by the whole process: DEFINE_*
# helpers register flags against it, and applications read parsed
# values as attributes (e.g. FLAGS.verbose).
FLAGS = FlagValues()
def _StrOrUnicode(value):
  """Converts value to a python string or, if necessary, unicode-string."""
  try:
    return str(value)
  except UnicodeEncodeError:
    # Python 2: str() raises for unicode with non-ASCII characters;
    # fall back to a unicode string in that case.
    return unicode(value)
def _MakeXMLSafe(s):
  """Escapes <, >, and & from s, and removes XML 1.0-illegal chars."""
  s = cgi.escape(s)  # Escape <, >, and &
  # Remove characters that cannot appear in an XML 1.0 document
  # (http://www.w3.org/TR/REC-xml/#charsets).
  #
  # NOTE: if there are problems with current solution, one may move to
  # XML 1.1, which allows such chars, if they're entity-escaped (&#xHH;).
  s = re.sub(r'[\x00-\x08\x0b\x0c\x0e-\x1f]', '', s)
  # Convert non-ascii characters to entities. Note: requires python >=2.3
  s = s.encode('ascii', 'xmlcharrefreplace')  # u'\xce\x88' -> 'u&#904;'
  return s
def _WriteSimpleXMLElement(outfile, name, value, indent):
  """Writes a simple XML element.

  Args:
    outfile: File object we write the XML element to.
    name: A string, the name of XML element.
    value: A Python object, whose string representation will be used
      as the value of the XML element.
    indent: A string, prepended to each line of generated output.
  """
  text = _StrOrUnicode(value)
  if isinstance(value, bool):
    # Display boolean values as the C++ flag library does: no caps.
    text = text.lower()
  outfile.write('%s<%s>%s</%s>\n' % (indent, name, _MakeXMLSafe(text), name))
class Flag:
"""Information about a command-line flag.
'Flag' objects define the following fields:
.name - the name for this flag
.default - the default value for this flag
.default_as_str - default value as repr'd string, e.g., "'true'" (or None)
.value - the most recent parsed value of this flag; set by Parse()
.help - a help string or None if no help is available
.short_name - the single letter alias for this flag (or None)
.boolean - if 'true', this flag does not accept arguments
.present - true if this flag was parsed from command line flags.
.parser - an ArgumentParser object
.serializer - an ArgumentSerializer object
.allow_override - the flag may be redefined without raising an error
The only public method of a 'Flag' object is Parse(), but it is
typically only called by a 'FlagValues' object. The Parse() method is
a thin wrapper around the 'ArgumentParser' Parse() method. The parsed
value is saved in .value, and the .present attribute is updated. If
this flag was already present, a FlagsError is raised.
Parse() is also called during __init__ to parse the default value and
initialize the .value attribute. This enables other python modules to
safely use flags even if the __main__ module neglects to parse the
command line arguments. The .present attribute is cleared after
__init__ parsing. If the default value is set to None, then the
__init__ parsing step is skipped and the .value attribute is
initialized to None.
Note: The default value is also presented to the user in the help
string, so it is important that it be a legal value for this flag.
"""
def __init__(self, parser, serializer, name, default, help_string,
short_name=None, boolean=0, allow_override=0):
self.name = name
if not help_string:
help_string = '(no help available)'
self.help = help_string
self.short_name = short_name
self.boolean = boolean
self.present = 0
self.parser = parser
self.serializer = serializer
self.allow_override = allow_override
self.value = None
self.validators = []
self.SetDefault(default)
def __hash__(self):
return hash(id(self))
def __eq__(self, other):
return self is other
def __lt__(self, other):
if isinstance(other, Flag):
return id(self) < id(other)
return NotImplemented
def __GetParsedValueAsString(self, value):
if value is None:
return None
if self.serializer:
return repr(self.serializer.Serialize(value))
if self.boolean:
if value:
return repr('true')
else:
return repr('false')
return repr(_StrOrUnicode(value))
def Parse(self, argument):
try:
self.value = self.parser.Parse(argument)
except ValueError, e: # recast ValueError as IllegalFlagValue
raise IllegalFlagValue("flag --%s=%s: %s" % (self.name, argument, e))
self.present += 1
def Unparse(self):
if self.default is None:
self.value = None
else:
self.Parse(self.default)
self.present = 0
def Serialize(self):
if self.value is None:
return ''
if self.boolean:
if self.value:
return "--%s" % self.name
else:
return "--no%s" % self.name
else:
if not self.serializer:
raise FlagsError("Serializer not present for flag %s" % self.name)
return "--%s=%s" % (self.name, self.serializer.Serialize(self.value))
def SetDefault(self, value):
"""Changes the default value (and current value too) for this Flag."""
# We can't allow a None override because it may end up not being
# passed to C++ code when we're overriding C++ flags. So we
# cowardly bail out until someone fixes the semantics of trying to
# pass None to a C++ flag. See swig_flags.Init() for details on
# this behavior.
# TODO(olexiy): Users can directly call this method, bypassing all flags
# validators (we don't have FlagValues here, so we can not check
# validators).
# The simplest solution I see is to make this method private.
# Another approach would be to store reference to the corresponding
# FlagValues with each flag, but this seems to be an overkill.
if value is None and self.allow_override:
raise DuplicateFlagCannotPropagateNoneToSwig(self.name)
self.default = value
self.Unparse()
self.default_as_str = self.__GetParsedValueAsString(self.value)
def Type(self):
"""Returns: a string that describes the type of this Flag."""
# NOTE: we use strings, and not the types.*Type constants because
# our flags can have more exotic types, e.g., 'comma separated list
# of strings', 'whitespace separated list of strings', etc.
return self.parser.Type()
def WriteInfoInXMLFormat(self, outfile, module_name, is_key=False, indent=''):
"""Writes common info about this flag, in XML format.
This is information that is relevant to all flags (e.g., name,
meaning, etc.). If you defined a flag that has some other pieces of
info, then please override _WriteCustomInfoInXMLFormat.
Please do NOT override this method.
Args:
outfile: File object we write to.
module_name: A string, the name of the module that defines this flag.
is_key: A boolean, True iff this flag is key for main module.
indent: A string that is prepended to each generated line.
"""
outfile.write(indent + '<flag>\n')
inner_indent = indent + ' '
if is_key:
_WriteSimpleXMLElement(outfile, 'key', 'yes', inner_indent)
_WriteSimpleXMLElement(outfile, 'file', module_name, inner_indent)
# Print flag features that are relevant for all flags.
_WriteSimpleXMLElement(outfile, 'name', self.name, inner_indent)
if self.short_name:
_WriteSimpleXMLElement(outfile, 'short_name', self.short_name,
inner_indent)
if self.help:
_WriteSimpleXMLElement(outfile, 'meaning', self.help, inner_indent)
# The default flag value can either be represented as a string like on the
# command line, or as a Python object. We serialize this value in the
# latter case in order to remain consistent.
if self.serializer and not isinstance(self.default, str):
default_serialized = self.serializer.Serialize(self.default)
else:
default_serialized = self.default
_WriteSimpleXMLElement(outfile, 'default', default_serialized, inner_indent)
_WriteSimpleXMLElement(outfile, 'current', self.value, inner_indent)
_WriteSimpleXMLElement(outfile, 'type', self.Type(), inner_indent)
# Print extra flag features this flag may have.
self._WriteCustomInfoInXMLFormat(outfile, inner_indent)
outfile.write(indent + '</flag>\n')
def _WriteCustomInfoInXMLFormat(self, outfile, indent):
    """Writes extra, flag-type-specific info in XML format.

    "Extra" means anything not already emitted by WriteInfoInXMLFormat.

    Args:
      outfile: File object we write to.
      indent: A string that is prepended to each generated line.
    """
    # The parser holds the type-specific details about the flag, so the
    # call is simply forwarded to it.
    self.parser.WriteCustomInfoInXMLFormat(outfile, indent)
# End of Flag definition
class _ArgumentParserCache(type):
"""Metaclass used to cache and share argument parsers among flags."""
_instances = {}
def __call__(mcs, *args, **kwargs):
"""Returns an instance of the argument parser cls.
This method overrides behavior of the __new__ methods in
all subclasses of ArgumentParser (inclusive). If an instance
for mcs with the same set of arguments exists, this instance is
returned, otherwise a new instance is created.
If any keyword arguments are defined, or the values in args
are not hashable, this method always returns a new instance of
cls.
Args:
args: Positional initializer arguments.
kwargs: Initializer keyword arguments.
Returns:
An instance of cls, shared or new.
"""
if kwargs:
return type.__call__(mcs, *args, **kwargs)
else:
instances = mcs._instances
key = (mcs,) + tuple(args)
try:
return instances[key]
except KeyError:
# No cache entry for key exists, create a new one.
return instances.setdefault(key, type.__call__(mcs, *args))
except TypeError:
# An object in args cannot be hashed, always return
# a new instance.
return type.__call__(mcs, *args)
class ArgumentParser(object):
    """Base class used to parse and convert arguments.

    The Parse() method checks that a string argument is legal and converts
    it to a native type, raising ValueError (with a human-readable message)
    if the value is illegal.  Subclasses should also define a
    syntactic_help string describing the form of legal values.

    Parser instances are cached and shared between flags, so parser classes
    must be stateless: all member variables must be derived from the
    initializer arguments alone.
    """
    # Python 2 metaclass hook; enables the shared-instance cache above.
    __metaclass__ = _ArgumentParserCache

    syntactic_help = ""

    def Parse(self, argument):
        """Default implementation: returns the argument unmodified."""
        return argument

    def Type(self):
        """Returns a human-readable name of the parsed type."""
        return 'string'

    def WriteCustomInfoInXMLFormat(self, outfile, indent):
        # A plain string parser has no extra XML info to emit.
        pass
class ArgumentSerializer:
    """Base class for generating string representations of a flag value."""

    def Serialize(self, value):
        """Returns the string form of value."""
        return _StrOrUnicode(value)
class ListSerializer(ArgumentSerializer):
    """Serializer that joins list elements with a configurable separator."""

    def __init__(self, list_sep):
        # Separator placed between consecutive serialized elements.
        self.list_sep = list_sep

    def Serialize(self, value):
        """Returns the elements of value, serialized and joined."""
        parts = [_StrOrUnicode(item) for item in value]
        return self.list_sep.join(parts)
# Flags validators
def RegisterValidator(flag_name,
                      checker,
                      message='Flag validation failed',
                      flag_values=FLAGS):
    """Adds a constraint, which will be enforced during program execution.

    The constraint is validated when flags are initially parsed, and again
    after each change of the corresponding flag's value.

    Args:
      flag_name: string, name of the flag to be checked.
      checker: method to validate the flag.  It receives the value of the
        corresponding flag (string, boolean, etc.) and must return True when
        the constraint is satisfied; otherwise it returns False or raises
        gflags_validators.Error(desired_error_message).
      message: error text shown to the user if checker returns False.  If
        checker raises gflags_validators.Error, the message from that Error
        is shown instead.
      flag_values: FlagValues

    Raises:
      AttributeError: if flag_name is not registered as a valid flag name.
    """
    validator = gflags_validators.SimpleValidator(flag_name, checker, message)
    flag_values.AddValidator(validator)
def MarkFlagAsRequired(flag_name, flag_values=FLAGS):
    """Ensures that flag is not None during program execution.

    Registers a flag validator, which follows the usual validator rules.

    Args:
      flag_name: string, name of the flag
      flag_values: FlagValues

    Raises:
      AttributeError: if flag_name is not registered as a valid flag name.
    """
    def _IsSet(value):
        # None means the flag was neither given a default nor set by the user.
        return value is not None

    RegisterValidator(flag_name,
                      _IsSet,
                      message='Flag --%s must be specified.' % flag_name,
                      flag_values=flag_values)
def _RegisterBoundsValidatorIfNeeded(parser, name, flag_values):
    """Enforces lower and upper bounds for numeric flags.

    Args:
      parser: NumericParser (either FloatParser or IntegerParser).  Provides
        lower and upper bounds, and help text to display.
      name: string, name of the flag
      flag_values: FlagValues
    """
    if parser.lower_bound is None and parser.upper_bound is None:
        # Unbounded flag: nothing to enforce.
        return

    def Checker(value):
        # None means "flag not set", which is always acceptable here.
        if value is not None and parser.IsOutsideBounds(value):
            raise gflags_validators.Error(
                '%s is not %s' % (value, parser.syntactic_help))
        return True

    RegisterValidator(name,
                      Checker,
                      flag_values=flag_values)
# The DEFINE functions are explained in more detail in the module doc string.
def DEFINE(parser, name, default, help, flag_values=FLAGS, serializer=None,
           **args):
    """Registers a generic Flag object.

    NOTE: in the docstrings of all DEFINE* functions, "registers" is short
    for "creates a new flag and registers it".

    Auxiliary function: clients should use the specialized DEFINE_<type>
    function instead.

    Args:
      parser: ArgumentParser that is used to parse the flag arguments.
      name: A string, the flag name.
      default: The default value of the flag.
      help: A help string.
      flag_values: FlagValues object the flag will be registered with.
      serializer: ArgumentSerializer that serializes the flag value.
      args: Dictionary with extra keyword args that are passed to the
        Flag __init__.
    """
    flag = Flag(parser, serializer, name, default, help, **args)
    DEFINE_flag(flag, flag_values)
def DEFINE_flag(flag, flag_values=FLAGS):
    """Registers a 'Flag' object with a 'FlagValues' object.

    By default, the global FLAGS 'FlagValue' object is used.

    Typical users will use one of the more specialized DEFINE_xxx
    functions, such as DEFINE_string or DEFINE_integer.  But developers
    who need to create Flag objects themselves should use this function
    to register their flags.
    """
    # Copying the reference to flag_values prevents pychecker warnings.
    fv = flag_values
    fv[flag.name] = flag
    if not isinstance(flag_values, FlagValues):
        # Some users deliberately pass funny flag_values (e.g., {}) to skip
        # the module registration below and redefine flags with the same
        # name (e.g., debug); there used to be a flag_values == FLAGS test
        # here.  To avoid breaking their code, only register with real
        # FlagValues objects.
        return
    # Tell flag_values who's defining the flag.
    module, module_name = _GetCallingModuleObjectAndName()
    flag_values._RegisterFlagByModule(module_name, flag)
    flag_values._RegisterFlagByModuleId(id(module), flag)
def _InternalDeclareKeyFlags(flag_names,
                             flag_values=FLAGS, key_flag_values=None):
    """Declares a flag as key for the calling module.

    Internal function.  User code should call DECLARE_key_flag or
    ADOPT_module_key_flags instead.

    Args:
      flag_names: A list of strings that are names of already-registered
        Flag objects.
      flag_values: A FlagValues object that the flags listed in flag_names
        have registered with (the value of the flag_values argument from
        the DEFINE_* calls that defined those flags).  This should almost
        never need to be overridden.
      key_flag_values: A FlagValues object that (among possibly many other
        things) keeps track of the key flags for each module.  Default None
        means "same as flag_values".  This should almost never need to be
        overridden.

    Raises:
      UnrecognizedFlagError: when we refer to a flag that was not
        defined yet.
    """
    key_flag_values = key_flag_values or flag_values
    module = _GetCallingModule()
    for flag_name in flag_names:
        if flag_name not in flag_values:
            raise UnrecognizedFlagError(flag_name)
        key_flag_values._RegisterKeyFlagForModule(
            module, flag_values.FlagDict()[flag_name])
def DECLARE_key_flag(flag_name, flag_values=FLAGS):
    """Declares one flag as key to the current module.

    Key flags are flags that are deemed really important for a module.
    They are important when listing help messages; e.g., if the
    --helpshort command-line flag is used, then only the key flags of the
    main module are listed (instead of all flags, as in the case of
    --help).

    Sample usage:

      gflags.DECLARE_key_flag('flag_1')

    Args:
      flag_name: A string, the name of an already declared flag.
        (Redeclaring flags as key, including flags implicitly key
        because they were declared in this module, is a no-op.)
      flag_values: A FlagValues object.  This should almost never
        need to be overridden.
    """
    if flag_name in _SPECIAL_FLAGS:
        # Special flags (e.g., --flagfile, --undefok) are defined in
        # _SPECIAL_FLAGS and are treated specially during flag parsing,
        # taking precedence over the user-defined flags.
        _InternalDeclareKeyFlags([flag_name],
                                 flag_values=_SPECIAL_FLAGS,
                                 key_flag_values=flag_values)
    else:
        _InternalDeclareKeyFlags([flag_name], flag_values=flag_values)
def ADOPT_module_key_flags(module, flag_values=FLAGS):
    """Declares that all flags key to a module are key to the current module.

    Args:
      module: A module object.
      flag_values: A FlagValues object.  This should almost never need
        to be overridden.

    Raises:
      FlagsError: When given an argument that is a module name (a
        string), instead of a module object.
    """
    # NOTE(salcianu): an even better test would be if not
    # isinstance(module, types.ModuleType), but that would mean importing
    # types for such a tiny use.
    if isinstance(module, str):
        raise FlagsError('Received module name %s; expected a module object.'
                         % module)
    key_names = [f.name
                 for f in flag_values._GetKeyFlagsForModule(module.__name__)]
    _InternalDeclareKeyFlags(key_names, flag_values=flag_values)
    if module == _GetThisModuleObjectAndName()[0]:
        # This module's own key flags include the special flags.  As flags
        # are associated via _GetCallingModuleObjectAndName(), the special
        # flags defined here are registered under a different module, so
        # _GetKeyFlagsForModule cannot find them.  Instead, take all flags
        # from _SPECIAL_FLAGS (a private FlagValues, where no other module
        # should register flags).
        special_names = [f.name for f in _SPECIAL_FLAGS.FlagDict().values()]
        _InternalDeclareKeyFlags(special_names,
                                 flag_values=_SPECIAL_FLAGS,
                                 key_flag_values=flag_values)
#
# STRING FLAGS
#
def DEFINE_string(name, default, help, flag_values=FLAGS, **args):
    """Registers a flag whose value can be any string."""
    # A plain parser/serializer pair: no conversion, no validation.
    DEFINE(ArgumentParser(), name, default, help, flag_values,
           ArgumentSerializer(), **args)
#
# BOOLEAN FLAGS
#
class BooleanParser(ArgumentParser):
    """Parser of boolean values."""

    def Convert(self, argument):
        """Converts the argument to a boolean; raises ValueError on errors.

        Accepts the strings 'true'/'t'/'1' and 'false'/'f'/'0'
        (case-insensitive), plus any non-string value that is already a
        valid boolean (True, False, 0, or 1).
        """
        # Idiom fix: use isinstance instead of type() == str, and lowercase
        # the argument only once.
        if isinstance(argument, str):
            lowered = argument.lower()
            if lowered in ('true', 't', '1'):
                return True
            if lowered in ('false', 'f', '0'):
                return False

        bool_argument = bool(argument)
        if argument == bool_argument:
            # The argument is a valid boolean (True, False, 0, or 1), and not
            # just something that converts to bool (list, string, int, etc.).
            return bool_argument
        raise ValueError('Non-boolean argument to boolean flag', argument)

    def Parse(self, argument):
        """Parses the argument; see Convert for the accepted forms."""
        return self.Convert(argument)

    def Type(self):
        return 'bool'
class BooleanFlag(Flag):
    """Basic boolean flag.

    Boolean flags do not take any arguments, and their value is either
    True (1) or False (0).  The false value is specified on the command
    line by prepending the word 'no' to either the long or the short flag
    name.

    For example, if a Boolean flag was created whose long name was
    'update' and whose short name was 'x', then this flag could be
    explicitly unset through either --noupdate or --nox.
    """

    def __init__(self, name, default, help, short_name=None, **args):
        # No serializer (None) and boolean=1 so the flag takes no argument.
        Flag.__init__(self, BooleanParser(), None, name, default, help,
                      short_name, 1, **args)
        if not self.help:
            self.help = "a boolean value"
def DEFINE_boolean(name, default, help, flag_values=FLAGS, **args):
    """Registers a boolean flag.

    Such a boolean flag does not take an argument.  If a user wants to
    specify a false value explicitly, the long option beginning with 'no'
    must be used: i.e. --noflag

    This flag will have a value of None, True or False.  None is possible
    if default=None and the user does not specify the flag on the command
    line.
    """
    flag = BooleanFlag(name, default, help, **args)
    DEFINE_flag(flag, flag_values)
# Alias for DEFINE_boolean: match the C++ gflags API to unconfuse C++ people.
DEFINE_bool = DEFINE_boolean
class HelpFlag(BooleanFlag):
    """
    HelpFlag is a special boolean flag that prints usage information and
    raises a SystemExit exception if it is ever found in the command
    line arguments.  Note this is called with allow_override=1, so other
    apps can define their own --help flag, replacing this one, if they want.
    """

    def __init__(self):
        BooleanFlag.__init__(self, "help", 0, "show this help",
                             short_name="?", allow_override=1)

    def Parse(self, arg):
        if not arg:
            return
        # Main module docstring doubles as the usage message, when present.
        doc = sys.modules["__main__"].__doc__
        usage = doc or ("\nUSAGE: %s [flags]\n" % sys.argv[0])
        flags = str(FLAGS)
        sys.stdout.write(usage + "\n")
        if flags:
            sys.stdout.write("flags:\n")
            sys.stdout.write(flags + "\n")
        sys.exit(1)
class HelpXMLFlag(BooleanFlag):
    """Similar to HelpFlag, but generates output in XML format."""

    def __init__(self):
        BooleanFlag.__init__(self, 'helpxml', False,
                             'like --help, but generates XML output',
                             allow_override=1)

    def Parse(self, arg):
        if arg:
            # Dump help for every registered flag as XML, then exit.
            FLAGS.WriteHelpInXMLFormat(sys.stdout)
            sys.exit(1)
class HelpshortFlag(BooleanFlag):
    """
    HelpshortFlag is a special boolean flag that prints usage
    information for the "main" module, and raises a SystemExit exception
    if it is ever found in the command line arguments.  Note this is
    called with allow_override=1, so other apps can define their own
    --helpshort flag, replacing this one, if they want.
    """

    def __init__(self):
        BooleanFlag.__init__(self, "helpshort", 0,
                             "show usage only for this module",
                             allow_override=1)

    def Parse(self, arg):
        if not arg:
            return
        doc = sys.modules["__main__"].__doc__
        usage = doc or ("\nUSAGE: %s [flags]\n" % sys.argv[0])
        # Only the main module's key flags, unlike --help.
        flags = FLAGS.MainModuleHelp()
        sys.stdout.write(usage + "\n")
        if flags:
            sys.stdout.write("flags:\n")
            sys.stdout.write(flags + "\n")
        sys.exit(1)
#
# Numeric parser - base class for Integer and Float parsers
#
class NumericParser(ArgumentParser):
    """Parser of numeric values.

    Parsed value may be bounded to a given upper and lower bound.
    """

    def IsOutsideBounds(self, val):
        """Returns True iff val violates a configured bound."""
        if self.lower_bound is not None and val < self.lower_bound:
            return True
        if self.upper_bound is not None and val > self.upper_bound:
            return True
        return False

    def Parse(self, argument):
        """Converts the argument and enforces bounds; raises ValueError."""
        val = self.Convert(argument)
        if self.IsOutsideBounds(val):
            raise ValueError("%s is not %s" % (val, self.syntactic_help))
        return val

    def WriteCustomInfoInXMLFormat(self, outfile, indent):
        # Emit each configured bound as its own XML element.
        for tag in ('lower_bound', 'upper_bound'):
            bound = getattr(self, tag)
            if bound is not None:
                _WriteSimpleXMLElement(outfile, tag, bound, indent)

    def Convert(self, argument):
        """Default implementation: returns its argument unmodified."""
        return argument
# End of Numeric Parser
#
# FLOAT FLAGS
#
class FloatParser(NumericParser):
    """Parser of floating point values.

    Parsed value may be bounded to a given upper and lower bound.
    """
    number_article = "a"
    number_name = "number"
    syntactic_help = " ".join((number_article, number_name))

    def __init__(self, lower_bound=None, upper_bound=None):
        super(FloatParser, self).__init__()
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound
        # Refine the generic help text using the configured bounds.  The
        # order of these tests matters: a zero bound gets special wording.
        help_text = self.syntactic_help
        if lower_bound is not None and upper_bound is not None:
            help_text = ("%s in the range [%s, %s]"
                         % (help_text, lower_bound, upper_bound))
        elif lower_bound == 0:
            help_text = "a non-negative %s" % self.number_name
        elif upper_bound == 0:
            help_text = "a non-positive %s" % self.number_name
        elif upper_bound is not None:
            help_text = "%s <= %s" % (self.number_name, upper_bound)
        elif lower_bound is not None:
            help_text = "%s >= %s" % (self.number_name, lower_bound)
        self.syntactic_help = help_text

    def Convert(self, argument):
        """Converts argument to a float; raises ValueError on errors."""
        return float(argument)

    def Type(self):
        return 'float'
# End of FloatParser
def DEFINE_float(name, default, help, lower_bound=None, upper_bound=None,
                 flag_values=FLAGS, **args):
    """Registers a flag whose value must be a float.

    If lower_bound or upper_bound are set, then this flag must be
    within the given range.
    """
    parser = FloatParser(lower_bound, upper_bound)
    DEFINE(parser, name, default, help, flag_values, ArgumentSerializer(),
           **args)
    # Also enforce the bounds on every subsequent assignment to the flag.
    _RegisterBoundsValidatorIfNeeded(parser, name, flag_values=flag_values)
#
# INTEGER FLAGS
#
class IntegerParser(NumericParser):
    """Parser of an integer value.

    Parsed value may be bounded to a given upper and lower bound.
    """
    number_article = "an"
    number_name = "integer"
    syntactic_help = " ".join((number_article, number_name))

    def __init__(self, lower_bound=None, upper_bound=None):
        super(IntegerParser, self).__init__()
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound
        # Refine the generic help text using the configured bounds.  The
        # order of these tests matters: bounds of 1, -1, and 0 get special
        # wording.
        sh = self.syntactic_help
        if lower_bound is not None and upper_bound is not None:
            sh = ("%s in the range [%s, %s]" % (sh, lower_bound, upper_bound))
        elif lower_bound == 1:
            sh = "a positive %s" % self.number_name
        elif upper_bound == -1:
            sh = "a negative %s" % self.number_name
        elif lower_bound == 0:
            sh = "a non-negative %s" % self.number_name
        elif upper_bound == 0:
            sh = "a non-positive %s" % self.number_name
        elif upper_bound is not None:
            sh = "%s <= %s" % (self.number_name, upper_bound)
        elif lower_bound is not None:
            sh = "%s >= %s" % (self.number_name, lower_bound)
        self.syntactic_help = sh

    def Convert(self, argument):
        """Converts the argument to an int; raises ValueError on errors.

        String arguments may be decimal, or hexadecimal with a "0x"/"0X"
        prefix.
        """
        __pychecker__ = 'no-returnvalues'
        if isinstance(argument, str):
            base = 10
            # Bug fix: also recognize the uppercase "0X" hex prefix, which
            # int(s, 16) accepts but the old check ("x" only) rejected.
            if len(argument) > 2 and argument[0] == "0" and argument[1] in "xX":
                base = 16
            return int(argument, base)
        return int(argument)

    def Type(self):
        return 'int'
def DEFINE_integer(name, default, help, lower_bound=None, upper_bound=None,
                   flag_values=FLAGS, **args):
    """Registers a flag whose value must be an integer.

    If lower_bound, or upper_bound are set, then this flag must be
    within the given range.
    """
    parser = IntegerParser(lower_bound, upper_bound)
    DEFINE(parser, name, default, help, flag_values, ArgumentSerializer(),
           **args)
    # Also enforce the bounds on every subsequent assignment to the flag.
    _RegisterBoundsValidatorIfNeeded(parser, name, flag_values=flag_values)
#
# ENUM FLAGS
#
class EnumParser(ArgumentParser):
    """Parser of a string enum value (a string value from a given set).

    If enum_values (see below) is not specified, any string is allowed.
    """

    def __init__(self, enum_values=None):
        super(EnumParser, self).__init__()
        self.enum_values = enum_values

    def Parse(self, argument):
        """Returns argument if it is allowed; raises ValueError otherwise."""
        if not self.enum_values:
            # No restriction configured: anything goes.
            return argument
        if argument in self.enum_values:
            return argument
        raise ValueError("value should be one of <%s>" %
                         "|".join(self.enum_values))

    def Type(self):
        return 'string enum'
class EnumFlag(Flag):
    """Basic enum flag; its value can be any string from list of enum_values."""

    def __init__(self, name, default, help, enum_values=None,
                 short_name=None, **args):
        enum_values = enum_values or []
        parser = EnumParser(enum_values)
        serializer = ArgumentSerializer()
        Flag.__init__(self, parser, serializer, name, default, help,
                      short_name, **args)
        if not self.help:
            self.help = "an enum string"
        # Prefix the help text with the set of legal values.
        self.help = "<%s>: %s" % ("|".join(enum_values), self.help)

    def _WriteCustomInfoInXMLFormat(self, outfile, indent):
        # One <enum_value> element per legal value.
        for enum_value in self.parser.enum_values:
            _WriteSimpleXMLElement(outfile, 'enum_value', enum_value, indent)
def DEFINE_enum(name, default, enum_values, help, flag_values=FLAGS,
                **args):
    """Registers a flag whose value can be any string from enum_values."""
    flag = EnumFlag(name, default, help, enum_values, **args)
    DEFINE_flag(flag, flag_values)
#
# LIST FLAGS
#
class BaseListParser(ArgumentParser):
    """Base class for a parser of lists of strings.

    To extend, inherit from this class; from the subclass __init__, call

      BaseListParser.__init__(self, token, name)

    where token is a character used to tokenize, and name is a description
    of the separator.
    """

    def __init__(self, token=None, name=None):
        assert name
        super(BaseListParser, self).__init__()
        self._token = token
        self._name = name
        self.syntactic_help = "a %s separated list" % self._name

    def Parse(self, argument):
        """Splits a string on the token; lists pass through unchanged."""
        if isinstance(argument, list):
            return argument
        if argument == '':
            return []
        return [piece.strip() for piece in argument.split(self._token)]

    def Type(self):
        return '%s separated list of strings' % self._name
class ListParser(BaseListParser):
    """Parser for a comma-separated list of strings."""

    def __init__(self):
        BaseListParser.__init__(self, ',', 'comma')

    def WriteCustomInfoInXMLFormat(self, outfile, indent):
        BaseListParser.WriteCustomInfoInXMLFormat(self, outfile, indent)
        # Advertise the separator so tools can reassemble list values.
        _WriteSimpleXMLElement(outfile, 'list_separator', repr(','), indent)
class WhitespaceSeparatedListParser(BaseListParser):
    """Parser for a whitespace-separated list of strings."""

    def __init__(self):
        # token=None makes str.split() split on any whitespace run.
        BaseListParser.__init__(self, None, 'whitespace')

    def WriteCustomInfoInXMLFormat(self, outfile, indent):
        """Writes one <list_separator> element per whitespace character.

        The characters are emitted in sorted order for deterministic
        output.
        """
        BaseListParser.WriteCustomInfoInXMLFormat(self, outfile, indent)
        # Bug fix: the separators were sorted for deterministic output, but
        # the loop then iterated the unsorted string.whitespace instead of
        # the sorted list.
        separators = sorted(string.whitespace)
        for ws_char in separators:
            _WriteSimpleXMLElement(outfile, 'list_separator', repr(ws_char),
                                   indent)
def DEFINE_list(name, default, help, flag_values=FLAGS, **args):
    """Registers a flag whose value is a comma-separated list of strings."""
    DEFINE(ListParser(), name, default, help, flag_values,
           ListSerializer(','), **args)
def DEFINE_spaceseplist(name, default, help, flag_values=FLAGS, **args):
    """Registers a flag whose value is a whitespace-separated list of strings.

    Any whitespace can be used as a separator.
    """
    DEFINE(WhitespaceSeparatedListParser(), name, default, help, flag_values,
           ListSerializer(' '), **args)
#
# MULTI FLAGS
#
class MultiFlag(Flag):
    """A flag that can appear multiple time on the command-line.

    The value of such a flag is a list that contains the individual values
    from all the appearances of that flag on the command-line.

    See the __doc__ for Flag for most behavior of this class.  Only
    differences in behavior are described here:

    * The default value may be either a single value or a list of values.
      A single value is interpreted as the [value] singleton list.

    * The value of the flag is always a list, even if the option was
      only supplied once, and even if the default value is a single
      value
    """

    def __init__(self, *args, **kwargs):
        # Same signature as Flag.__init__: (parser, serializer, name,
        # default, help_string, ...).
        Flag.__init__(self, *args, **kwargs)
        self.help += ';\n repeat this option to specify a list of values'

    def Parse(self, arguments):
        """Parses one or more arguments with the installed parser.

        Args:
          arguments: a single argument or a list of arguments (typically a
            list of default values); a single argument is converted
            internally into a list containing one item.
        """
        if not isinstance(arguments, list):
            # Default value may be a list of values.  Most other arguments
            # will not be, so convert them into a single-item list to make
            # processing simpler below.
            arguments = [arguments]
        if self.present:
            # keep a backup reference to list of previously supplied option values
            values = self.value
        else:
            # "erase" the defaults with an empty list
            values = []
        for item in arguments:
            # have Flag superclass parse argument, overwriting self.value reference
            Flag.Parse(self, item)  # also increments self.present
            values.append(self.value)
        # put list of option values back in the 'value' attribute
        self.value = values

    def Serialize(self):
        """Serializes all accumulated values, space-separated.

        Flag.Serialize reads self.value, so self.value is temporarily
        rebound to each element in turn and restored at the end; do not
        reorder these statements.
        """
        if not self.serializer:
            raise FlagsError("Serializer not present for flag %s" % self.name)
        if self.value is None:
            return ''
        s = ''
        multi_value = self.value
        for self.value in multi_value:
            if s: s += ' '
            s += Flag.Serialize(self)
        self.value = multi_value
        return s

    def Type(self):
        # E.g. 'multi int' for a repeated integer flag.
        return 'multi ' + self.parser.Type()
def DEFINE_multi(parser, serializer, name, default, help, flag_values=FLAGS,
                 **args):
    """Registers a generic MultiFlag that parses its args with a given parser.

    Auxiliary function.  Normal users should NOT use it directly.

    Developers who need to create their own 'Parser' classes for options
    which can appear multiple times can call this module function to
    register their flags.
    """
    flag = MultiFlag(parser, serializer, name, default, help, **args)
    DEFINE_flag(flag, flag_values)
def DEFINE_multistring(name, default, help, flag_values=FLAGS, **args):
    """Registers a flag whose value can be a list of any strings.

    Use the flag on the command line multiple times to place multiple
    string values into the list.  The 'default' may be a single string
    (which will be converted into a single-element list) or a list of
    strings.
    """
    DEFINE_multi(ArgumentParser(), ArgumentSerializer(),
                 name, default, help, flag_values, **args)
def DEFINE_multi_int(name, default, help, lower_bound=None, upper_bound=None,
                     flag_values=FLAGS, **args):
    """Registers a flag whose value can be a list of arbitrary integers.

    Use the flag on the command line multiple times to place multiple
    integer values into the list.  The 'default' may be a single integer
    (which will be converted into a single-element list) or a list of
    integers.
    """
    parser = IntegerParser(lower_bound, upper_bound)
    DEFINE_multi(parser, ArgumentSerializer(), name, default, help,
                 flag_values, **args)
def DEFINE_multi_float(name, default, help, lower_bound=None, upper_bound=None,
                       flag_values=FLAGS, **args):
    """Registers a flag whose value can be a list of arbitrary floats.

    Use the flag on the command line multiple times to place multiple
    float values into the list.  The 'default' may be a single float
    (which will be converted into a single-element list) or a list of
    floats.
    """
    parser = FloatParser(lower_bound, upper_bound)
    DEFINE_multi(parser, ArgumentSerializer(), name, default, help,
                 flag_values, **args)
# Now register the flags that we want to exist in all applications.
# These are all defined with allow_override=1, so user-apps can use
# these flagnames for their own purposes, if they want.
DEFINE_flag(HelpFlag())
DEFINE_flag(HelpshortFlag())
DEFINE_flag(HelpXMLFlag())

# Define special flags here so that help may be generated for them.
# NOTE: Please do NOT use _SPECIAL_FLAGS from outside this module.
# These flags are parsed with precedence over user-defined flags (see
# DECLARE_key_flag), which is why they live in their own FlagValues.
_SPECIAL_FLAGS = FlagValues()

DEFINE_string(
    'flagfile', "",
    "Insert flag definitions from the given file into the command line.",
    _SPECIAL_FLAGS)

DEFINE_string(
    'undefok', "",
    "comma-separated list of flag names that it is okay to specify "
    "on the command line even if the program does not define a flag "
    "with that name. IMPORTANT: flags in this list that have "
    "arguments MUST use the --flag=value format.", _SPECIAL_FLAGS)
| Python |
#!/usr/bin/env python
# Copyright (c) 2010, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module to enforce different constraints on flags.
A validator represents an invariant, enforced over one or more flags.
See 'FLAGS VALIDATORS' in gflags.py's docstring for a usage manual.
"""
__author__ = 'olexiy@google.com (Olexiy Oryeshko)'
class Error(Exception):
    """Raised when a validator constraint is not satisfied."""
class Validator(object):
    """Base class for flags validators.

    Users should NOT overload these classes, and use gflags.Register...
    methods instead.
    """

    # Monotonically increasing counter used to assign each validator a
    # unique insertion_index.
    validators_count = 0

    def __init__(self, checker, message):
        """Constructor to create all validators.

        Args:
          checker: function to verify the constraint.  Its input varies; see
            SimpleValidator and DictionaryValidator for details.
          message: string, error message to be shown to the user
        """
        self.checker = checker
        self.message = message
        # Record registration order, so validators can be asserted in the
        # order they were registered (CL/18694236).
        Validator.validators_count += 1
        self.insertion_index = Validator.validators_count

    def Verify(self, flag_values):
        """Verifies that the constraint is satisfied.

        The flags library calls this method to verify the Validator's
        constraint.

        Args:
          flag_values: gflags.FlagValues, containing all flags

        Raises:
          Error: if constraint is not satisfied.
        """
        checker_input = self._GetInputToCheckerFunction(flag_values)
        if self.checker(checker_input):
            return
        raise Error(self.message)

    def GetFlagsNames(self):
        """Returns the names ([string]) of the flags checked by this validator."""
        raise NotImplementedError('This method should be overloaded')

    def PrintFlagsWithValues(self, flag_values):
        raise NotImplementedError('This method should be overloaded')

    def _GetInputToCheckerFunction(self, flag_values):
        """Given flag values, constructs the input to be given to checker.

        Args:
          flag_values: gflags.FlagValues, containing all flags.

        Returns:
          Return type depends on the specific validator.
        """
        raise NotImplementedError('This method should be overloaded')
class SimpleValidator(Validator):
    """Validator behind RegisterValidator() method.

    Validates that a single flag passes its checker function.  The checker
    function takes the flag value and returns True (if value looks fine) or,
    if flag value is not valid, either returns False or raises an Exception.
    """

    def __init__(self, flag_name, checker, message):
        """Constructor.

        Args:
          flag_name: string, name of the flag.
          checker: function to verify the validator.
            input - value of the corresponding flag (string, boolean, etc).
            output - Boolean.  Must return True if validator constraint is
              satisfied.  If constraint is not satisfied, it should either
              return False or raise Error.
          message: string, error message to be shown to the user if
            validator's condition is not satisfied
        """
        super(SimpleValidator, self).__init__(checker, message)
        self.flag_name = flag_name

    def GetFlagsNames(self):
        return [self.flag_name]

    def PrintFlagsWithValues(self, flag_values):
        flag = flag_values[self.flag_name]
        return 'flag --%s=%s' % (self.flag_name, flag.value)

    def _GetInputToCheckerFunction(self, flag_values):
        """Returns the value of the single flag this validator watches.

        Args:
          flag_values: gflags.FlagValues

        Returns:
          value of the corresponding flag.
        """
        return flag_values[self.flag_name].value
class DictionaryValidator(Validator):
    """Validator behind RegisterDictionaryValidator method.

    Validates that flag values pass their common checker function.  The
    checker function takes flag values and returns True (if values look
    fine) or, if values are not valid, either returns False or raises an
    Exception.
    """

    def __init__(self, flag_names, checker, message):
        """Constructor.

        Args:
          flag_names: [string], containing names of the flags used by checker.
          checker: function to verify the validator.
            input - dictionary, with keys() being flag_names, and value for
              each key being the value of the corresponding flag (string,
              boolean, etc).
            output - Boolean.  Must return True if validator constraint is
              satisfied.  If constraint is not satisfied, it should either
              return False or raise Error.
          message: string, error message to be shown to the user if
            validator's condition is not satisfied
        """
        super(DictionaryValidator, self).__init__(checker, message)
        self.flag_names = flag_names

    def _GetInputToCheckerFunction(self, flag_values):
        """Given flag values, constructs the input to be given to checker.

        Args:
          flag_values: gflags.FlagValues

        Returns:
          dictionary, with keys() being self.flag_names, and value for each
          key being the value of the corresponding flag (string, boolean,
          etc).
        """
        return dict((name, flag_values[name].value)
                    for name in self.flag_names)

    def PrintFlagsWithValues(self, flag_values):
        rendered = ['%s=%s' % (name, flag_values[name].value)
                    for name in self.flag_names]
        return 'flags ' + ', '.join(rendered)

    def GetFlagsNames(self):
        return self.flag_names
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Google App Engine
Utilities for making it easier to use OAuth 2.0 on Google App Engine.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import base64
import httplib2
import logging
import pickle
import time
import clientsecrets
from anyjson import simplejson
from client import AccessTokenRefreshError
from client import AssertionCredentials
from client import Credentials
from client import Flow
from client import OAuth2WebServerFlow
from client import Storage
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.api import app_identity
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import login_required
from google.appengine.ext.webapp.util import run_wsgi_app
# Memcache namespace used to isolate this module's entries (pickled Flow
# objects keyed by user id) from the application's own cache entries.
OAUTH2CLIENT_NAMESPACE = 'oauth2client#ns'


class InvalidClientSecretsError(Exception):
  """The client_secrets.json file is malformed or missing required fields."""
  pass
class AppAssertionCredentials(AssertionCredentials):
"""Credentials object for App Engine Assertion Grants
This object will allow an App Engine application to identify itself to Google
and other OAuth 2.0 servers that can verify assertions. It can be used for
the purpose of accessing data stored under an account assigned to the App
Engine application itself.
This credential does not require a flow to instantiate because it represents
a two legged flow, and therefore has all of the required information to
generate and refresh its own access tokens.
"""
def __init__(self, scope, **kwargs):
"""Constructor for AppAssertionCredentials
Args:
scope: string or list of strings, scope(s) of the credentials being requested.
"""
if type(scope) is list:
scope = ' '.join(scope)
self.scope = scope
super(AppAssertionCredentials, self).__init__(
None,
None,
None)
@classmethod
def from_json(cls, json):
data = simplejson.loads(json)
return AppAssertionCredentials(data['scope'])
def _refresh(self, http_request):
"""Refreshes the access_token.
Since the underlying App Engine app_identity implementation does its own
caching we can skip all the storage hoops and just to a refresh using the
API.
Args:
http_request: callable, a callable that matches the method signature of
httplib2.Http.request, used to make the refresh request.
Raises:
AccessTokenRefreshError: When the refresh fails.
"""
try:
(token, _) = app_identity.get_access_token(self.scope)
except app_identity.Error, e:
raise AccessTokenRefreshError(str(e))
self.access_token = token
class FlowProperty(db.Property):
  """App Engine datastore Property for Flow.

  Utility property that allows easy storage and retrieval of an
  oauth2client.Flow.
  """

  # The user-visible type stored by this property.
  data_type = Flow

  def get_value_for_datastore(self, model_instance):
    """Serialize the Flow into a pickled Blob for storage."""
    flow = super(FlowProperty, self).get_value_for_datastore(model_instance)
    return db.Blob(pickle.dumps(flow))

  def make_value_from_datastore(self, value):
    """Rebuild the Flow from its pickled datastore representation."""
    if value is None:
      return None
    return pickle.loads(value)

  def validate(self, value):
    """Accept only None or Flow instances."""
    if value is None or isinstance(value, Flow):
      return super(FlowProperty, self).validate(value)
    raise db.BadValueError('Property %s must be convertible '
                           'to a FlowThreeLegged instance (%s)' %
                           (self.name, value))

  def empty(self, value):
    """Any falsy value counts as empty."""
    return not value
class CredentialsProperty(db.Property):
  """App Engine datastore Property for Credentials.

  Utility property that allows easy storage and retrieval of
  oauth2client.Credentials.
  """

  # The user-visible type stored by this property.
  data_type = Credentials

  # For writing to datastore.
  def get_value_for_datastore(self, model_instance):
    """Serialize the Credentials to a JSON Blob (empty Blob when None)."""
    logging.info("get: Got type " + str(type(model_instance)))
    cred = super(CredentialsProperty,
                 self).get_value_for_datastore(model_instance)
    if cred is None:
      cred = ''
    else:
      cred = cred.to_json()
    return db.Blob(cred)

  # For reading from datastore.
  def make_value_from_datastore(self, value):
    """Deserialize the JSON Blob; None for missing, empty or invalid data."""
    logging.info("make: Got type " + str(type(value)))
    # None and the empty Blob both mean "no credential stored".
    if not value:
      return None
    try:
      credentials = Credentials.new_from_json(value)
    except ValueError:
      # Malformed JSON is treated as "no credential" rather than failing
      # the whole entity load.
      credentials = None
    return credentials

  def validate(self, value):
    """Accept only None or Credentials instances."""
    value = super(CredentialsProperty, self).validate(value)
    logging.info("validate: Got type " + str(type(value)))
    if value is not None and not isinstance(value, Credentials):
      raise db.BadValueError('Property %s must be convertible '
                             'to a Credentials instance (%s)' %
                             (self.name, value))
    return value
class StorageByKeyName(Storage):
  """Store and retrieve a single credential to and from
  the App Engine datastore.

  This Storage helper presumes the Credentials
  have been stored as a CredentialsProperty
  on a datastore model class, and that entities
  are stored by key_name.
  """

  def __init__(self, model, key_name, property_name, cache=None):
    """Constructor for Storage.

    Args:
      model: db.Model, model class
      key_name: string, key name for the entity that has the credentials
      property_name: string, name of the property that is a CredentialsProperty
      cache: memcache, a write-through cache to put in front of the datastore
    """
    self._model = model
    self._key_name = key_name
    self._property_name = property_name
    self._cache = cache

  def locked_get(self):
    """Retrieve Credential from datastore.

    Returns:
      oauth2client.Credentials
    """
    # Serve from the write-through cache first to avoid a datastore round
    # trip on the common path.
    if self._cache:
      json = self._cache.get(self._key_name)
      if json:
        return Credentials.new_from_json(json)
    credential = None
    entity = self._model.get_by_key_name(self._key_name)
    if entity is not None:
      credential = getattr(entity, self._property_name)
      if credential and hasattr(credential, 'set_store'):
        # Let the credential write itself back through this Storage when
        # it is refreshed.
        credential.set_store(self)
        if self._cache:
          self._cache.set(self._key_name, credential.to_json())
    return credential

  def locked_put(self, credentials):
    """Write a Credentials to the datastore.

    Args:
      credentials: Credentials, the credentials to store.
    """
    entity = self._model.get_or_insert(self._key_name)
    setattr(entity, self._property_name, credentials)
    entity.put()
    # Keep the write-through cache in sync with the datastore.
    if self._cache:
      self._cache.set(self._key_name, credentials.to_json())

  def locked_delete(self):
    """Delete Credential from datastore."""
    # Evict the cache entry before removing the entity.
    if self._cache:
      self._cache.delete(self._key_name)
    entity = self._model.get_by_key_name(self._key_name)
    if entity is not None:
      entity.delete()
class CredentialsModel(db.Model):
  """Storage for OAuth 2.0 Credentials

  Storage of the model is keyed by the user.user_id().
  """

  # The serialized OAuth 2.0 credentials for one user.
  credentials = CredentialsProperty()
class OAuth2Decorator(object):
  """Utility for making OAuth 2.0 easier.

  Instantiate and then use with oauth_required or oauth_aware
  as decorators on webapp.RequestHandler methods.

  Example:

    decorator = OAuth2Decorator(
        client_id='837...ent.com',
        client_secret='Qh...wwI',
        scope='https://www.googleapis.com/auth/plus')

    class MainHandler(webapp.RequestHandler):

      @decorator.oauth_required
      def get(self):
        http = decorator.http()
        # http is authorized with the user's Credentials and can be used
        # in API calls

  NOTE(review): per-request state (credentials, _request_handler) is stored
  on this shared decorator instance; this looks unsafe if the runtime serves
  concurrent requests in one process -- confirm the deployment's threading
  model.
  """

  def __init__(self, client_id, client_secret, scope,
               auth_uri='https://accounts.google.com/o/oauth2/auth',
               token_uri='https://accounts.google.com/o/oauth2/token',
               user_agent=None,
               message=None, **kwargs):
    """Constructor for OAuth2Decorator

    Args:
      client_id: string, client identifier.
      client_secret: string client secret.
      scope: string or list of strings, scope(s) of the credentials being
        requested.
      auth_uri: string, URI for authorization endpoint. For convenience
        defaults to Google's endpoints but any OAuth 2.0 provider can be
        used.
      token_uri: string, URI for token endpoint. For convenience
        defaults to Google's endpoints but any OAuth 2.0 provider can be
        used.
      user_agent: string, User agent of your application, default to None.
      message: Message to display if there are problems with the OAuth 2.0
        configuration. The message may contain HTML and will be presented
        on the web interface for any method that uses the decorator.
      **kwargs: dict, Keyword arguments are be passed along as kwargs to
        the OAuth2WebServerFlow constructor.
    """
    self.flow = OAuth2WebServerFlow(client_id, client_secret, scope, user_agent,
                                    auth_uri, token_uri, **kwargs)
    # Per-request state, populated by oauth_required/oauth_aware.
    self.credentials = None
    self._request_handler = None
    self._message = message
    # Set True by subclasses when the OAuth configuration is unusable.
    self._in_error = False

  def _display_error_message(self, request_handler):
    # Render the configured (possibly HTML) error message as a minimal page.
    request_handler.response.out.write('<html><body>')
    request_handler.response.out.write(self._message)
    request_handler.response.out.write('</body></html>')

  def oauth_required(self, method):
    """Decorator that starts the OAuth 2.0 dance.

    Starts the OAuth dance for the logged in user if they haven't already
    granted access for this application.

    Args:
      method: callable, to be decorated method of a webapp.RequestHandler
        instance.
    """
    def check_oauth(request_handler, *args, **kwargs):
      if self._in_error:
        self._display_error_message(request_handler)
        return
      user = users.get_current_user()
      # Don't use @login_decorator as this could be used in a POST request.
      if not user:
        request_handler.redirect(users.create_login_url(
            request_handler.request.uri))
        return
      # Store the request URI in 'state' so we can use it later
      self.flow.params['state'] = request_handler.request.url
      self._request_handler = request_handler
      self.credentials = StorageByKeyName(
          CredentialsModel, user.user_id(), 'credentials').get()
      if not self.has_credentials():
        return request_handler.redirect(self.authorize_url())
      try:
        method(request_handler, *args, **kwargs)
      except AccessTokenRefreshError:
        # The stored token can no longer be refreshed; restart the dance.
        return request_handler.redirect(self.authorize_url())
    return check_oauth

  def oauth_aware(self, method):
    """Decorator that sets up for OAuth 2.0 dance, but doesn't do it.

    Does all the setup for the OAuth dance, but doesn't initiate it.
    This decorator is useful if you want to create a page that knows
    whether or not the user has granted access to this application.
    From within a method decorated with @oauth_aware the has_credentials()
    and authorize_url() methods can be called.

    Args:
      method: callable, to be decorated method of a webapp.RequestHandler
        instance.
    """
    def setup_oauth(request_handler, *args, **kwargs):
      if self._in_error:
        self._display_error_message(request_handler)
        return
      user = users.get_current_user()
      # Don't use @login_decorator as this could be used in a POST request.
      if not user:
        request_handler.redirect(users.create_login_url(
            request_handler.request.uri))
        return
      # 'state' carries the original request URL through the OAuth dance.
      self.flow.params['state'] = request_handler.request.url
      self._request_handler = request_handler
      self.credentials = StorageByKeyName(
          CredentialsModel, user.user_id(), 'credentials').get()
      # Unlike oauth_required, always invoke the method; the handler can
      # inspect has_credentials() itself.
      method(request_handler, *args, **kwargs)
    return setup_oauth

  def has_credentials(self):
    """True if for the logged in user there are valid access Credentials.

    Must only be called from with a webapp.RequestHandler subclassed method
    that had been decorated with either @oauth_required or @oauth_aware.
    """
    return self.credentials is not None and not self.credentials.invalid

  def authorize_url(self):
    """Returns the URL to start the OAuth dance.

    Must only be called from with a webapp.RequestHandler subclassed method
    that had been decorated with either @oauth_required or @oauth_aware.
    """
    callback = self._request_handler.request.relative_url('/oauth2callback')
    url = self.flow.step1_get_authorize_url(callback)
    # Stash the flow in memcache so OAuth2Handler can recover it when the
    # user returns from the provider.
    user = users.get_current_user()
    memcache.set(user.user_id(), pickle.dumps(self.flow),
                 namespace=OAUTH2CLIENT_NAMESPACE)
    return str(url)

  def http(self):
    """Returns an authorized http instance.

    Must only be called from within an @oauth_required decorated method, or
    from within an @oauth_aware decorated method where has_credentials()
    returns True.
    """
    return self.credentials.authorize(httplib2.Http())
class OAuth2DecoratorFromClientSecrets(OAuth2Decorator):
  """An OAuth2Decorator that builds from a clientsecrets file.

  Uses a clientsecrets file as the source for all the information when
  constructing an OAuth2Decorator.

  Example:

    decorator = OAuth2DecoratorFromClientSecrets(
        os.path.join(os.path.dirname(__file__), 'client_secrets.json')
        scope='https://www.googleapis.com/auth/plus')

    class MainHandler(webapp.RequestHandler):

      @decorator.oauth_required
      def get(self):
        http = decorator.http()
        # http is authorized with the user's Credentials and can be used
        # in API calls
  """

  def __init__(self, filename, scope, message=None):
    """Constructor

    Args:
      filename: string, File name of client secrets.
      scope: string or list of strings, scope(s) of the credentials being
        requested.
      message: string, A friendly string to display to the user if the
        clientsecrets file is missing or invalid. The message may contain
        HTML and will be presented on the web interface for any method that
        uses the decorator.
    """
    try:
      client_type, client_info = clientsecrets.loadfile(filename)
      if client_type not in [
          clientsecrets.TYPE_WEB, clientsecrets.TYPE_INSTALLED]:
        raise InvalidClientSecretsError(
            'OAuth2Decorator doesn\'t support this OAuth 2.0 flow.')
      # Pass message by keyword: OAuth2Decorator.__init__ takes user_agent
      # before message, so passing message positionally here was being
      # misinterpreted as the user agent.
      super(OAuth2DecoratorFromClientSecrets,
            self).__init__(
                client_info['client_id'],
                client_info['client_secret'],
                scope,
                client_info['auth_uri'],
                client_info['token_uri'],
                message=message)
    except clientsecrets.InvalidClientSecretsError:
      self._in_error = True
    if message is not None:
      self._message = message
    else:
      self._message = "Please configure your application for OAuth 2.0"
def oauth2decorator_from_clientsecrets(filename, scope, message=None):
  """Creates an OAuth2Decorator populated from a clientsecrets file.

  Args:
    filename: string, File name of client secrets.
    scope: string or list of strings, scope(s) of the credentials being
      requested.
    message: string, A friendly string to display to the user if the
      clientsecrets file is missing or invalid. The message may contain
      HTML and will be presented on the web interface for any method that
      uses the decorator.

  Returns:
    An OAuth2Decorator.
  """
  decorator = OAuth2DecoratorFromClientSecrets(filename, scope, message)
  return decorator
class OAuth2Handler(webapp.RequestHandler):
  """Handler for the redirect_uri of the OAuth 2.0 dance."""

  @login_required
  def get(self):
    # The provider reports failures via an 'error' query parameter.
    error = self.request.get('error')
    if error:
      errormsg = self.request.get('error_description', error)
      self.response.out.write(
          'The authorization request failed: %s' % errormsg)
    else:
      user = users.get_current_user()
      # NOTE(review): unpickles data read back from memcache (stored by
      # OAuth2Decorator.authorize_url); safe only while the cache cannot
      # be written by untrusted parties -- confirm.
      flow = pickle.loads(memcache.get(user.user_id(),
                                       namespace=OAUTH2CLIENT_NAMESPACE))
      # This code should be amended with application specific error
      # handling. The following cases should be considered:
      # 1. What if the flow doesn't exist in memcache? Or is corrupt?
      # 2. What if the step2_exchange fails?
      if flow:
        credentials = flow.step2_exchange(self.request.params)
        StorageByKeyName(
            CredentialsModel, user.user_id(), 'credentials').put(credentials)
        # 'state' carries the URL the user originally requested.
        self.redirect(str(self.request.get('state')))
      else:
        # TODO Add error handling here.
        pass
# WSGI application exposing the OAuth 2.0 redirect endpoint.
application = webapp.WSGIApplication([('/oauth2callback', OAuth2Handler)])


def main():
  # Entry point for the App Engine CGI environment.
  run_wsgi_app(application)
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import hashlib
import logging
import time
from OpenSSL import crypto
from anyjson import simplejson
CLOCK_SKEW_SECS = 300  # 5 minutes in seconds
AUTH_TOKEN_LIFETIME_SECS = 300  # 5 minutes in seconds
MAX_TOKEN_LIFETIME_SECS = 86400  # 1 day in seconds


class AppIdentityError(Exception):
  """A JWT failed verification (signature, timestamps or audience)."""
  pass
class Verifier(object):
  """Verifies the signature on a message."""

  def __init__(self, pubkey):
    """Constructor.

    Args:
      pubkey: OpenSSL.crypto public key object, The public key to verify
        with.
    """
    self._pubkey = pubkey

  def verify(self, message, signature):
    """Verifies a message against a signature.

    Args:
      message: string, The message to verify.
      signature: string, The signature on the message.

    Returns:
      True if message was signed by the private key associated with the
      public key that this object was constructed with.
    """
    try:
      crypto.verify(self._pubkey, signature, message, 'sha256')
      return True
    except crypto.Error:
      # Only signature-verification failures mean "not verified"; a bare
      # except here would also hide programming errors such as passing
      # arguments of the wrong type.
      return False

  @staticmethod
  def from_string(key_pem, is_x509_cert):
    """Construct a Verifier instance from a string.

    Args:
      key_pem: string, public key in PEM format.
      is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it is
        expected to be an RSA key in PEM format.

    Returns:
      Verifier instance.

    Raises:
      OpenSSL.crypto.Error if the key_pem can't be parsed.
    """
    if is_x509_cert:
      pubkey = crypto.load_certificate(crypto.FILETYPE_PEM, key_pem)
    else:
      pubkey = crypto.load_privatekey(crypto.FILETYPE_PEM, key_pem)
    return Verifier(pubkey)
class Signer(object):
  """Signs messages with a private key."""

  def __init__(self, pkey):
    """Constructor.

    Args:
      pkey: OpenSSL.crypto private key object, The private key to sign
        with.
    """
    self._key = pkey

  def sign(self, message):
    """Signs a message.

    Args:
      message: string, Message to be signed.

    Returns:
      string, The signature of the message for the given key.
    """
    return crypto.sign(self._key, message, 'sha256')

  @staticmethod
  def from_string(key, password='notasecret'):
    """Construct a Signer instance from a string.

    Args:
      key: string, private key in P12 format.
      password: string, password for the private key file.

    Returns:
      Signer instance.

    Raises:
      OpenSSL.crypto.Error if the key can't be parsed.
    """
    p12 = crypto.load_pkcs12(key, password)
    return Signer(p12.get_privatekey())
def _urlsafe_b64encode(raw_bytes):
  """URL-safe base64-encode raw_bytes with the trailing '=' padding removed."""
  encoded = base64.urlsafe_b64encode(raw_bytes)
  return encoded.rstrip('=')
def _urlsafe_b64decode(b64string):
  """Decode a URL-safe base64 string whose '=' padding may be stripped.

  Args:
    b64string: string, base64 text, possibly without its '=' padding.

  Returns:
    string, the decoded bytes.
  """
  # Guard against unicode strings, which base64 can't handle.
  b64string = b64string.encode('ascii')
  # -len % 4 restores exactly the missing padding; the previous
  # `4 - len % 4` expression appended four spurious '=' characters whenever
  # the input length was already a multiple of 4.
  padded = b64string + '=' * (-len(b64string) % 4)
  return base64.urlsafe_b64decode(padded)
def _json_encode(data):
  """Serialize data to compact JSON (no whitespace after separators)."""
  compact_separators = (',', ':')
  return simplejson.dumps(data, separators=compact_separators)
def make_signed_jwt(signer, payload):
  """Make a signed JWT.

  See http://self-issued.info/docs/draft-jones-json-web-token.html.

  Args:
    signer: crypt.Signer, Cryptographic signer.
    payload: dict, Dictionary of data to convert to JSON and then sign.

  Returns:
    string, The JWT for the payload.
  """
  header = {'typ': 'JWT', 'alg': 'RS256'}
  header_segment = _urlsafe_b64encode(_json_encode(header))
  payload_segment = _urlsafe_b64encode(_json_encode(payload))
  # The signature covers "<header>.<payload>".
  signing_input = header_segment + '.' + payload_segment
  signature_segment = _urlsafe_b64encode(signer.sign(signing_input))
  segments = [header_segment, payload_segment, signature_segment]
  logging.debug(str(segments))
  return '.'.join(segments)
def verify_signed_jwt_with_certs(jwt, certs, audience):
  """Verify a JWT against public certs.

  See http://self-issued.info/docs/draft-jones-json-web-token.html.

  Args:
    jwt: string, A JWT.
    certs: dict, Dictionary where values of public keys in PEM format.
    audience: string, The audience, 'aud', that this JWT should contain. If
      None then the JWT's 'aud' parameter is not verified.

  Returns:
    dict, The deserialized JSON payload in the JWT.

  Raises:
    AppIdentityError if any checks are failed.
  """
  segments = jwt.split('.')
  if len(segments) != 3:
    raise AppIdentityError(
        'Wrong number of segments in token: %s' % jwt)
  signed = '%s.%s' % (segments[0], segments[1])
  signature = _urlsafe_b64decode(segments[2])

  # Parse token.
  json_body = _urlsafe_b64decode(segments[1])
  try:
    parsed = simplejson.loads(json_body)
  except ValueError:
    # Narrowed from a bare except: only a JSON syntax error means a bad
    # token; any other exception is a programming error and should
    # propagate.
    raise AppIdentityError('Can\'t parse token: %s' % json_body)

  # Check signature: accept the token if any of the supplied certs
  # validates it (signing keys rotate, so several may be live at once).
  verified = False
  for pem in certs.values():
    verifier = Verifier.from_string(pem, True)
    if verifier.verify(signed, signature):
      verified = True
      break
  if not verified:
    raise AppIdentityError('Invalid token signature: %s' % jwt)

  # Check creation timestamp, tolerating CLOCK_SKEW_SECS of clock drift.
  iat = parsed.get('iat')
  if iat is None:
    raise AppIdentityError('No iat field in token: %s' % json_body)
  earliest = iat - CLOCK_SKEW_SECS

  # Check expiration timestamp.
  now = long(time.time())
  exp = parsed.get('exp')
  if exp is None:
    raise AppIdentityError('No exp field in token: %s' % json_body)
  if exp >= now + MAX_TOKEN_LIFETIME_SECS:
    raise AppIdentityError(
        'exp field too far in future: %s' % json_body)
  latest = exp + CLOCK_SKEW_SECS

  if now < earliest:
    raise AppIdentityError('Token used too early, %d < %d: %s' %
                           (now, earliest, json_body))
  if now > latest:
    raise AppIdentityError('Token used too late, %d > %d: %s' %
                           (now, latest, json_body))

  # Check audience.
  if audience is not None:
    aud = parsed.get('aud')
    if aud is None:
      raise AppIdentityError('No aud field in token: %s' % json_body)
    if aud != audience:
      raise AppIdentityError('Wrong recipient, %s != %s: %s' %
                             (aud, audience, json_body))

  return parsed
| Python |
# Copyright 2011 Google Inc. All Rights Reserved.
"""Locked file interface that should work on Unix and Windows pythons.
This module first tries to use fcntl locking to ensure serialized access
to a file, then falls back on a lock file if that is unavialable.
Usage:
f = LockedFile('filename', 'r+b', 'rb')
f.open_and_lock()
if f.is_locked():
print 'Acquired filename with r+b mode'
f.file_handle().write('locked data')
else:
print 'Aquired filename with rb mode'
f.unlock_and_close()
"""
__author__ = 'cache@google.com (David T McWherter)'
import errno
import logging
import os
import time
# Module-level logger; handlers and levels are configured by the application.
logger = logging.getLogger(__name__)


class AlreadyLockedException(Exception):
  """Trying to lock a file that has already been locked by the LockedFile."""
  pass
class _Opener(object):
"""Base class for different locking primitives."""
def __init__(self, filename, mode, fallback_mode):
"""Create an Opener.
Args:
filename: string, The pathname of the file.
mode: string, The preferred mode to access the file with.
fallback_mode: string, The mode to use if locking fails.
"""
self._locked = False
self._filename = filename
self._mode = mode
self._fallback_mode = fallback_mode
self._fh = None
def is_locked(self):
"""Was the file locked."""
return self._locked
def file_handle(self):
"""The file handle to the file. Valid only after opened."""
return self._fh
def filename(self):
"""The filename that is being locked."""
return self._filename
def open_and_lock(self, timeout, delay):
"""Open the file and lock it.
Args:
timeout: float, How long to try to lock for.
delay: float, How long to wait between retries.
"""
pass
def unlock_and_close(self):
"""Unlock and close the file."""
pass
class _PosixOpener(_Opener):
"""Lock files using Posix advisory lock files."""
def open_and_lock(self, timeout, delay):
"""Open the file and lock it.
Tries to create a .lock file next to the file we're trying to open.
Args:
timeout: float, How long to try to lock for.
delay: float, How long to wait between retries.
Raises:
AlreadyLockedException: if the lock is already acquired.
IOError: if the open fails.
"""
if self._locked:
raise AlreadyLockedException('File %s is already locked' %
self._filename)
self._locked = False
try:
self._fh = open(self._filename, self._mode)
except IOError, e:
# If we can't access with _mode, try _fallback_mode and don't lock.
if e.errno == errno.EACCES:
self._fh = open(self._filename, self._fallback_mode)
return
lock_filename = self._posix_lockfile(self._filename)
start_time = time.time()
while True:
try:
self._lock_fd = os.open(lock_filename,
os.O_CREAT|os.O_EXCL|os.O_RDWR)
self._locked = True
break
except OSError, e:
if e.errno != errno.EEXIST:
raise
if (time.time() - start_time) >= timeout:
logger.warn('Could not acquire lock %s in %s seconds' % (
lock_filename, timeout))
# Close the file and open in fallback_mode.
if self._fh:
self._fh.close()
self._fh = open(self._filename, self._fallback_mode)
return
time.sleep(delay)
def unlock_and_close(self):
"""Unlock a file by removing the .lock file, and close the handle."""
if self._locked:
lock_filename = self._posix_lockfile(self._filename)
os.unlink(lock_filename)
os.close(self._lock_fd)
self._locked = False
self._lock_fd = None
if self._fh:
self._fh.close()
def _posix_lockfile(self, filename):
"""The name of the lock file to use for posix locking."""
return '%s.lock' % filename
try:
import fcntl
class _FcntlOpener(_Opener):
"""Open, lock, and unlock a file using fcntl.lockf."""
def open_and_lock(self, timeout, delay):
"""Open the file and lock it.
Args:
timeout: float, How long to try to lock for.
delay: float, How long to wait between retries
Raises:
AlreadyLockedException: if the lock is already acquired.
IOError: if the open fails.
"""
if self._locked:
raise AlreadyLockedException('File %s is already locked' %
self._filename)
start_time = time.time()
try:
self._fh = open(self._filename, self._mode)
except IOError, e:
# If we can't access with _mode, try _fallback_mode and don't lock.
if e.errno == errno.EACCES:
self._fh = open(self._filename, self._fallback_mode)
return
# We opened in _mode, try to lock the file.
while True:
try:
fcntl.lockf(self._fh.fileno(), fcntl.LOCK_EX)
self._locked = True
return
except IOError, e:
# If not retrying, then just pass on the error.
if timeout == 0:
raise e
if e.errno != errno.EACCES:
raise e
# We could not acquire the lock. Try again.
if (time.time() - start_time) >= timeout:
logger.warn('Could not lock %s in %s seconds' % (
self._filename, timeout))
if self._fh:
self._fh.close()
self._fh = open(self._filename, self._fallback_mode)
return
time.sleep(delay)
def unlock_and_close(self):
"""Close and unlock the file using the fcntl.lockf primitive."""
if self._locked:
fcntl.lockf(self._fh.fileno(), fcntl.LOCK_UN)
self._locked = False
if self._fh:
self._fh.close()
except ImportError:
_FcntlOpener = None
try:
import pywintypes
import win32con
import win32file
class _Win32Opener(_Opener):
"""Open, lock, and unlock a file using windows primitives."""
# Error #33:
# 'The process cannot access the file because another process'
FILE_IN_USE_ERROR = 33
# Error #158:
# 'The segment is already unlocked.'
FILE_ALREADY_UNLOCKED_ERROR = 158
def open_and_lock(self, timeout, delay):
"""Open the file and lock it.
Args:
timeout: float, How long to try to lock for.
delay: float, How long to wait between retries
Raises:
AlreadyLockedException: if the lock is already acquired.
IOError: if the open fails.
"""
if self._locked:
raise AlreadyLockedException('File %s is already locked' %
self._filename)
start_time = time.time()
try:
self._fh = open(self._filename, self._mode)
except IOError, e:
# If we can't access with _mode, try _fallback_mode and don't lock.
if e.errno == errno.EACCES:
self._fh = open(self._filename, self._fallback_mode)
return
# We opened in _mode, try to lock the file.
while True:
try:
hfile = win32file._get_osfhandle(self._fh.fileno())
win32file.LockFileEx(
hfile,
(win32con.LOCKFILE_FAIL_IMMEDIATELY|
win32con.LOCKFILE_EXCLUSIVE_LOCK), 0, -0x10000,
pywintypes.OVERLAPPED())
self._locked = True
return
except pywintypes.error, e:
if timeout == 0:
raise e
# If the error is not that the file is already in use, raise.
if e[0] != _Win32Opener.FILE_IN_USE_ERROR:
raise
# We could not acquire the lock. Try again.
if (time.time() - start_time) >= timeout:
logger.warn('Could not lock %s in %s seconds' % (
self._filename, timeout))
if self._fh:
self._fh.close()
self._fh = open(self._filename, self._fallback_mode)
return
time.sleep(delay)
def unlock_and_close(self):
"""Close and unlock the file using the win32 primitive."""
if self._locked:
try:
hfile = win32file._get_osfhandle(self._fh.fileno())
win32file.UnlockFileEx(hfile, 0, -0x10000, pywintypes.OVERLAPPED())
except pywintypes.error, e:
if e[0] != _Win32Opener.FILE_ALREADY_UNLOCKED_ERROR:
raise
self._locked = False
if self._fh:
self._fh.close()
except ImportError:
_Win32Opener = None
class LockedFile(object):
  """Represent a file that has exclusive access."""

  def __init__(self, filename, mode, fallback_mode, use_native_locking=True):
    """Construct a LockedFile.

    Args:
      filename: string, The path of the file to open.
      mode: string, The mode to try to open the file with.
      fallback_mode: string, The mode to use if locking fails.
      use_native_locking: bool, Whether or not fcntl/win32 locking is used.
    """
    opener = None
    if use_native_locking:
      # Each opener class is None when its backing module failed to import.
      # elif avoids constructing an opener only to discard it (the previous
      # second `if` overwrote a freshly-built _Win32Opener whenever fcntl
      # was also importable); fcntl keeps its original precedence.
      if _FcntlOpener:
        opener = _FcntlOpener(filename, mode, fallback_mode)
      elif _Win32Opener:
        opener = _Win32Opener(filename, mode, fallback_mode)
    if not opener:
      # Portable fallback based on '.lock' sentinel files.
      opener = _PosixOpener(filename, mode, fallback_mode)
    self._opener = opener

  def filename(self):
    """Return the filename we were constructed with."""
    # Delegate through the public accessor like the sibling methods,
    # instead of reaching into the opener's private attribute.
    return self._opener.filename()

  def file_handle(self):
    """Return the file_handle to the opened file."""
    return self._opener.file_handle()

  def is_locked(self):
    """Return whether we successfully locked the file."""
    return self._opener.is_locked()

  def open_and_lock(self, timeout=0, delay=0.05):
    """Open the file, trying to lock it.

    Args:
      timeout: float, The number of seconds to try to acquire the lock.
      delay: float, The number of seconds to wait between retry attempts.

    Raises:
      AlreadyLockedException: if the lock is already acquired.
      IOError: if the open fails.
    """
    self._opener.open_and_lock(timeout, delay)

  def unlock_and_close(self):
    """Unlock and close a file."""
    self._opener.unlock_and_close()
| Python |
# Copyright 2011 Google Inc. All Rights Reserved.
"""Multi-credential file store with lock support.
This module implements a JSON credential store where multiple
credentials can be stored in one file. That file supports locking
both in a single process and across processes.
The credential themselves are keyed off of:
* client_id
* user_agent
* scope
The format of the stored data is like so:
{
'file_version': 1,
'data': [
{
'key': {
'clientId': '<client id>',
'userAgent': '<user agent>',
'scope': '<scope>'
},
'credential': {
# JSON serialized Credentials.
}
}
]
}
"""
__author__ = 'jbeda@google.com (Joe Beda)'
import base64
import errno
import logging
import os
import threading
from anyjson import simplejson
from client import Storage as BaseStorage
from client import Credentials
from locked_file import LockedFile
logger = logging.getLogger(__name__)
# A dict from 'filename'->_MultiStore instances
_multistores = {}
_multistores_lock = threading.Lock()
class Error(Exception):
  """Base exception for everything raised by this module."""
class NewerCredentialStoreError(Error):
"""The credential store is a newer version that supported."""
pass
def get_credential_storage(filename, client_id, user_agent, scope,
                           warn_on_readonly=True):
  """Get a Storage instance for a credential.
  Args:
    filename: The JSON file storing a set of credentials
    client_id: The client_id for the credential
    user_agent: The user agent for the credential
    scope: string or list of strings, Scope(s) being requested
    warn_on_readonly: if True, log a warning if the store is readonly
  Returns:
    An object derived from client.Storage for getting/setting the
    credential.
  """
  # Normalize the path so different spellings of the same file map to a
  # single _MultiStore instance (and therefore a single lock).
  filename = os.path.realpath(os.path.expanduser(filename))
  # The lock is a context manager; this replaces the manual
  # acquire()/try/finally/release() dance with the idiomatic form.
  with _multistores_lock:
    multistore = _multistores.setdefault(
        filename, _MultiStore(filename, warn_on_readonly))
  # Multiple scopes are keyed as a single space-delimited string.
  # isinstance (rather than type(...) is list) also accepts list subclasses.
  if isinstance(scope, list):
    scope = ' '.join(scope)
  return multistore._get_storage(client_id, user_agent, scope)
class _MultiStore(object):
  """A file backed store for multiple credentials.
  One instance exists per backing file (see get_credential_storage); all
  access is serialized through a thread lock plus a file lock.
  """
  def __init__(self, filename, warn_on_readonly=True):
    """Initialize the class.
    This will create the file if necessary.
    """
    # 'r+b' requires the file to exist; _create_file_if_needed ensures that.
    self._file = LockedFile(filename, 'r+b', 'rb')
    self._thread_lock = threading.Lock()
    self._read_only = False
    self._warn_on_readonly = warn_on_readonly
    self._create_file_if_needed()
    # Cache of deserialized store. This is only valid after the
    # _MultiStore is locked or _refresh_data_cache is called. This is
    # of the form of:
    #
    # (client_id, user_agent, scope) -> OAuth2Credential
    #
    # If this is None, then the store hasn't been read yet.
    self._data = None
  class _Storage(BaseStorage):
    """A Storage object that knows how to read/write a single credential."""
    def __init__(self, multistore, client_id, user_agent, scope):
      self._multistore = multistore
      self._client_id = client_id
      self._user_agent = user_agent
      self._scope = scope
    def acquire_lock(self):
      """Acquires any lock necessary to access this Storage.
      This lock is not reentrant.
      """
      self._multistore._lock()
    def release_lock(self):
      """Release the Storage lock.
      Trying to release a lock that isn't held will result in a
      RuntimeError.
      """
      self._multistore._unlock()
    def locked_get(self):
      """Retrieve credential.
      The Storage lock must be held when this is called.
      Returns:
        oauth2client.client.Credentials
      """
      credential = self._multistore._get_credential(
          self._client_id, self._user_agent, self._scope)
      if credential:
        # Attach ourselves so refreshed tokens are written back here.
        credential.set_store(self)
      return credential
    def locked_put(self, credentials):
      """Write a credential.
      The Storage lock must be held when this is called.
      Args:
        credentials: Credentials, the credentials to store.
      """
      self._multistore._update_credential(credentials, self._scope)
    def locked_delete(self):
      """Delete the credential identified by this Storage's key.
      The Storage lock must be held when this is called.
      """
      self._multistore._delete_credential(self._client_id, self._user_agent,
                                          self._scope)
  def _create_file_if_needed(self):
    """Create an empty file if necessary.
    This method will not initialize the file. Instead it implements a
    simple version of "touch" to ensure the file has been created.
    """
    if not os.path.exists(self._file.filename()):
      # Restrict the new file to owner read/write: credentials are secrets.
      old_umask = os.umask(0177)
      try:
        open(self._file.filename(), 'a+b').close()
      finally:
        os.umask(old_umask)
  def _lock(self):
    """Lock the entire multistore."""
    # Thread lock first (in-process), then the cross-process file lock.
    self._thread_lock.acquire()
    self._file.open_and_lock()
    if not self._file.is_locked():
      self._read_only = True
      if self._warn_on_readonly:
        logger.warn('The credentials file (%s) is not writable. Opening in '
                    'read-only mode. Any refreshed credentials will only be '
                    'valid for this run.' % self._file.filename())
    if os.path.getsize(self._file.filename()) == 0:
      logger.debug('Initializing empty multistore file')
      # The multistore is empty so write out an empty file.
      self._data = {}
      self._write()
    elif not self._read_only or self._data is None:
      # Only refresh the data if we are read/write or we haven't
      # cached the data yet. If we are readonly, we assume it isn't
      # changing out from under us and that we only have to read it
      # once. This prevents us from whacking any new access keys that
      # we have cached in memory but were unable to write out.
      self._refresh_data_cache()
  def _unlock(self):
    """Release the lock on the multistore."""
    # Reverse order of _lock: file lock first, then the thread lock.
    self._file.unlock_and_close()
    self._thread_lock.release()
  def _locked_json_read(self):
    """Get the raw content of the multistore file.
    The multistore must be locked when this is called.
    Returns:
      The contents of the multistore decoded as JSON.
    """
    assert self._thread_lock.locked()
    self._file.file_handle().seek(0)
    return simplejson.load(self._file.file_handle())
  def _locked_json_write(self, data):
    """Write a JSON serializable data structure to the multistore.
    The multistore must be locked when this is called.
    Args:
      data: The data to be serialized and written.
    """
    assert self._thread_lock.locked()
    if self._read_only:
      # Best-effort: silently skip persisting when the file isn't writable.
      return
    self._file.file_handle().seek(0)
    simplejson.dump(data, self._file.file_handle(), sort_keys=True, indent=2)
    # Drop any leftover bytes if the new serialization is shorter.
    self._file.file_handle().truncate()
  def _refresh_data_cache(self):
    """Refresh the contents of the multistore.
    The multistore must be locked when this is called.
    Raises:
      NewerCredentialStoreError: Raised when a newer client has written the
        store.
    """
    self._data = {}
    try:
      raw_data = self._locked_json_read()
    except Exception:
      logger.warn('Credential data store could not be loaded. '
                  'Will ignore and overwrite.')
      return
    version = 0
    try:
      version = raw_data['file_version']
    except Exception:
      logger.warn('Missing version for credential data store. It may be '
                  'corrupt or an old version. Overwriting.')
    if version > 1:
      raise NewerCredentialStoreError(
          'Credential file has file_version of %d. '
          'Only file_version of 1 is supported.' % version)
    credentials = []
    try:
      credentials = raw_data['data']
    except (TypeError, KeyError):
      pass
    for cred_entry in credentials:
      try:
        (key, credential) = self._decode_credential_from_json(cred_entry)
        self._data[key] = credential
      except:
        # If something goes wrong loading a credential, just ignore it
        logger.info('Error decoding credential, skipping', exc_info=True)
  def _decode_credential_from_json(self, cred_entry):
    """Load a credential from our JSON serialization.
    Args:
      cred_entry: A dict entry from the data member of our format
    Returns:
      (key, cred) where the key is the key tuple and the cred is the
        OAuth2Credential object.
    """
    raw_key = cred_entry['key']
    client_id = raw_key['clientId']
    user_agent = raw_key['userAgent']
    scope = raw_key['scope']
    key = (client_id, user_agent, scope)
    # NOTE(review): the None assignment below is dead code; kept as-is.
    credential = None
    credential = Credentials.new_from_json(simplejson.dumps(cred_entry['credential']))
    return (key, credential)
  def _write(self):
    """Write the cached data back out.
    The multistore must be locked.
    """
    raw_data = {'file_version': 1}
    raw_creds = []
    raw_data['data'] = raw_creds
    for (cred_key, cred) in self._data.items():
      raw_key = {
          'clientId': cred_key[0],
          'userAgent': cred_key[1],
          'scope': cred_key[2]
          }
      # Round-trip through the credential's own JSON so the stored form
      # matches what Credentials.new_from_json expects on read.
      raw_cred = simplejson.loads(cred.to_json())
      raw_creds.append({'key': raw_key, 'credential': raw_cred})
    self._locked_json_write(raw_data)
  def _get_credential(self, client_id, user_agent, scope):
    """Get a credential from the multistore.
    The multistore must be locked.
    Args:
      client_id: The client_id for the credential
      user_agent: The user agent for the credential
      scope: A string for the scope(s) being requested
    Returns:
      The credential specified or None if not present
    """
    key = (client_id, user_agent, scope)
    return self._data.get(key, None)
  def _update_credential(self, cred, scope):
    """Update a credential and write the multistore.
    This must be called when the multistore is locked.
    Args:
      cred: The OAuth2Credential to update/set
      scope: The scope(s) that this credential covers
    """
    key = (cred.client_id, cred.user_agent, scope)
    self._data[key] = cred
    self._write()
  def _delete_credential(self, client_id, user_agent, scope):
    """Delete a credential and write the multistore.
    This must be called when the multistore is locked.
    Args:
      client_id: The client_id for the credential
      user_agent: The user agent for the credential
      scope: The scope(s) that this credential covers
    """
    key = (client_id, user_agent, scope)
    try:
      del self._data[key]
    except KeyError:
      # Deleting a missing credential is a no-op (still rewrites the file).
      pass
    self._write()
  def _get_storage(self, client_id, user_agent, scope):
    """Get a Storage object to get/set a credential.
    This Storage is a 'view' into the multistore.
    Args:
      client_id: The client_id for the credential
      user_agent: The user agent for the credential
      scope: A string for the scope(s) being requested
    Returns:
      A Storage object that can be used to get/set this cred
    """
    return self._Storage(self, client_id, user_agent, scope)
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility module to import a JSON module
Hides all the messy details of exactly where
we get a simplejson module from.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
try: # pragma: no cover
# Should work for Python2.6 and higher.
import json as simplejson
except ImportError: # pragma: no cover
try:
import simplejson
except ImportError:
# Try to import from django, should work on App Engine
from django.utils import simplejson
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line tools for authenticating via OAuth 2.0
Do the OAuth 2.0 Web Server dance for a command line application. Stores the
generated credentials in a common file that is used by other example apps in
the same directory.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
__all__ = ['run']
import BaseHTTPServer
import gflags
import socket
import sys
import webbrowser
from client import FlowExchangeError
from client import OOB_CALLBACK_URN
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
FLAGS = gflags.FLAGS
# Flags controlling how the OAuth 2.0 authorization redirect is received:
# via a short-lived local web server, or out-of-band copy/paste.
gflags.DEFINE_boolean('auth_local_webserver', True,
                      ('Run a local web server to handle redirects during '
                       'OAuth authorization.'))
gflags.DEFINE_string('auth_host_name', 'localhost',
                     ('Host name to use when running a local web server to '
                      'handle redirects during OAuth authorization.'))
# Candidate ports, tried in order until one can be bound (see run()).
gflags.DEFINE_multi_int('auth_host_port', [8080, 8090],
                        ('Port to use when running a local web server to '
                         'handle redirects during OAuth authorization.'))
class ClientRedirectServer(BaseHTTPServer.HTTPServer):
  """HTTP server that captures a single OAuth 2.0 redirect to localhost.
  The handler parses the redirect's query string into query_params; the
  caller serves exactly one request and then inspects that dict.
  """
  # Filled in by ClientRedirectHandler.do_GET with the parsed query string.
  query_params = {}
class ClientRedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
  """Request handler for the OAuth 2.0 redirect back to localhost.
  Stores the parsed query parameters on the owning server and renders a
  short completion page, after which the caller stops serving.
  """
  def do_GET(self):
    """Handle the redirect GET request.
    Parses the query string into self.server.query_params and writes a
    completion message. Whether the flow actually succeeded cannot be
    detected here; the caller inspects query_params for 'code'/'error'.
    """
    self.send_response(200)
    self.send_header("Content-type", "text/html")
    self.end_headers()
    # Everything after the first '?' (or the whole path if there is none).
    raw_query = self.path.split('?', 1)[-1]
    self.server.query_params = dict(parse_qsl(raw_query))
    self.wfile.write("<html><head><title>Authentication Status</title></head>")
    self.wfile.write("<body><p>The authentication flow has completed.</p>")
    self.wfile.write("</body></html>")
  def log_message(self, format, *args):
    """Do not log messages to stdout while running as command line program."""
    pass
def run(flow, storage, http=None):
"""Core code for a command-line application.
Args:
flow: Flow, an OAuth 2.0 Flow to step through.
storage: Storage, a Storage to store the credential in.
http: An instance of httplib2.Http.request
or something that acts like it.
Returns:
Credentials, the obtained credential.
"""
if FLAGS.auth_local_webserver:
success = False
port_number = 0
for port in FLAGS.auth_host_port:
port_number = port
try:
httpd = ClientRedirectServer((FLAGS.auth_host_name, port),
ClientRedirectHandler)
except socket.error, e:
pass
else:
success = True
break
FLAGS.auth_local_webserver = success
if not success:
print 'Failed to start a local webserver listening on either port 8080'
print 'or port 9090. Please check your firewall settings and locally'
print 'running programs that may be blocking or using those ports.'
print
print 'Falling back to --noauth_local_webserver and continuing with',
print 'authorization.'
print
if FLAGS.auth_local_webserver:
oauth_callback = 'http://%s:%s/' % (FLAGS.auth_host_name, port_number)
else:
oauth_callback = OOB_CALLBACK_URN
authorize_url = flow.step1_get_authorize_url(oauth_callback)
if FLAGS.auth_local_webserver:
webbrowser.open(authorize_url, new=1, autoraise=True)
print 'Your browser has been opened to visit:'
print
print ' ' + authorize_url
print
print 'If your browser is on a different machine then exit and re-run this'
print 'application with the command-line parameter '
print
print ' --noauth_local_webserver'
print
else:
print 'Go to the following link in your browser:'
print
print ' ' + authorize_url
print
code = None
if FLAGS.auth_local_webserver:
httpd.handle_request()
if 'error' in httpd.query_params:
sys.exit('Authentication request was rejected.')
if 'code' in httpd.query_params:
code = httpd.query_params['code']
else:
print 'Failed to find "code" in the query parameters of the redirect.'
sys.exit('Try running with --noauth_local_webserver.')
else:
code = raw_input('Enter verification code: ').strip()
try:
credential = flow.step2_exchange(code, http)
except FlowExchangeError, e:
sys.exit('Authentication has failed: %s' % e)
storage.put(credential)
credential.set_store(storage)
print 'Authentication successful.'
return credential
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An OAuth 2.0 client.
Tools for interacting with OAuth 2.0 protected resources.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import base64
import clientsecrets
import copy
import datetime
import httplib2
import logging
import os
import sys
import time
import urllib
import urlparse
from anyjson import simplejson
HAS_OPENSSL = False
try:
from oauth2client.crypt import Signer
from oauth2client.crypt import make_signed_jwt
from oauth2client.crypt import verify_signed_jwt_with_certs
HAS_OPENSSL = True
except ImportError:
pass
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
logger = logging.getLogger(__name__)
# Expiry is stored in RFC3339 UTC format
EXPIRY_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
# Which certs to use to validate id_tokens received.
# NOTE(review): the name misspells "VERIFICATION", but it is part of the
# module's public surface — renaming it would break importers.
ID_TOKEN_VERIFICATON_CERTS = 'https://www.googleapis.com/oauth2/v1/certs'
# Constant to use for the out of band OAuth 2.0 flow.
OOB_CALLBACK_URN = 'urn:ietf:wg:oauth:2.0:oob'
class Error(Exception):
  """Root of the exception hierarchy raised by this module."""
class FlowExchangeError(Error):
  """Exchanging an authorization grant for an access token failed."""
class AccessTokenRefreshError(Error):
  """Refreshing an expired access token failed."""
class UnknownClientSecretsFlowError(Error):
  """The client secrets file requested an unrecognized OAuth 2.0 flow type."""
class AccessTokenCredentialsError(Error):
  """Only an access_token is available, so no refresh is possible."""
class VerifyJwtTokenError(Error):
  """Could not retrieve certificates needed for id_token validation."""
def _abstract():
raise NotImplementedError('You need to override this function')
class MemoryCache(object):
  """In-process cache exposing the httplib2 cache interface."""
  def __init__(self):
    # Backing store; kept as a plain dict on the public 'cache' attribute.
    self.cache = {}
  def get(self, key):
    """Return the cached value for key, or None when absent."""
    return self.cache.get(key)
  def set(self, key, value):
    """Store value under key, overwriting any previous entry."""
    self.cache[key] = value
  def delete(self, key):
    """Remove key if present; deleting a missing key is a no-op."""
    try:
      del self.cache[key]
    except KeyError:
      pass
class Credentials(object):
  """Base class for all Credentials objects.
  Subclasses must define an authorize() method that applies the credentials to
  an HTTP transport.
  Subclasses must also specify a classmethod named 'from_json' that takes a JSON
  string as input and returns an instantiated Credentials object.
  """
  # Instance attributes that are never written into the JSON serialization.
  NON_SERIALIZED_MEMBERS = ['store']
  def authorize(self, http):
    """Take an httplib2.Http instance (or equivalent) and
    authorizes it for the set of credentials, usually by
    replacing http.request() with a method that adds in
    the appropriate headers and then delegates to the original
    Http.request() method.
    """
    _abstract()
  def refresh(self, http):
    """Forces a refresh of the access_token.
    Args:
      http: httplib2.Http, an http object to be used to make the refresh
        request.
    """
    _abstract()
  def apply(self, headers):
    """Add the authorization to the headers.
    Args:
      headers: dict, the headers to add the Authorization header to.
    """
    _abstract()
  def _to_json(self, strip):
    """Utility function for creating a JSON representation of an instance of Credentials.
    Args:
      strip: array, An array of names of members to not include in the JSON.
    Returns:
      string, a JSON representation of this instance, suitable to pass to
      from_json().
    """
    t = type(self)
    d = copy.copy(self.__dict__)
    for member in strip:
      if member in d:
        del d[member]
    # datetime objects are not JSON serializable; store expiry as a string.
    if 'token_expiry' in d and isinstance(d['token_expiry'], datetime.datetime):
      d['token_expiry'] = d['token_expiry'].strftime(EXPIRY_FORMAT)
    # Add in information we will need later to reconstitute this instance.
    d['_class'] = t.__name__
    d['_module'] = t.__module__
    return simplejson.dumps(d)
  def to_json(self):
    """Creating a JSON representation of an instance of Credentials.
    Returns:
      string, a JSON representation of this instance, suitable to pass to
      from_json().
    """
    return self._to_json(Credentials.NON_SERIALIZED_MEMBERS)
  @classmethod
  def new_from_json(cls, s):
    """Utility class method to instantiate a Credentials subclass from a JSON
    representation produced by to_json().
    Args:
      s: string, JSON from to_json().
    Returns:
      An instance of the subclass of Credentials that was serialized with
      to_json().
    """
    data = simplejson.loads(s)
    # Find and call the right classmethod from_json() to restore the object.
    module = data['_module']
    try:
      # First import verifies the module path is importable at all.
      m = __import__(module)
    except ImportError:
      # In case there's an object from the old package structure, update it
      module = module.replace('.apiclient', '')
      m = __import__(module)
    # Second import with fromlist binds m to the leaf module, not the
    # top-level package the bare __import__ above returns.
    m = __import__(module, fromlist=module.split('.')[:-1])
    kls = getattr(m, data['_class'])
    from_json = getattr(kls, 'from_json')
    return from_json(s)
  @classmethod
  def from_json(cls, s):
    """Instantiate a Credentials object from a JSON description of it.
    The JSON should have been produced by calling .to_json() on the object.
    Args:
      data: dict, A deserialized JSON object.
    Returns:
      An instance of a Credentials subclass.
    """
    # Base implementation is a stub; subclasses override with real parsing.
    return Credentials()
class Flow(object):
  """Base class for all Flow objects."""
class Storage(object):
  """Base class for all Storage objects.
  Store and retrieve a single credential. This class supports locking
  such that multiple processes and threads can operate on a single
  store.
  """
  def acquire_lock(self):
    """Acquires any lock necessary to access this Storage.
    This lock is not reentrant.
    """
    # Base implementation is lock-free; subclasses add real locking.
    pass
  def release_lock(self):
    """Release the Storage lock.
    Trying to release a lock that isn't held will result in a
    RuntimeError.
    """
    pass
  def locked_get(self):
    """Retrieve credential.
    The Storage lock must be held when this is called.
    Returns:
      oauth2client.client.Credentials
    """
    _abstract()
  def locked_put(self, credentials):
    """Write a credential.
    The Storage lock must be held when this is called.
    Args:
      credentials: Credentials, the credentials to store.
    """
    _abstract()
  def locked_delete(self):
    """Delete a credential.
    The Storage lock must be held when this is called.
    """
    _abstract()
  def get(self):
    """Retrieve credential.
    The Storage lock must *not* be held when this is called.
    Returns:
      oauth2client.client.Credentials
    """
    self.acquire_lock()
    try:
      return self.locked_get()
    finally:
      self.release_lock()
  def put(self, credentials):
    """Write a credential.
    The Storage lock must *not* be held when this is called; this method
    acquires and releases the lock itself.
    Args:
      credentials: Credentials, the credentials to store.
    """
    self.acquire_lock()
    try:
      self.locked_put(credentials)
    finally:
      self.release_lock()
  def delete(self):
    """Delete credential.
    Frees any resources associated with storing the credential.
    The Storage lock must *not* be held when this is called.
    Returns:
      None
    """
    self.acquire_lock()
    try:
      return self.locked_delete()
    finally:
      self.release_lock()
class OAuth2Credentials(Credentials):
  """Credentials object for OAuth 2.0.
  Credentials can be applied to an httplib2.Http object using the authorize()
  method, which then adds the OAuth 2.0 access token to each request.
  OAuth2Credentials objects may be safely pickled and unpickled.
  """
  def __init__(self, access_token, client_id, client_secret, refresh_token,
               token_expiry, token_uri, user_agent, id_token=None):
    """Create an instance of OAuth2Credentials.
    This constructor is not usually called by the user, instead
    OAuth2Credentials objects are instantiated by the OAuth2WebServerFlow.
    Args:
      access_token: string, access token.
      client_id: string, client identifier.
      client_secret: string, client secret.
      refresh_token: string, refresh token.
      token_expiry: datetime, when the access_token expires.
      token_uri: string, URI of token endpoint.
      user_agent: string, The HTTP User-Agent to provide for this application.
      id_token: object, The identity of the resource owner.
    Notes:
      store: callable, A callable that when passed a Credential
        will store the credential back to where it came from.
        This is needed to store the latest access_token if it
        has expired and been refreshed.
    """
    self.access_token = access_token
    self.client_id = client_id
    self.client_secret = client_secret
    self.refresh_token = refresh_token
    # Not serialized (see NON_SERIALIZED_MEMBERS); set via set_store().
    self.store = None
    self.token_expiry = token_expiry
    self.token_uri = token_uri
    self.user_agent = user_agent
    self.id_token = id_token
    # True if the credentials have been revoked or expired and can't be
    # refreshed.
    self.invalid = False
  def authorize(self, http):
    """Authorize an httplib2.Http instance with these credentials.
    The modified http.request method will add authentication headers to each
    request and will refresh access_tokens when a 401 is received on a
    request. In addition the http.request method has a credentials property,
    http.request.credentials, which is the Credentials object that authorized
    it.
    Args:
      http: An instance of httplib2.Http
        or something that acts like it.
    Returns:
      A modified instance of http that was passed in.
    Example:
      h = httplib2.Http()
      h = credentials.authorize(h)
    You can't create a new OAuth subclass of httplib2.Authenication
    because it never gets passed the absolute URI, which is needed for
    signing. So instead we have to overload 'request' with a closure
    that adds in the Authorization header and then calls the original
    version of 'request()'.
    """
    request_orig = http.request
    # The closure that will replace 'httplib2.Http.request'.
    def new_request(uri, method='GET', body=None, headers=None,
                    redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                    connection_type=None):
      if not self.access_token:
        logger.info('Attempting refresh to obtain initial access_token')
        self._refresh(request_orig)
      # Modify the request headers to add the appropriate
      # Authorization header.
      if headers is None:
        headers = {}
      self.apply(headers)
      if self.user_agent is not None:
        if 'user-agent' in headers:
          headers['user-agent'] = self.user_agent + ' ' + headers['user-agent']
        else:
          headers['user-agent'] = self.user_agent
      resp, content = request_orig(uri, method, body, headers,
                                   redirections, connection_type)
      if resp.status == 401:
        # Token likely expired server-side: refresh once and retry the
        # request a single time with the new token.
        logger.info('Refreshing due to a 401')
        self._refresh(request_orig)
        self.apply(headers)
        return request_orig(uri, method, body, headers,
                            redirections, connection_type)
      else:
        return (resp, content)
    # Replace the request method with our own closure.
    http.request = new_request
    # Set credentials as a property of the request method.
    setattr(http.request, 'credentials', self)
    return http
  def refresh(self, http):
    """Forces a refresh of the access_token.
    Args:
      http: httplib2.Http, an http object to be used to make the refresh
        request.
    """
    self._refresh(http.request)
  def apply(self, headers):
    """Add the authorization to the headers.
    Args:
      headers: dict, the headers to add the Authorization header to.
    """
    headers['Authorization'] = 'Bearer ' + self.access_token
  def to_json(self):
    """Serialize to JSON, omitting the non-serializable store member."""
    return self._to_json(Credentials.NON_SERIALIZED_MEMBERS)
  @classmethod
  def from_json(cls, s):
    """Instantiate a Credentials object from a JSON description of it. The JSON
    should have been produced by calling .to_json() on the object.
    Args:
      data: dict, A deserialized JSON object.
    Returns:
      An instance of a Credentials subclass.
    """
    data = simplejson.loads(s)
    if 'token_expiry' in data and not isinstance(data['token_expiry'],
        datetime.datetime):
      try:
        data['token_expiry'] = datetime.datetime.strptime(
            data['token_expiry'], EXPIRY_FORMAT)
      except:
        # An unparseable expiry is treated as "never expires".
        data['token_expiry'] = None
    retval = OAuth2Credentials(
        data['access_token'],
        data['client_id'],
        data['client_secret'],
        data['refresh_token'],
        data['token_expiry'],
        data['token_uri'],
        data['user_agent'],
        data.get('id_token', None))
    retval.invalid = data['invalid']
    return retval
  @property
  def access_token_expired(self):
    """True if the credential is expired or invalid.
    If the token_expiry isn't set, we assume the token doesn't expire.
    """
    if self.invalid:
      return True
    if not self.token_expiry:
      return False
    now = datetime.datetime.utcnow()
    if now >= self.token_expiry:
      logger.info('access_token is expired. Now: %s, token_expiry: %s',
                  now, self.token_expiry)
      return True
    return False
  def set_store(self, store):
    """Set the Storage for the credential.
    Args:
      store: Storage, an implementation of Storage object.
        This is needed to store the latest access_token if it
        has expired and been refreshed. This implementation uses
        locking to check for updates before updating the
        access_token.
    """
    self.store = store
  def _updateFromCredential(self, other):
    """Update this Credential from another instance."""
    self.__dict__.update(other.__getstate__())
  def __getstate__(self):
    """Trim the state down to something that can be pickled."""
    d = copy.copy(self.__dict__)
    # The store may hold unpicklable resources (locks, file handles).
    del d['store']
    return d
  def __setstate__(self, state):
    """Reconstitute the state of the object from being pickled."""
    self.__dict__.update(state)
    self.store = None
  def _generate_refresh_request_body(self):
    """Generate the body that will be used in the refresh request."""
    body = urllib.urlencode({
        'grant_type': 'refresh_token',
        'client_id': self.client_id,
        'client_secret': self.client_secret,
        'refresh_token': self.refresh_token,
        })
    return body
  def _generate_refresh_request_headers(self):
    """Generate the headers that will be used in the refresh request."""
    headers = {
        'content-type': 'application/x-www-form-urlencoded',
    }
    if self.user_agent is not None:
      headers['user-agent'] = self.user_agent
    return headers
  def _refresh(self, http_request):
    """Refreshes the access_token.
    This method first checks by reading the Storage object if available.
    If a refresh is still needed, it holds the Storage lock until the
    refresh is completed.
    Args:
      http_request: callable, a callable that matches the method signature of
        httplib2.Http.request, used to make the refresh request.
    Raises:
      AccessTokenRefreshError: When the refresh fails.
    """
    if not self.store:
      self._do_refresh_request(http_request)
    else:
      self.store.acquire_lock()
      try:
        new_cred = self.store.locked_get()
        # Another process or thread may already have refreshed this
        # credential; prefer its token over issuing a second refresh.
        if (new_cred and not new_cred.invalid and
            new_cred.access_token != self.access_token):
          logger.info('Updated access_token read from Storage')
          self._updateFromCredential(new_cred)
        else:
          self._do_refresh_request(http_request)
      finally:
        self.store.release_lock()
  def _do_refresh_request(self, http_request):
    """Refresh the access_token using the refresh_token.
    Args:
      http_request: callable, a callable that matches the method signature of
        httplib2.Http.request, used to make the refresh request.
    Raises:
      AccessTokenRefreshError: When the refresh fails.
    """
    body = self._generate_refresh_request_body()
    headers = self._generate_refresh_request_headers()
    logger.info('Refreshing access_token')
    resp, content = http_request(
        self.token_uri, method='POST', body=body, headers=headers)
    if resp.status == 200:
      # TODO(jcgregorio) Raise an error if loads fails?
      d = simplejson.loads(content)
      self.access_token = d['access_token']
      self.refresh_token = d.get('refresh_token', self.refresh_token)
      if 'expires_in' in d:
        self.token_expiry = datetime.timedelta(
            seconds=int(d['expires_in'])) + datetime.datetime.utcnow()
      else:
        self.token_expiry = None
      # Persist the refreshed token so other holders of this store see it.
      if self.store:
        self.store.locked_put(self)
    else:
      # An {'error':...} response body means the token is expired or revoked,
      # so we flag the credentials as such.
      logger.info('Failed to retrieve access token: %s' % content)
      error_msg = 'Invalid response %s.' % resp['status']
      try:
        d = simplejson.loads(content)
        if 'error' in d:
          error_msg = d['error']
          self.invalid = True
          if self.store:
            self.store.locked_put(self)
      except:
        # Non-JSON error body: fall through with the generic message.
        pass
      raise AccessTokenRefreshError(error_msg)
class AccessTokenCredentials(OAuth2Credentials):
  """Credentials object for OAuth 2.0.
  Credentials can be applied to an httplib2.Http object using the
  authorize() method, which then signs each request from that object
  with the OAuth 2.0 access token. This set of credentials is for the
  use case where you have acquired an OAuth 2.0 access_token from
  another place such as a JavaScript client or another web
  application, and wish to use it from Python. Because only the
  access_token is present it can not be refreshed and will in time
  expire.
  AccessTokenCredentials objects may be safely pickled and unpickled.
  Usage:
    credentials = AccessTokenCredentials('<an access token>',
      'my-user-agent/1.0')
    http = httplib2.Http()
    http = credentials.authorize(http)
  Exceptions:
    AccessTokenCredentialsError: raised when the access_token expires or is
      revoked.
  """
  def __init__(self, access_token, user_agent):
    """Create an instance of OAuth2Credentials
    This is one of the few types of Credentials that you should construct;
    Credentials objects are usually instantiated by a Flow.
    Args:
      access_token: string, access token.
      user_agent: string, The HTTP User-Agent to provide for this application.
    Notes:
      store: callable, a callable that when passed a Credential
        will store the credential back to where it came from.
    """
    # No client id/secret, refresh token, expiry, or token URI: a bare
    # access token is all this credential type carries.
    super(AccessTokenCredentials, self).__init__(
        access_token,
        None,
        None,
        None,
        None,
        None,
        user_agent)
  @classmethod
  def from_json(cls, s):
    """Build an AccessTokenCredentials from its to_json() serialization."""
    data = simplejson.loads(s)
    retval = AccessTokenCredentials(
        data['access_token'],
        data['user_agent'])
    return retval
  def _refresh(self, http_request):
    # There is no refresh_token, so any refresh attempt is an error.
    raise AccessTokenCredentialsError(
        "The access_token is expired or invalid and can't be refreshed.")
class AssertionCredentials(OAuth2Credentials):
  """Abstract Credentials object used for OAuth 2.0 assertion grants.

  This credential does not require a flow to instantiate because it
  represents a two legged flow, and therefore has all of the required
  information to generate and refresh its own access tokens. Subclasses
  must implement _generate_assertion() to produce the assertion string.

  AssertionCredentials objects may be safely pickled and unpickled.
  """

  def __init__(self, assertion_type, user_agent,
               token_uri='https://accounts.google.com/o/oauth2/token',
               **unused_kwargs):
    """Constructor for AssertionFlowCredentials.

    Args:
      assertion_type: string, assertion type that will be declared to the
        auth server.
      user_agent: string, The HTTP User-Agent to provide for this application.
      token_uri: string, URI for token endpoint. For convenience defaults
        to Google's endpoints but any OAuth 2.0 provider can be used.
    """
    # No access/refresh token or client id/secret exists yet; tokens are
    # minted on demand from the generated assertion.
    super(AssertionCredentials, self).__init__(
        None, None, None, None, None, token_uri, user_agent)
    self.assertion_type = assertion_type

  def _generate_refresh_request_body(self):
    """Build the urlencoded POST body for an assertion token request."""
    return urllib.urlencode({
        'assertion_type': self.assertion_type,
        'assertion': self._generate_assertion(),
        'grant_type': 'assertion',
        })

  def _generate_assertion(self):
    """Generate the assertion string used in the access token request.

    Subclasses must override this.
    """
    _abstract()
if HAS_OPENSSL:
  # PyOpenSSL is not a prerequisite for oauth2client, so if it is missing then
  # don't create the SignedJwtAssertionCredentials or the verify_id_token()
  # method.

  class SignedJwtAssertionCredentials(AssertionCredentials):
    """Credentials object used for OAuth 2.0 Signed JWT assertion grants.

    This credential does not require a flow to instantiate because it
    represents a two legged flow, and therefore has all of the required
    information to generate and refresh its own access tokens.
    """

    MAX_TOKEN_LIFETIME_SECS = 3600  # 1 hour in seconds

    def __init__(self,
                 service_account_name,
                 private_key,
                 scope,
                 private_key_password='notasecret',
                 user_agent=None,
                 token_uri='https://accounts.google.com/o/oauth2/token',
                 **kwargs):
      """Constructor for SignedJwtAssertionCredentials.

      Args:
        service_account_name: string, id for account, usually an email
          address.
        private_key: string, private key in P12 format.
        scope: string or list of strings, scope(s) of the credentials being
          requested.
        private_key_password: string, password for private_key.
        user_agent: string, HTTP User-Agent to provide for this application.
        token_uri: string, URI for token endpoint. For convenience
          defaults to Google's endpoints but any OAuth 2.0 provider can be
          used.
        kwargs: kwargs, Additional parameters to add to the JWT token, for
          example prn=joe@xample.org.
      """
      super(SignedJwtAssertionCredentials, self).__init__(
          'http://oauth.net/grant_type/jwt/1.0/bearer',
          user_agent,
          token_uri=token_uri,
          )
      if isinstance(scope, list):
        scope = ' '.join(scope)
      self.scope = scope
      self.private_key = private_key
      self.private_key_password = private_key_password
      self.service_account_name = service_account_name
      self.kwargs = kwargs

    @classmethod
    def from_json(cls, s):
      """Reconstruct a SignedJwtAssertionCredentials from JSON.

      Bug fix: arguments are now passed by keyword. The previous version
      passed private_key_password where scope belonged (and vice versa)
      and handed the kwargs dict to the positional user_agent slot.
      """
      data = simplejson.loads(s)
      retval = SignedJwtAssertionCredentials(
          data['service_account_name'],
          data['private_key'],
          data['scope'],
          private_key_password=data['private_key_password'],
          user_agent=data['user_agent'],
          token_uri=data['token_uri'],
          **data['kwargs'])
      retval.invalid = data['invalid']
      return retval

    def _generate_assertion(self):
      """Generate the signed JWT assertion used in the access token request."""
      now = long(time.time())
      payload = {
          'aud': self.token_uri,
          'scope': self.scope,
          'iat': now,
          # Cap the token lifetime; the auth server rejects longer windows.
          'exp': now + SignedJwtAssertionCredentials.MAX_TOKEN_LIFETIME_SECS,
          'iss': self.service_account_name
      }
      payload.update(self.kwargs)
      logger.debug(str(payload))
      return make_signed_jwt(
          Signer.from_string(self.private_key, self.private_key_password),
          payload)
# Only used in verify_id_token(), which is always calling to the same URI
# for the certs. A single module-level Http with an in-memory cache means
# repeated verifications reuse the downloaded certificates.
_cached_http = httplib2.Http(MemoryCache())
def verify_id_token(id_token, audience, http=None,
    cert_uri=ID_TOKEN_VERIFICATON_CERTS):
  """Verifies a signed JWT id_token.

  Args:
    id_token: string, A Signed JWT.
    audience: string, The audience 'aud' that the token should be for.
    http: httplib2.Http, instance to use to make the HTTP request. Callers
      should supply an instance that has caching enabled.
    cert_uri: string, URI of the certificates in JSON format to
      verify the JWT against.

  Returns:
    The deserialized JSON in the JWT.

  Raises:
    oauth2client.crypt.AppIdentityError: if the JWT fails to verify.
    VerifyJwtTokenError: if the certificates could not be fetched.
  """
  if http is None:
    # Fall back to the module-level cached client for cert fetches.
    http = _cached_http

  resp, content = http.request(cert_uri)
  if resp.status != 200:
    raise VerifyJwtTokenError('Status code: %d' % resp.status)

  certs = simplejson.loads(content)
  return verify_signed_jwt_with_certs(id_token, certs, audience)
def _urlsafe_b64decode(b64string):
# Guard against unicode strings, which base64 can't handle.
b64string = b64string.encode('ascii')
padded = b64string + '=' * (4 - len(b64string) % 4)
return base64.urlsafe_b64decode(padded)
def _extract_id_token(id_token):
  """Extract the JSON payload from a JWT without verifying its signature.

  Args:
    id_token: string, OAuth 2.0 id_token in header.payload.signature form.

  Returns:
    object, The deserialized JSON payload.

  Raises:
    VerifyJwtTokenError: if the token does not have exactly three segments.
  """
  segments = id_token.split('.')

  # A well-formed JWT is header.payload.signature.
  if len(segments) != 3:
    raise VerifyJwtTokenError(
        'Wrong number of segments in token: %s' % id_token)

  payload = _urlsafe_b64decode(segments[1])
  return simplejson.loads(payload)
def credentials_from_code(client_id, client_secret, scope, code,
                          redirect_uri='postmessage',
                          http=None, user_agent=None,
                          token_uri='https://accounts.google.com/o/oauth2/token'):
  """Exchanges an authorization code for an OAuth2Credentials object.

  Args:
    client_id: string, client identifier.
    client_secret: string, client secret.
    scope: string or list of strings, scope(s) to request.
    code: string, An authorization code, most likely passed down from
      the client.
    redirect_uri: string, this is generally set to 'postmessage' to match
      the redirect_uri that the client specified.
    http: httplib2.Http, optional http instance to use to do the fetch.
    user_agent: string, HTTP User-Agent to provide for this application.
    token_uri: string, URI for token endpoint. For convenience
      defaults to Google's endpoints but any OAuth 2.0 provider can be used.

  Returns:
    An OAuth2Credentials object.

  Raises:
    FlowExchangeError: if the authorization code cannot be exchanged for an
      access token.
  """
  flow = OAuth2WebServerFlow(client_id, client_secret, scope, user_agent,
                             'https://accounts.google.com/o/oauth2/auth',
                             token_uri)
  # Invoked purely for its side effect of recording redirect_uri on the
  # flow object; the returned authorize URL is not needed here.
  flow.step1_get_authorize_url(redirect_uri)
  return flow.step2_exchange(code, http)
def credentials_from_clientsecrets_and_code(filename, scope, code,
                                            message=None,
                                            redirect_uri='postmessage',
                                            http=None):
  """Returns OAuth2Credentials from a clientsecrets file and an auth code.

  Will create the right kind of Flow based on the contents of the
  clientsecrets file or will raise InvalidClientSecretsError for unknown
  types of Flows.

  Args:
    filename: string, File name of clientsecrets.
    scope: string or list of strings, scope(s) to request.
    code: string, An authorization code, most likely passed down from
      the client.
    message: string, A friendly string to display to the user if the
      clientsecrets file is missing or invalid. If message is provided then
      sys.exit will be called in the case of an error. If message is not
      provided then clientsecrets.InvalidClientSecretsError will be raised.
    redirect_uri: string, this is generally set to 'postmessage' to match
      the redirect_uri that the client specified.
    http: httplib2.Http, optional http instance to use to do the fetch.

  Returns:
    An OAuth2Credentials object.

  Raises:
    FlowExchangeError: if the authorization code cannot be exchanged for an
      access token.
    UnknownClientSecretsFlowError: if the file describes an unknown kind of
      Flow.
    clientsecrets.InvalidClientSecretsError: if the clientsecrets file is
      invalid.
  """
  flow = flow_from_clientsecrets(filename, scope, message)
  # Called only to record redirect_uri on the flow; the URL is discarded.
  flow.step1_get_authorize_url(redirect_uri)
  return flow.step2_exchange(code, http)
class OAuth2WebServerFlow(Flow):
  """Does the Web Server Flow for OAuth 2.0.

  OAuth2Credentials objects may be safely pickled and unpickled.
  """

  def __init__(self, client_id, client_secret, scope, user_agent=None,
               auth_uri='https://accounts.google.com/o/oauth2/auth',
               token_uri='https://accounts.google.com/o/oauth2/token',
               **kwargs):
    """Constructor for OAuth2WebServerFlow.

    Args:
      client_id: string, client identifier.
      client_secret: string, client secret.
      scope: string or list of strings, scope(s) of the credentials being
        requested.
      user_agent: string, HTTP User-Agent to provide for this application.
      auth_uri: string, URI for authorization endpoint. For convenience
        defaults to Google's endpoints but any OAuth 2.0 provider can be used.
      token_uri: string, URI for token endpoint. For convenience
        defaults to Google's endpoints but any OAuth 2.0 provider can be used.
      **kwargs: dict, The keyword arguments are all optional and required
        parameters for the OAuth calls.
    """
    self.client_id = client_id
    self.client_secret = client_secret
    if isinstance(scope, list):
      scope = ' '.join(scope)
    self.scope = scope
    self.user_agent = user_agent
    self.auth_uri = auth_uri
    self.token_uri = token_uri
    # Default to offline access (refresh token issued); kwargs may override.
    self.params = {
        'access_type': 'offline',
        }
    self.params.update(kwargs)
    # Set by step1_get_authorize_url() and reused by step2_exchange().
    self.redirect_uri = None

  def step1_get_authorize_url(self, redirect_uri=OOB_CALLBACK_URN):
    """Returns a URI to redirect to the provider.

    Args:
      redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob' for
        a non-web-based application, or a URI that handles the callback from
        the authorization server.

    If redirect_uri is 'urn:ietf:wg:oauth:2.0:oob' then pass in the
    generated verification code to step2_exchange,
    otherwise pass in the query parameters received
    at the callback uri to step2_exchange.
    """
    self.redirect_uri = redirect_uri
    query = {
        'response_type': 'code',
        'client_id': self.client_id,
        'redirect_uri': redirect_uri,
        'scope': self.scope,
        }
    query.update(self.params)
    parts = list(urlparse.urlparse(self.auth_uri))
    # Merge any query string already present on auth_uri with our params.
    query.update(dict(parse_qsl(parts[4])))  # 4 is the index of the query part
    parts[4] = urllib.urlencode(query)
    return urlparse.urlunparse(parts)

  def step2_exchange(self, code, http=None):
    """Exchanges a code for OAuth2Credentials.

    Args:
      code: string or dict, either the code as a string, or a dictionary
        of the query parameters to the redirect_uri, which contains
        the code.
      http: httplib2.Http, optional http instance to use to do the fetch.

    Returns:
      An OAuth2Credentials object that can be used to authorize requests.

    Raises:
      FlowExchangeError: if a problem occured exchanging the code for a
        refresh_token.
    """
    if not isinstance(code, (str, unicode)):
      # A dict of redirect query parameters was passed; pull out the code.
      if 'code' not in code:
        if 'error' in code:
          error_msg = code['error']
        else:
          error_msg = 'No code was supplied in the query parameters.'
        raise FlowExchangeError(error_msg)
      else:
        code = code['code']

    body = urllib.urlencode({
        'grant_type': 'authorization_code',
        'client_id': self.client_id,
        'client_secret': self.client_secret,
        'code': code,
        'redirect_uri': self.redirect_uri,
        'scope': self.scope,
        })
    headers = {
        'content-type': 'application/x-www-form-urlencoded',
    }

    if self.user_agent is not None:
      headers['user-agent'] = self.user_agent

    if http is None:
      http = httplib2.Http()

    resp, content = http.request(self.token_uri, method='POST', body=body,
                                 headers=headers)
    if resp.status == 200:
      # TODO(jcgregorio) Raise an error if simplejson.loads fails?
      d = simplejson.loads(content)
      access_token = d['access_token']
      refresh_token = d.get('refresh_token', None)
      token_expiry = None
      if 'expires_in' in d:
        token_expiry = datetime.datetime.utcnow() + datetime.timedelta(
            seconds=int(d['expires_in']))

      if 'id_token' in d:
        d['id_token'] = _extract_id_token(d['id_token'])

      logger.info('Successfully retrieved access token: %s' % content)
      return OAuth2Credentials(access_token, self.client_id,
                               self.client_secret, refresh_token, token_expiry,
                               self.token_uri, self.user_agent,
                               id_token=d.get('id_token', None))
    else:
      logger.info('Failed to retrieve access token: %s' % content)
      error_msg = 'Invalid response %s.' % resp['status']
      try:
        d = simplejson.loads(content)
        if 'error' in d:
          error_msg = d['error']
      except Exception:
        # Best effort only: fall back to the generic HTTP-status message
        # when the error body is not valid JSON. (Was a bare except, which
        # also swallowed KeyboardInterrupt/SystemExit.)
        pass
      raise FlowExchangeError(error_msg)
def flow_from_clientsecrets(filename, scope, message=None):
  """Create a Flow from a clientsecrets file.

  Will create the right kind of Flow based on the contents of the
  clientsecrets file or will raise InvalidClientSecretsError for unknown
  types of Flows.

  Args:
    filename: string, File name of client secrets.
    scope: string or list of strings, scope(s) to request.
    message: string, A friendly string to display to the user if the
      clientsecrets file is missing or invalid. If message is provided then
      sys.exit will be called in the case of an error. If message is not
      provided then clientsecrets.InvalidClientSecretsError will be raised.

  Returns:
    A Flow object.

  Raises:
    UnknownClientSecretsFlowError: if the file describes an unknown kind of
      Flow.
    clientsecrets.InvalidClientSecretsError: if the clientsecrets file is
      invalid.
  """
  try:
    client_type, client_info = clientsecrets.loadfile(filename)
    if client_type in [clientsecrets.TYPE_WEB, clientsecrets.TYPE_INSTALLED]:
      return OAuth2WebServerFlow(
          client_info['client_id'],
          client_info['client_secret'],
          scope,
          None,  # user_agent
          client_info['auth_uri'],
          client_info['token_uri'])
  except clientsecrets.InvalidClientSecretsError:
    if message:
      sys.exit(message)
    else:
      raise
  else:
    # Reached only when the file parsed cleanly but described a flow type
    # this function cannot build. Bug fix: the message previously used '*'
    # (string repetition with a string operand -> TypeError) instead of '%'.
    raise UnknownClientSecretsFlowError(
        'This OAuth 2.0 flow is unsupported: "%s"' % client_type)
| Python |
# Copyright (C) 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for reading OAuth 2.0 client secret files.
A client_secrets.json file contains all the information needed to interact with
an OAuth 2.0 protected service.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
from anyjson import simplejson
# Properties that make a client_secrets.json file valid.
TYPE_WEB = 'web'
TYPE_INSTALLED = 'installed'

# Per-client-type validation schema:
#   'required' -- properties that must be present in the file.
#   'string'   -- properties whose values must be filled in, i.e. must not
#                 still hold the '[[...]]' placeholder from the template.
VALID_CLIENT = {
    TYPE_WEB: {
        'required': [
            'client_id',
            'client_secret',
            'redirect_uris',
            'auth_uri',
            'token_uri'],
        'string': [
            'client_id',
            'client_secret'
        ]
    },
    TYPE_INSTALLED: {
        'required': [
            'client_id',
            'client_secret',
            'redirect_uris',
            'auth_uri',
            'token_uri'],
        'string': [
            'client_id',
            'client_secret'
        ]
    }
}
class Error(Exception):
  """Base error for this module."""
  # All clientsecrets errors derive from this class so callers can catch
  # the whole family with a single except clause.
  pass


class InvalidClientSecretsError(Error):
  """Format of ClientSecrets file is invalid."""
  pass
def _validate_clientsecrets(obj):
  """Validate a deserialized clientsecrets structure.

  Args:
    obj: dict, parsed contents of a client_secrets.json file. Must contain
      exactly one top-level key naming the client type.

  Returns:
    A (client_type, client_info) tuple.

  Raises:
    InvalidClientSecretsError: if the structure is malformed, the client
      type is unknown, a required property is missing, or a string property
      still holds the '[[...]]' template placeholder.
  """
  if obj is None or len(obj) != 1:
    raise InvalidClientSecretsError('Invalid file format.')
  # list(obj)[0] works on both Python 2 and 3; obj.keys()[0] fails on
  # Python 3 because dict views are not indexable.
  client_type = list(obj)[0]
  # Membership test directly on the dict; no need to materialize .keys().
  if client_type not in VALID_CLIENT:
    raise InvalidClientSecretsError('Unknown client type: %s.' % client_type)
  client_info = obj[client_type]
  for prop_name in VALID_CLIENT[client_type]['required']:
    if prop_name not in client_info:
      raise InvalidClientSecretsError(
          'Missing property "%s" in a client type of "%s".' % (prop_name,
                                                               client_type))
  for prop_name in VALID_CLIENT[client_type]['string']:
    if client_info[prop_name].startswith('[['):
      raise InvalidClientSecretsError(
          'Property "%s" is not configured.' % prop_name)
  return client_type, client_info
def load(fp):
  """Read client secrets from a file-like object and validate them.

  Args:
    fp: file-like object containing client_secrets JSON.

  Returns:
    A (client_type, client_info) tuple.
  """
  obj = simplejson.load(fp)
  return _validate_clientsecrets(obj)


def loads(s):
  """Parse client secrets from a JSON string and validate them.

  Args:
    s: string, JSON-encoded client secrets.

  Returns:
    A (client_type, client_info) tuple.
  """
  obj = simplejson.loads(s)
  return _validate_clientsecrets(obj)
def loadfile(filename):
  """Read client secrets from the named file and validate them.

  Args:
    filename: string, path to a client_secrets.json file.

  Returns:
    A (client_type, client_info) tuple.

  Raises:
    InvalidClientSecretsError: if the file cannot be read or its contents
      are not a valid clientsecrets structure.
  """
  try:
    # open() instead of the deprecated file() builtin (removed in Python 3).
    fp = open(filename, 'r')
    try:
      obj = simplejson.load(fp)
    finally:
      fp.close()
  except IOError:
    raise InvalidClientSecretsError('File not found: "%s"' % filename)
  return _validate_clientsecrets(obj)
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for OAuth.
Utilities for making it easier to work with OAuth 2.0
credentials.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import os
import stat
import threading
from anyjson import simplejson
from client import Storage as BaseStorage
from client import Credentials
class Storage(BaseStorage):
  """Store and retrieve a single credential to and from a file."""

  def __init__(self, filename):
    """Constructor.

    Args:
      filename: string, path of the file used to store the credential.
    """
    self._filename = filename
    self._lock = threading.Lock()

  def acquire_lock(self):
    """Acquires any lock necessary to access this Storage.

    This lock is not reentrant.
    """
    self._lock.acquire()

  def release_lock(self):
    """Release the Storage lock.

    Trying to release a lock that isn't held will result in a
    RuntimeError.
    """
    self._lock.release()

  def locked_get(self):
    """Retrieve Credential from file.

    Returns:
      oauth2client.client.Credentials, or None if the file is missing or
      does not contain a valid JSON credential.
    """
    credentials = None
    try:
      f = open(self._filename, 'rb')
      content = f.read()
      f.close()
    except IOError:
      # A missing or unreadable file simply means no stored credential yet.
      return credentials

    try:
      credentials = Credentials.new_from_json(content)
      credentials.set_store(self)
    except ValueError:
      # Corrupt or non-JSON content: treat as no credential.
      pass

    return credentials

  def _create_file_if_needed(self):
    """Create an empty file if necessary.

    This method will not initialize the file. Instead it implements a
    simple version of "touch" to ensure the file has been created with
    owner-only permissions.
    """
    if not os.path.exists(self._filename):
      # Mask out group/other bits so the file is created mode 0600.
      # 0o177 (not the old 0177 literal) is required for Python 3
      # compatibility and is valid from Python 2.6 onward.
      old_umask = os.umask(0o177)
      try:
        open(self._filename, 'a+b').close()
      finally:
        os.umask(old_umask)

  def locked_put(self, credentials):
    """Write Credentials to file.

    Args:
      credentials: Credentials, the credentials to store.
    """
    self._create_file_if_needed()
    f = open(self._filename, 'wb')
    try:
      f.write(credentials.to_json())
    finally:
      # Close even if to_json()/write() raises, so the handle never leaks.
      f.close()

  def locked_delete(self):
    """Delete the Credentials file."""
    os.unlink(self._filename)
| Python |
__version__ = "1.0c2"
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OAuth 2.0 utilities for Django.
Utilities for using OAuth 2.0 in conjunction with
the Django datastore.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import oauth2client
import base64
import pickle
from django.db import models
from oauth2client.client import Storage as BaseStorage
class CredentialsField(models.Field):
  # Django model field that stores an oauth2client Credentials object as a
  # base64-encoded pickle in a TEXT column.
  # SubfieldBase ensures to_python() runs on every attribute assignment, so
  # values loaded from the database are always deserialized.
  __metaclass__ = models.SubfieldBase

  def get_internal_type(self):
    # Backed by a TEXT column in the database.
    return "TextField"

  def to_python(self, value):
    # Pass through None and already-deserialized Credentials; otherwise
    # decode the base64 pickle loaded from the database.
    # NOTE(review): pickle.loads on database content is only safe if the
    # database is fully trusted; never load rows from untrusted sources.
    if value is None:
      return None
    if isinstance(value, oauth2client.client.Credentials):
      return value
    return pickle.loads(base64.b64decode(value))

  def get_db_prep_value(self, value, connection, prepared=False):
    # Serialize the Credentials to a base64-encoded pickle for storage.
    if value is None:
      return None
    return base64.b64encode(pickle.dumps(value))
class FlowField(models.Field):
  # Django model field that stores an oauth2client Flow object as a
  # base64-encoded pickle in a TEXT column. Mirrors CredentialsField.
  __metaclass__ = models.SubfieldBase

  def get_internal_type(self):
    # Backed by a TEXT column in the database.
    return "TextField"

  def to_python(self, value):
    # Pass through None and already-deserialized Flow objects; otherwise
    # decode the base64 pickle loaded from the database.
    # NOTE(review): pickle.loads is only safe on trusted database content.
    if value is None:
      return None
    if isinstance(value, oauth2client.client.Flow):
      return value
    return pickle.loads(base64.b64decode(value))

  def get_db_prep_value(self, value, connection, prepared=False):
    # Serialize the Flow to a base64-encoded pickle for storage.
    if value is None:
      return None
    return base64.b64encode(pickle.dumps(value))
class Storage(BaseStorage):
  """Store and retrieve a single credential to and from the datastore.

  This Storage helper presumes the Credentials
  have been stored as a CredentialsField
  on a db model class.
  """

  def __init__(self, model_class, key_name, key_value, property_name):
    """Constructor for Storage.

    Args:
      model_class: db.Model, model class.
      key_name: string, key name for the entity that has the credentials.
      key_value: string, key value for the entity that has the credentials.
      property_name: string, name of the property that is a CredentialsField.
    """
    self.model_class = model_class
    self.key_name = key_name
    self.key_value = key_value
    self.property_name = property_name

  def locked_get(self):
    """Retrieve Credential from datastore.

    Returns:
      oauth2client.Credentials, or None if no matching entity exists.
    """
    credential = None
    query = {self.key_name: self.key_value}
    entities = self.model_class.objects.filter(**query)
    if len(entities) > 0:
      credential = getattr(entities[0], self.property_name)
      if credential and hasattr(credential, 'set_store'):
        # Let the credential write itself back here when refreshed.
        credential.set_store(self)
    return credential

  def locked_put(self, credentials):
    """Write a Credentials to the datastore.

    Args:
      credentials: Credentials, the credentials to store.

    NOTE(review): this constructs a fresh model instance keyed only by
    key_name/key_value; it replaces an existing row only when key_name is
    the model's primary key -- confirm against the model definition.
    """
    args = {self.key_name: self.key_value}
    entity = self.model_class(**args)
    setattr(entity, self.property_name, credentials)
    entity.save()

  def locked_delete(self):
    """Delete Credentials from the datastore."""
    query = {self.key_name: self.key_value}
    # delete() is called for its side effect; the previous dead assignment
    # of its return value has been removed.
    self.model_class.objects.filter(**query).delete()
| Python |
# Copyright (C) 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to encapsulate a single HTTP request.
The classes implement a command pattern, with every
object supporting an execute() method that does the
actuall HTTP request.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import StringIO
import base64
import copy
import gzip
import httplib2
import mimeparse
import mimetypes
import os
import urllib
import urlparse
import uuid
from email.generator import Generator
from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
from email.parser import FeedParser
from errors import BatchError
from errors import HttpError
from errors import ResumableUploadError
from errors import UnexpectedBodyError
from errors import UnexpectedMethodError
from model import JsonModel
from oauth2client.anyjson import simplejson
DEFAULT_CHUNK_SIZE = 512*1024
class MediaUploadProgress(object):
  """Status of a resumable upload."""

  def __init__(self, resumable_progress, total_size):
    """Constructor.

    Args:
      resumable_progress: int, bytes sent so far.
      total_size: int, total bytes in complete upload, or None if the total
        upload size isn't known ahead of time.
    """
    self.resumable_progress = resumable_progress
    self.total_size = total_size

  def progress(self):
    """Percent of upload completed, as a float.

    Returns:
      the percentage complete as a float, returning 0.0 if the total size of
      the upload is unknown.
    """
    if self.total_size is None:
      # Unknown total: a meaningful ratio cannot be computed.
      return 0.0
    return float(self.resumable_progress) / float(self.total_size)
class MediaDownloadProgress(object):
  """Status of a resumable download."""

  def __init__(self, resumable_progress, total_size):
    """Constructor.

    Args:
      resumable_progress: int, bytes received so far.
      total_size: int, total bytes in complete download.
    """
    self.resumable_progress = resumable_progress
    self.total_size = total_size

  def progress(self):
    """Percent of download completed, as a float.

    Returns:
      the percentage complete as a float, returning 0.0 if the total size of
      the download is unknown.
    """
    if self.total_size is None:
      # Unknown total: a meaningful ratio cannot be computed.
      return 0.0
    return float(self.resumable_progress) / float(self.total_size)
class MediaUpload(object):
  """Describes a media object to upload.

  Base class that defines the interface of MediaUpload subclasses.

  Note that subclasses of MediaUpload may allow you to control the chunksize
  when upload a media object. It is important to keep the size of the chunk as
  large as possible to keep the upload efficient. Other factors may influence
  the size of the chunk you use, particularly if you are working in an
  environment where individual HTTP requests may have a hardcoded time limit,
  such as under certain classes of requests under Google App Engine.
  """

  def chunksize(self):
    """Chunk size for resumable uploads.

    Returns:
      Chunk size in bytes.
    """
    raise NotImplementedError()

  def mimetype(self):
    """Mime type of the body.

    Returns:
      Mime type.
    """
    return 'application/octet-stream'

  def size(self):
    """Size of upload.

    Returns:
      Size of the body, or None of the size is unknown.
    """
    return None

  def resumable(self):
    """Whether this upload is resumable.

    Returns:
      True if resumable upload or False.
    """
    return False

  def getbytes(self, begin, end):
    """Get bytes from the media.

    Args:
      begin: int, offset from beginning of file.
      length: int, number of bytes to read, starting at begin.

    Returns:
      A string of bytes read. May be shorter than length if EOF was reached
      first.
    """
    raise NotImplementedError()

  def _to_json(self, strip=None):
    """Utility function for creating a JSON representation of a MediaUpload.

    Args:
      strip: array, An array of names of members to not include in the JSON.

    Returns:
      string, a JSON representation of this instance, suitable to pass to
      from_json().
    """
    d = copy.copy(self.__dict__)
    for member in (strip or []):
      del d[member]
    # Record the concrete class so new_from_json() can find it again.
    kls = type(self)
    d['_class'] = kls.__name__
    d['_module'] = kls.__module__
    return simplejson.dumps(d)

  def to_json(self):
    """Create a JSON representation of an instance of MediaUpload.

    Returns:
      string, a JSON representation of this instance, suitable to pass to
      from_json().
    """
    return self._to_json()

  @classmethod
  def new_from_json(cls, s):
    """Utility class method to instantiate a MediaUpload subclass from a JSON
    representation produced by to_json().

    Args:
      s: string, JSON from to_json().

    Returns:
      An instance of the subclass of MediaUpload that was serialized with
      to_json().
    """
    data = simplejson.loads(s)
    # Import the recorded module, then delegate to that class's from_json().
    module = data['_module']
    m = __import__(module, fromlist=module.split('.')[:-1])
    kls = getattr(m, data['_class'])
    return getattr(kls, 'from_json')(s)
class MediaFileUpload(MediaUpload):
  """A MediaUpload for a file.

  Construct a MediaFileUpload and pass as the media_body parameter of the
  method. For example, if we had a service that allowed uploading images:

    media = MediaFileUpload('cow.png', mimetype='image/png',
      chunksize=1024*1024, resumable=True)
    farm.animals().insert(
        id='cow',
        name='cow.png',
        media_body=media).execute()
  """

  def __init__(self, filename, mimetype=None, chunksize=DEFAULT_CHUNK_SIZE, resumable=False):
    """Constructor.

    Args:
      filename: string, Name of the file.
      mimetype: string, Mime-type of the file. If None then a mime-type will be
        guessed from the file extension.
      chunksize: int, File will be uploaded in chunks of this many bytes. Only
        used if resumable=True.
      resumable: bool, True if this is a resumable upload. False means upload
        in a single request.
    """
    self._filename = filename
    self._size = os.path.getsize(filename)
    # Opened lazily on the first getbytes() call.
    self._fd = None
    if mimetype is None:
      (mimetype, encoding) = mimetypes.guess_type(filename)
    self._mimetype = mimetype
    self._chunksize = chunksize
    self._resumable = resumable

  def chunksize(self):
    """Chunk size for resumable uploads.

    Returns:
      Chunk size in bytes.
    """
    return self._chunksize

  def mimetype(self):
    """Mime type of the body.

    Returns:
      Mime type.
    """
    return self._mimetype

  def size(self):
    """Size of upload.

    Returns:
      Size of the body, or None of the size is unknown.
    """
    return self._size

  def resumable(self):
    """Whether this upload is resumable.

    Returns:
      True if resumable upload or False.
    """
    return self._resumable

  def getbytes(self, begin, length):
    """Get bytes from the media.

    Args:
      begin: int, offset from beginning of file.
      length: int, number of bytes to read, starting at begin.

    Returns:
      A string of bytes read. May be shorted than length if EOF was reached
      first.
    """
    if self._fd is None:
      # Open once and keep the handle for subsequent chunk reads.
      self._fd = open(self._filename, 'rb')
    self._fd.seek(begin)
    return self._fd.read(length)

  def to_json(self):
    """Creating a JSON representation of an instance of MediaFileUpload.

    Returns:
      string, a JSON representation of this instance, suitable to pass to
      from_json().
    """
    # The open file handle is not serializable; strip it out.
    return self._to_json(['_fd'])

  @staticmethod
  def from_json(s):
    d = simplejson.loads(s)
    return MediaFileUpload(
        d['_filename'], d['_mimetype'], d['_chunksize'], d['_resumable'])
class MediaIoBaseUpload(MediaUpload):
  """A MediaUpload for a io.Base objects.

  Note that the Python file object is compatible with io.Base and can be used
  with this class also.

    fh = io.BytesIO('...Some data to upload...')
    media = MediaIoBaseUpload(fh, mimetype='image/png',
      chunksize=1024*1024, resumable=True)
    farm.animals().insert(
        id='cow',
        name='cow.png',
        media_body=media).execute()
  """

  def __init__(self, fh, mimetype, chunksize=DEFAULT_CHUNK_SIZE,
      resumable=False):
    """Constructor.

    Args:
      fh: io.Base or file object, The source of the bytes to upload. MUST be
        opened in blocking mode, do not use streams opened in non-blocking mode.
      mimetype: string, Mime-type of the file. If None then a mime-type will be
        guessed from the file extension.
      chunksize: int, File will be uploaded in chunks of this many bytes. Only
        used if resumable=True.
      resumable: bool, True if this is a resumable upload. False means upload
        in a single request.
    """
    self._fh = fh
    self._mimetype = mimetype
    self._chunksize = chunksize
    self._resumable = resumable
    self._size = None
    try:
      if hasattr(fh, 'fileno'):
        size = os.fstat(fh.fileno()).st_size
        # Pipes and such show up as 0 length files; treat 0 as unknown.
        if size:
          self._size = size
    except IOError:
      # Stream has no usable size; leave it as None (unknown).
      pass

  def chunksize(self):
    """Chunk size for resumable uploads.

    Returns:
      Chunk size in bytes.
    """
    return self._chunksize

  def mimetype(self):
    """Mime type of the body.

    Returns:
      Mime type.
    """
    return self._mimetype

  def size(self):
    """Size of upload.

    Returns:
      Size of the body, or None of the size is unknown.
    """
    return self._size

  def resumable(self):
    """Whether this upload is resumable.

    Returns:
      True if resumable upload or False.
    """
    return self._resumable

  def getbytes(self, begin, length):
    """Get bytes from the media.

    Args:
      begin: int, offset from beginning of file.
      length: int, number of bytes to read, starting at begin.

    Returns:
      A string of bytes read. May be shorted than length if EOF was reached
      first.
    """
    self._fh.seek(begin)
    return self._fh.read(length)

  def to_json(self):
    """This upload type is not serializable."""
    raise NotImplementedError('MediaIoBaseUpload is not serializable.')
class MediaInMemoryUpload(MediaUpload):
  """MediaUpload for a chunk of bytes.

  Construct a MediaInMemoryUpload and pass as the media_body parameter of the
  method.
  """

  def __init__(self, body, mimetype='application/octet-stream',
               chunksize=DEFAULT_CHUNK_SIZE, resumable=False):
    """Create a new MediaBytesUpload.

    Args:
      body: string, Bytes of body content.
      mimetype: string, Mime-type of the file or default of
        'application/octet-stream'.
      chunksize: int, File will be uploaded in chunks of this many bytes. Only
        used if resumable=True.
      resumable: bool, True if this is a resumable upload. False means upload
        in a single request.
    """
    self._body = body
    self._mimetype = mimetype
    self._resumable = resumable
    self._chunksize = chunksize

  def chunksize(self):
    """Chunk size for resumable uploads.

    Returns:
      Chunk size in bytes.
    """
    return self._chunksize

  def mimetype(self):
    """Mime type of the body.

    Returns:
      Mime type.
    """
    return self._mimetype

  def size(self):
    """Size of upload.

    Returns:
      Size of the body, or None of the size is unknown.
    """
    return len(self._body)

  def resumable(self):
    """Whether this upload is resumable.

    Returns:
      True if resumable upload or False.
    """
    return self._resumable

  def getbytes(self, begin, length):
    """Get bytes from the media.

    Args:
      begin: int, offset from beginning of file.
      length: int, number of bytes to read, starting at begin.

    Returns:
      A string of bytes read. May be shorter than length if EOF was reached
      first.
    """
    return self._body[begin:begin + length]

  def to_json(self):
    """Create a JSON representation of a MediaInMemoryUpload.

    Returns:
      string, a JSON representation of this instance, suitable to pass to
      from_json().
    """
    kls = type(self)
    d = copy.copy(self.__dict__)
    # The raw body is replaced by a base64 copy so the JSON stays text-safe.
    del d['_body']
    d['_class'] = kls.__name__
    d['_module'] = kls.__module__
    d['_b64body'] = base64.b64encode(self._body)
    return simplejson.dumps(d)

  @staticmethod
  def from_json(s):
    d = simplejson.loads(s)
    return MediaInMemoryUpload(base64.b64decode(d['_b64body']),
                               d['_mimetype'], d['_chunksize'],
                               d['_resumable'])
class MediaIoBaseDownload(object):
    """Download media resources.

    Note that the Python file object is compatible with io.Base and can be used
    with this class also.

    Example:
      request = farms.animals().get_media(id='cow')
      fh = io.FileIO('cow.png', mode='wb')
      downloader = MediaIoBaseDownload(fh, request, chunksize=1024*1024)

      done = False
      while done is False:
        status, done = downloader.next_chunk()
        if status:
          print "Download %d%%." % int(status.progress() * 100)
      print "Download Complete!"
    """

    def __init__(self, fh, request, chunksize=DEFAULT_CHUNK_SIZE):
        """Constructor.

        Args:
          fh: io.Base or file object, The stream in which to write the
            downloaded bytes.
          request: apiclient.http.HttpRequest, the media request to perform in
            chunks.
          chunksize: int, File will be downloaded in chunks of this many bytes.
        """
        self.fh_ = fh
        self.request_ = request
        self.uri_ = request.uri
        self.chunksize_ = chunksize
        # Number of bytes successfully written to fh_ so far.
        self.progress_ = 0
        # Total size in bytes, learned from the Content-Range response header;
        # None until the server has reported it.
        self.total_size_ = None
        self.done_ = False

    def next_chunk(self):
        """Get the next chunk of the download.

        Returns:
          (status, done): (MediaDownloadProgress, boolean)
             The value of 'done' will be True when the media has been fully
             downloaded.

        Raises:
          apiclient.errors.HttpError if the response was not a 2xx.
          httplib2.Error if a transport error has occurred.
        """
        # Ask for one chunk's worth of bytes via an HTTP Range header.
        headers = {
            'range': 'bytes=%d-%d' % (
                self.progress_, self.progress_ + self.chunksize_)
            }
        http = self.request_.http
        # Follow at most one redirect manually so the Range header is kept.
        http.follow_redirects = False

        resp, content = http.request(self.uri_, headers=headers)
        if resp.status in [301, 302, 303, 307, 308] and 'location' in resp:
            self.uri_ = resp['location']
            resp, content = http.request(self.uri_, headers=headers)
        if resp.status in [200, 206]:
            self.progress_ += len(content)
            self.fh_.write(content)

            # The total length appears after the '/' in Content-Range,
            # e.g. 'bytes 0-99/1234'.
            if 'content-range' in resp:
                content_range = resp['content-range']
                length = content_range.rsplit('/', 1)[1]
                self.total_size_ = int(length)

            if self.progress_ == self.total_size_:
                self.done_ = True
            return MediaDownloadProgress(self.progress_, self.total_size_), self.done_
        else:
            raise HttpError(resp, content, self.uri_)
class HttpRequest(object):
    """Encapsulates a single HTTP request."""

    def __init__(self, http, postproc, uri,
                 method='GET',
                 body=None,
                 headers=None,
                 methodId=None,
                 resumable=None):
        """Constructor for an HttpRequest.

        Args:
          http: httplib2.Http, the transport object to use to make a request
          postproc: callable, called on the HTTP response and content to
            transform it into a data object before returning, or raising an
            exception on an error.
          uri: string, the absolute URI to send the request to
          method: string, the HTTP method to use
          body: string, the request body of the HTTP request,
          headers: dict, the HTTP request headers
          methodId: string, a unique identifier for the API method being called.
          resumable: MediaUpload, None if this is not a resumable request.
        """
        self.uri = uri
        self.method = method
        self.body = body
        self.headers = headers or {}
        self.methodId = methodId
        self.http = http
        self.postproc = postproc
        self.resumable = resumable
        # Set to True when a chunk upload fails, so the next next_chunk() call
        # first asks the server how much it has already received.
        self._in_error_state = False

        # Pull the multipart boundary out of the content-type header.
        major, minor, params = mimeparse.parse_mime_type(
            headers.get('content-type', 'application/json'))
        # NOTE(review): major/minor/params are never used below — this parse
        # looks vestigial (or serves only to validate the header); confirm
        # before removing.

        # The size of the non-media part of the request.
        self.body_size = len(self.body or '')

        # The resumable URI to send chunks to.
        self.resumable_uri = None

        # The bytes that have been uploaded.
        self.resumable_progress = 0

    def execute(self, http=None):
        """Execute the request.

        Args:
          http: httplib2.Http, an http object to be used in place of the
                one the HttpRequest request object was constructed with.

        Returns:
          A deserialized object model of the response body as determined
          by the postproc.

        Raises:
          apiclient.errors.HttpError if the response was not a 2xx.
          httplib2.Error if a transport error has occurred.
        """
        if http is None:
            http = self.http
        if self.resumable:
            # Media uploads are driven chunk by chunk until the final
            # deserialized response body becomes available.
            body = None
            while body is None:
                _, body = self.next_chunk(http)
            return body
        else:
            if 'content-length' not in self.headers:
                self.headers['content-length'] = str(self.body_size)
            resp, content = http.request(self.uri, self.method,
                                         body=self.body,
                                         headers=self.headers)

            if resp.status >= 300:
                raise HttpError(resp, content, self.uri)
        return self.postproc(resp, content)

    def next_chunk(self, http=None):
        """Execute the next step of a resumable upload.

        Can only be used if the method being executed supports media uploads
        and the MediaUpload object passed in was flagged as using resumable
        upload.

        Example:

          media = MediaFileUpload('cow.png', mimetype='image/png',
                                  chunksize=1000, resumable=True)
          request = farm.animals().insert(
              id='cow',
              name='cow.png',
              media_body=media)

          response = None
          while response is None:
            status, response = request.next_chunk()
            if status:
              print "Upload %d%% complete." % int(status.progress() * 100)

        Returns:
          (status, body): (ResumableMediaStatus, object)
             The body will be None until the resumable media is fully uploaded.

        Raises:
          apiclient.errors.HttpError if the response was not a 2xx.
          httplib2.Error if a transport error has occurred.
        """
        if http is None:
            http = self.http

        # '*' tells the server the total size is not known in advance.
        if self.resumable.size() is None:
            size = '*'
        else:
            size = str(self.resumable.size())

        if self.resumable_uri is None:
            # First call: initiate the resumable session; the server replies
            # with the URI that all subsequent chunks must be PUT to.
            start_headers = copy.copy(self.headers)
            start_headers['X-Upload-Content-Type'] = self.resumable.mimetype()
            if size != '*':
                start_headers['X-Upload-Content-Length'] = size
            start_headers['content-length'] = str(self.body_size)

            resp, content = http.request(self.uri, self.method,
                                         body=self.body,
                                         headers=start_headers)
            if resp.status == 200 and 'location' in resp:
                self.resumable_uri = resp['location']
            else:
                raise ResumableUploadError("Failed to retrieve starting URI.")
        elif self._in_error_state:
            # If we are in an error state then query the server for current
            # state of the upload by sending an empty PUT and reading the
            # 'range' header in the response.
            headers = {
                'Content-Range': 'bytes */%s' % size,
                'content-length': '0'
                }
            resp, content = http.request(self.resumable_uri, 'PUT',
                                         headers=headers)
            status, body = self._process_response(resp, content)
            if body:
                # The upload was complete.
                return (status, body)

        data = self.resumable.getbytes(
            self.resumable_progress, self.resumable.chunksize())

        # A short read implies that we are at EOF, so finish the upload.
        if len(data) < self.resumable.chunksize():
            size = str(self.resumable_progress + len(data))

        headers = {
            'Content-Range': 'bytes %d-%d/%s' % (
                self.resumable_progress, self.resumable_progress + len(data) - 1,
                size)
            }
        try:
            resp, content = http.request(self.resumable_uri, 'PUT',
                                         body=data,
                                         headers=headers)
        except:
            # Remember the failure so the next call re-syncs with the server
            # before sending more bytes.
            self._in_error_state = True
            raise

        return self._process_response(resp, content)

    def _process_response(self, resp, content):
        """Process the response from a single chunk upload.

        Args:
          resp: httplib2.Response, the response object.
          content: string, the content of the response.

        Returns:
          (status, body): (ResumableMediaStatus, object)
             The body will be None until the resumable media is fully uploaded.

        Raises:
          apiclient.errors.HttpError if the response was not a 2xx or a 308.
        """
        if resp.status in [200, 201]:
            self._in_error_state = False
            return None, self.postproc(resp, content)
        elif resp.status == 308:
            self._in_error_state = False
            # A "308 Resume Incomplete" indicates we are not done.
            # The 'range' header reports the last byte the server received.
            self.resumable_progress = int(resp['range'].split('-')[1]) + 1
            if 'location' in resp:
                self.resumable_uri = resp['location']
        else:
            self._in_error_state = True
            raise HttpError(resp, content, self.uri)

        return (MediaUploadProgress(self.resumable_progress, self.resumable.size()),
                None)

    def to_json(self):
        """Returns a JSON representation of the HttpRequest."""
        d = copy.copy(self.__dict__)
        if d['resumable'] is not None:
            d['resumable'] = self.resumable.to_json()
        # The transport and the postproc callable are not serializable.
        del d['http']
        del d['postproc']

        return simplejson.dumps(d)

    @staticmethod
    def from_json(s, http, postproc):
        """Returns an HttpRequest populated with info from a JSON object."""
        d = simplejson.loads(s)
        if d['resumable'] is not None:
            d['resumable'] = MediaUpload.new_from_json(d['resumable'])
        return HttpRequest(
            http,
            postproc,
            uri=d['uri'],
            method=d['method'],
            body=d['body'],
            headers=d['headers'],
            methodId=d['methodId'],
            resumable=d['resumable'])
class BatchHttpRequest(object):
    """Batches multiple HttpRequest objects into a single HTTP request.

    Example:
      from apiclient.http import BatchHttpRequest

      def list_animals(request_id, response):
        \"\"\"Do something with the animals list response.\"\"\"
        pass

      def list_farmers(request_id, response):
        \"\"\"Do something with the farmers list response.\"\"\"
        pass

      service = build('farm', 'v2')

      batch = BatchHttpRequest()

      batch.add(service.animals().list(), list_animals)
      batch.add(service.farmers().list(), list_farmers)
      batch.execute(http)
    """

    def __init__(self, callback=None, batch_uri=None):
        """Constructor for a BatchHttpRequest.

        Args:
          callback: callable, A callback to be called for each response, of the
            form callback(id, response). The first parameter is the request id,
            and the second is the deserialized response object.
          batch_uri: string, URI to send batch requests to.
        """
        if batch_uri is None:
            batch_uri = 'https://www.googleapis.com/batch'
        self._batch_uri = batch_uri

        # Global callback to be called for each individual response in the batch.
        self._callback = callback

        # A map from id to request.
        self._requests = {}

        # A map from id to callback.
        self._callbacks = {}

        # List of request ids, in the order in which they were added.
        self._order = []

        # The last auto generated id.
        self._last_auto_id = 0

        # Unique ID on which to base the Content-ID headers.
        self._base_id = None

        # A map from request id to (headers, content) response pairs
        self._responses = {}

        # A map of id(Credentials) that have been refreshed.
        self._refreshed_credentials = {}

    def _refresh_and_apply_credentials(self, request, http):
        """Refresh the credentials and apply to the request.

        Args:
          request: HttpRequest, the request.
          http: httplib2.Http, the global http object for the batch.
        """
        # For the credentials to refresh, but only once per refresh_token
        # If there is no http per the request then refresh the http passed in
        # via execute()
        creds = None
        if request.http is not None and hasattr(request.http.request,
                                                'credentials'):
            creds = request.http.request.credentials
        elif http is not None and hasattr(http.request, 'credentials'):
            creds = http.request.credentials
        if creds is not None:
            if id(creds) not in self._refreshed_credentials:
                creds.refresh(http)
                self._refreshed_credentials[id(creds)] = 1

            # Only apply the credentials if we are using the http object passed
            # in, otherwise apply() will get called during _serialize_request().
            if request.http is None or not hasattr(request.http.request,
                                                   'credentials'):
                creds.apply(request.headers)

    def _id_to_header(self, id_):
        """Convert an id to a Content-ID header value.

        Args:
          id_: string, identifier of individual request.

        Returns:
          A Content-ID header with the id_ encoded into it. A UUID is prepended
          to the value because Content-ID headers are supposed to be universally
          unique.
        """
        if self._base_id is None:
            self._base_id = uuid.uuid4()

        return '<%s+%s>' % (self._base_id, urllib.quote(id_))

    def _header_to_id(self, header):
        """Convert a Content-ID header value to an id.

        Presumes the Content-ID header conforms to the format that
        _id_to_header() returns.

        Args:
          header: string, Content-ID header value.

        Returns:
          The extracted id value.

        Raises:
          BatchError if the header is not in the expected format.
        """
        if header[0] != '<' or header[-1] != '>':
            raise BatchError("Invalid value for Content-ID: %s" % header)
        if '+' not in header:
            raise BatchError("Invalid value for Content-ID: %s" % header)
        base, id_ = header[1:-1].rsplit('+', 1)

        return urllib.unquote(id_)

    def _serialize_request(self, request):
        """Convert an HttpRequest object into a string.

        Args:
          request: HttpRequest, the request to serialize.

        Returns:
          The request as a string in application/http format.
        """
        # Construct status line
        parsed = urlparse.urlparse(request.uri)
        request_line = urlparse.urlunparse(
            (None, None, parsed.path, parsed.params, parsed.query, None)
            )
        status_line = request.method + ' ' + request_line + ' HTTP/1.1\n'
        major, minor = request.headers.get('content-type', 'application/json').split('/')
        msg = MIMENonMultipart(major, minor)
        headers = request.headers.copy()

        if request.http is not None and hasattr(request.http.request,
                                                'credentials'):
            request.http.request.credentials.apply(headers)

        # MIMENonMultipart adds its own Content-Type header.
        if 'content-type' in headers:
            del headers['content-type']

        for key, value in headers.iteritems():
            msg[key] = value
        msg['Host'] = parsed.netloc
        msg.set_unixfrom(None)

        if request.body is not None:
            msg.set_payload(request.body)
            msg['content-length'] = str(len(request.body))

        # Serialize the mime message.
        fp = StringIO.StringIO()
        # maxheaderlen=0 means don't line wrap headers.
        g = Generator(fp, maxheaderlen=0)
        g.flatten(msg, unixfrom=False)
        body = fp.getvalue()

        # Strip off the \n\n that the MIME lib tacks onto the end of the payload.
        if request.body is None:
            body = body[:-2]

        return status_line.encode('utf-8') + body

    def _deserialize_response(self, payload):
        """Convert string into httplib2 response and content.

        Args:
          payload: string, headers and body as a string.

        Returns:
          A pair (resp, content) like would be returned from httplib2.request.
        """
        # Strip off the status line
        status_line, payload = payload.split('\n', 1)
        protocol, status, reason = status_line.split(' ', 2)

        # Parse the rest of the response
        parser = FeedParser()
        parser.feed(payload)
        msg = parser.close()
        msg['status'] = status

        # Create httplib2.Response from the parsed headers.
        resp = httplib2.Response(msg)
        resp.reason = reason
        resp.version = int(protocol.split('/', 1)[1].replace('.', ''))

        # Everything after the first blank line is the response content.
        content = payload.split('\r\n\r\n', 1)[1]

        return resp, content

    def _new_id(self):
        """Create a new id.

        Auto incrementing number that avoids conflicts with ids already used.

        Returns:
          string, a new unique id.
        """
        self._last_auto_id += 1
        while str(self._last_auto_id) in self._requests:
            self._last_auto_id += 1
        return str(self._last_auto_id)

    def add(self, request, callback=None, request_id=None):
        """Add a new request.

        Every callback added will be paired with a unique id, the request_id.
        That unique id will be passed back to the callback when the response
        comes back from the server. The default behavior is to have the library
        generate its own unique id. If the caller passes in a request_id then
        they must ensure uniqueness for each request_id, and if they are not an
        exception is raised. Callers should either supply all request_ids or
        never supply a request id, to avoid such an error.

        Args:
          request: HttpRequest, Request to add to the batch.
          callback: callable, A callback to be called for this response, of the
            form callback(id, response). The first parameter is the request id,
            and the second is the deserialized response object.
          request_id: string, A unique id for the request. The id will be passed
            to the callback with the response.

        Returns:
          None

        Raises:
          BatchError if a media request is added to a batch.
          KeyError is the request_id is not unique.
        """
        if request_id is None:
            request_id = self._new_id()
        if request.resumable is not None:
            raise BatchError("Media requests cannot be used in a batch request.")
        if request_id in self._requests:
            raise KeyError("A request with this ID already exists: %s" % request_id)
        self._requests[request_id] = request
        self._callbacks[request_id] = callback
        self._order.append(request_id)

    def _execute(self, http, order, requests):
        """Serialize batch request, send to server, process response.

        Args:
          http: httplib2.Http, an http object to be used to make the request
            with.
          order: list, list of request ids in the order they were added to the
            batch.
          requests: dict, map of request id to request object to send.

        Raises:
          httplib2.Error if a transport error has occurred.
          apiclient.errors.BatchError if the response is the wrong format.
        """
        message = MIMEMultipart('mixed')
        # Message should not write out its own headers.
        setattr(message, '_write_headers', lambda self: None)

        # Add all the individual requests.
        for request_id in order:
            request = requests[request_id]

            msg = MIMENonMultipart('application', 'http')
            msg['Content-Transfer-Encoding'] = 'binary'
            msg['Content-ID'] = self._id_to_header(request_id)

            body = self._serialize_request(request)
            msg.set_payload(body)
            message.attach(msg)

        body = message.as_string()

        headers = {}
        headers['content-type'] = ('multipart/mixed; '
                                   'boundary="%s"') % message.get_boundary()

        resp, content = http.request(self._batch_uri, 'POST', body=body,
                                     headers=headers)

        if resp.status >= 300:
            raise HttpError(resp, content, self._batch_uri)

        # Now break out the individual responses and store each one.
        boundary, _ = content.split(None, 1)
        # NOTE(review): 'boundary' is never used below; the parser gets the
        # boundary from the content-type header instead — confirm before
        # removing.

        # Prepend with a content-type header so FeedParser can handle it.
        header = 'content-type: %s\r\n\r\n' % resp['content-type']
        for_parser = header + content

        parser = FeedParser()
        parser.feed(for_parser)
        mime_response = parser.close()

        if not mime_response.is_multipart():
            raise BatchError("Response not in multipart/mixed format.", resp,
                             content)

        for part in mime_response.get_payload():
            request_id = self._header_to_id(part['Content-ID'])
            headers, content = self._deserialize_response(part.get_payload())
            self._responses[request_id] = (headers, content)

    def execute(self, http=None):
        """Execute all the requests as a single batched HTTP request.

        Args:
          http: httplib2.Http, an http object to be used in place of the one
            the HttpRequest request object was constructed with. If one isn't
            supplied then use a http object from the requests in this batch.

        Returns:
          None

        Raises:
          httplib2.Error if a transport error has occurred.
          apiclient.errors.BatchError if the response is the wrong format.
        """
        # If http is not supplied use the first valid one given in the requests.
        if http is None:
            for request_id in self._order:
                request = self._requests[request_id]
                if request is not None:
                    http = request.http
                    break

        if http is None:
            raise ValueError("Missing a valid http object.")

        self._execute(http, self._order, self._requests)

        # Loop over all the requests and check for 401s. For each 401 request
        # the credentials should be refreshed and then sent again in a separate
        # batch.
        redo_requests = {}
        redo_order = []

        for request_id in self._order:
            headers, content = self._responses[request_id]
            if headers['status'] == '401':
                redo_order.append(request_id)
                request = self._requests[request_id]
                self._refresh_and_apply_credentials(request, http)
                redo_requests[request_id] = request

        if redo_requests:
            self._execute(http, redo_order, redo_requests)

        # Now process all callbacks that are erroring, and raise an exception for
        # ones that return a non-2xx response? Or add extra parameter to callback
        # that contains an HttpError?
        for request_id in self._order:
            headers, content = self._responses[request_id]

            request = self._requests[request_id]
            callback = self._callbacks[request_id]

            response = None
            exception = None
            try:
                r = httplib2.Response(headers)
                response = request.postproc(r, content)
            except HttpError, e:
                exception = e

            # Per-request callback first, then the batch-wide callback.
            if callback is not None:
                callback(request_id, response, exception)
            if self._callback is not None:
                self._callback(request_id, response, exception)
class HttpRequestMock(object):
    """Mock of HttpRequest.

    Do not construct directly, instead use RequestMockBuilder.
    """

    def __init__(self, resp, content, postproc):
        """Constructor for HttpRequestMock

        Args:
          resp: httplib2.Response, the response to emulate coming from the
            request
          content: string, the response body
          postproc: callable, the post processing function usually supplied by
            the model class. See model.JsonModel.response() as an example.
        """
        self.content = content
        self.postproc = postproc
        # Fall back to a plain 200 OK when no response was supplied.
        if resp is None:
            resp = httplib2.Response({'status': 200, 'reason': 'OK'})
        self.resp = resp
        # Mirror the mapping's 'reason' entry onto the attribute callers read.
        if 'reason' in self.resp:
            self.resp.reason = self.resp['reason']

    def execute(self, http=None):
        """Execute the request.

        Same behavior as HttpRequest.execute(), but the response is
        mocked and not really from an HTTP request/response.
        """
        return self.postproc(self.resp, self.content)
class RequestMockBuilder(object):
    """A simple mock of HttpRequest

    Pass in a dictionary to the constructor that maps request methodIds to
    tuples of (httplib2.Response, content, opt_expected_body) that should be
    returned when that method is called. None may also be passed in for the
    httplib2.Response, in which case a 200 OK response will be generated.
    If an opt_expected_body (str or dict) is provided, it will be compared to
    the body and UnexpectedBodyError will be raised on inequality.

    Example:
      response = '{"data": {"id": "tag:google.c...'
      requestBuilder = RequestMockBuilder(
        {
          'plus.activities.get': (None, response),
        }
      )
      apiclient.discovery.build("plus", "v1", requestBuilder=requestBuilder)

    Methods that you do not supply a response for will return a
    200 OK with an empty string as the response content or raise an exception
    if check_unexpected is set to True. The methodId is taken from the rpcName
    in the discovery document.

    For more details see the project wiki.
    """

    def __init__(self, responses, check_unexpected=False):
        """Constructor for RequestMockBuilder

        The constructed object should be a callable object
        that can replace the class HttpResponse.

        responses - A dictionary that maps methodIds into tuples
                    of (httplib2.Response, content). The methodId
                    comes from the 'rpcName' field in the discovery
                    document.
        check_unexpected - A boolean setting whether or not UnexpectedMethodError
                           should be raised on unsupplied method.
        """
        self.responses = responses
        self.check_unexpected = check_unexpected

    def __call__(self, http, postproc, uri, method='GET', body=None,
                 headers=None, methodId=None, resumable=None):
        """Implements the callable interface that discovery.build() expects
        of requestBuilder, which is to build an object compatible with
        HttpRequest.execute(). See that method for the description of the
        parameters and the expected response.
        """
        if methodId in self.responses:
            response = self.responses[methodId]
            resp, content = response[:2]
            if len(response) > 2:
                # Test the body against the supplied expected_body.
                expected_body = response[2]
                if bool(expected_body) != bool(body):
                    # Not expecting a body and provided one
                    # or expecting a body and not provided one.
                    raise UnexpectedBodyError(expected_body, body)
                if isinstance(expected_body, str):
                    expected_body = simplejson.loads(expected_body)
                # Compare as parsed JSON so formatting differences don't cause
                # spurious mismatches.
                body = simplejson.loads(body)
                if body != expected_body:
                    raise UnexpectedBodyError(expected_body, body)
            return HttpRequestMock(resp, content, postproc)
        elif self.check_unexpected:
            raise UnexpectedMethodError(methodId)
        else:
            model = JsonModel(False)
            return HttpRequestMock(None, '{}', model.response)
class HttpMock(object):
    """Mock of httplib2.Http that replays one canned response read from a file.

    Every call to request() returns the same headers and file contents,
    regardless of the request made.
    """

    def __init__(self, filename, headers=None):
        """
        Args:
          filename: string, absolute filename to read response from
          headers: dict, header to return with response
        """
        if headers is None:
            headers = {'status': '200 OK'}
        # Use the builtin open() instead of the Python-2-only file()
        # constructor, and guarantee the handle is closed even if read() fails.
        f = open(filename, 'r')
        try:
            self.data = f.read()
        finally:
            f.close()
        self.headers = headers

    def request(self, uri,
                method='GET',
                body=None,
                headers=None,
                redirections=1,
                connection_type=None):
        """Return the canned (response, content) pair; all arguments ignored."""
        return httplib2.Response(self.headers), self.data
class HttpMockSequence(object):
    """Mock of httplib2.Http

    Mocks a sequence of calls to request returning different responses for each
    call. Create an instance initialized with the desired response headers
    and content and then use as if an httplib2.Http instance.

      http = HttpMockSequence([
        ({'status': '401'}, ''),
        ({'status': '200'}, '{"access_token":"1/3w","expires_in":3600}'),
        ({'status': '200'}, 'echo_request_headers'),
        ])
      resp, content = http.request("http://examples.com")

    There are special values you can pass in for content to trigger
    behavours that are helpful in testing.

    'echo_request_headers' means return the request headers in the response body
    'echo_request_headers_as_json' means return the request headers in
       the response body
    'echo_request_body' means return the request body in the response body
    'echo_request_uri' means return the request uri in the response body
    """

    def __init__(self, iterable):
        """
        Args:
          iterable: iterable, a sequence of pairs of (headers, body)
        """
        self._iterable = iterable
        self.follow_redirects = True

    def request(self, uri,
                method='GET',
                body=None,
                headers=None,
                redirections=1,
                connection_type=None):
        """Pop the next canned (headers, body) pair and build a response."""
        headers_resp, payload = self._iterable.pop(0)
        # Special sentinel payloads echo parts of the request back, which
        # lets tests inspect what would have been sent over the wire.
        if payload == 'echo_request_headers':
            payload = headers
        elif payload == 'echo_request_headers_as_json':
            payload = simplejson.dumps(headers)
        elif payload == 'echo_request_body':
            payload = body
        elif payload == 'echo_request_uri':
            payload = uri
        return httplib2.Response(headers_resp), payload
def set_user_agent(http, user_agent):
    """Set the user-agent on every request.

    Args:
      http - An instance of httplib2.Http
          or something that acts like it.
      user_agent: string, the value for the user-agent header.

    Returns:
      A modified instance of http that was passed in.

    Example:

      h = httplib2.Http()
      h = set_user_agent(h, "my-app-name/6.0")

    Most of the time the user-agent will be set doing auth, this is for the
    rare cases where you are accessing an unauthenticated endpoint.
    """
    request_orig = http.request

    # The closure that will replace 'httplib2.Http.request'.
    def new_request(uri, method='GET', body=None, headers=None,
                    redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                    connection_type=None):
        """Modify the request headers to add the user-agent."""
        headers = {} if headers is None else headers
        if 'user-agent' in headers:
            headers['user-agent'] = user_agent + ' ' + headers['user-agent']
        else:
            headers['user-agent'] = user_agent
        return request_orig(uri, method, body, headers,
                            redirections, connection_type)

    http.request = new_request
    return http
def tunnel_patch(http):
    """Tunnel PATCH requests over POST.

    Args:
      http - An instance of httplib2.Http
          or something that acts like it.

    Returns:
      A modified instance of http that was passed in.

    Example:

      h = httplib2.Http()
      h = tunnel_patch(h)

    Useful if you are running on a platform that doesn't support PATCH.
    Apply this last if you are using OAuth 1.0, as changing the method
    will result in a different signature.
    """
    request_orig = http.request

    # The closure that will replace 'httplib2.Http.request'.
    def new_request(uri, method='GET', body=None, headers=None,
                    redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                    connection_type=None):
        """Rewrite PATCH requests as POSTs carrying an override header."""
        if headers is None:
            headers = {}
        if method == 'PATCH':
            # OAuth 1.0 signs the method name, so changing it here would
            # invalidate an already-applied signature.
            if 'oauth_token' in headers.get('authorization', ''):
                logging.warning(
                    'OAuth 1.0 request made with Credentials after tunnel_patch.')
            headers['x-http-method-override'] = "PATCH"
            method = 'POST'
        return request_orig(uri, method, body, headers,
                            redirections, connection_type)

    http.request = new_request
    return http
| Python |
# Copyright (C) 2007 Joe Gregorio
#
# Licensed under the MIT License
"""MIME-Type Parser
This module provides basic functions for handling mime-types. It can handle
matching mime-types against a list of media-ranges. See section 14.1 of the
HTTP specification [RFC 2616] for a complete explanation.
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
Contents:
- parse_mime_type(): Parses a mime-type into its component parts.
- parse_media_range(): Media-ranges are mime-types with wild-cards and a 'q'
quality parameter.
- quality(): Determines the quality ('q') of a mime-type when
compared against a list of media-ranges.
- quality_parsed(): Just like quality() except the second parameter must be
pre-parsed.
- best_match(): Choose the mime-type with the highest quality ('q')
from a list of candidates.
"""
__version__ = '0.1.3'
__author__ = 'Joe Gregorio'
__email__ = 'joe@bitworking.org'
__license__ = 'MIT License'
__credits__ = ''
def parse_mime_type(mime_type):
    """Parses a mime-type into its component parts.

    Carves up a mime-type and returns a tuple of the (type, subtype, params)
    where 'params' is a dictionary of all the parameters for the media range.
    For example, the media range 'application/xhtml;q=0.5' would get parsed
    into:

       ('application', 'xhtml', {'q': '0.5'})

    Args:
      mime_type: string, a mime-type, optionally with ';key=value' parameters.

    Returns:
      (type, subtype, params) tuple with whitespace-stripped strings and a
      dict of parameter names to values.
    """
    parts = mime_type.split(';')
    # Every ';'-separated chunk after the first is a 'key=value' parameter.
    params = dict([tuple([s.strip() for s in param.split('=', 1)])
                   for param in parts[1:]
                   ])
    full_type = parts[0].strip()
    # Java URLConnection class sends an Accept header that includes a
    # single '*'. Turn it into a legal wildcard.
    if full_type == '*':
        full_type = '*/*'
    # Renamed from 'type' to avoid shadowing the builtin.
    (main_type, subtype) = full_type.split('/')
    return (main_type.strip(), subtype.strip(), params)
def parse_media_range(range):
    """Parse a media-range into its component parts.

    Carves up a media range and returns a tuple of the (type, subtype,
    params) where 'params' is a dictionary of all the parameters for the media
    range.  For example, the media range 'application/*;q=0.5' would get parsed
    into:

       ('application', '*', {'q': '0.5'})

    In addition this function also guarantees that there is a value for 'q'
    in the params dictionary, filling it in with a proper default if
    necessary.

    Args:
      range: string, a media range as used in an HTTP Accept header.

    Returns:
      (type, subtype, params) tuple where params always contains a 'q' key.
    """
    (type, subtype, params) = parse_mime_type(range)
    # Normalize 'q': missing, empty, zero, negative or > 1 values all fall
    # back to the default quality of '1'.  ('in' replaces the deprecated
    # dict.has_key(), with identical semantics.)
    if 'q' not in params or not params['q'] or \
            not float(params['q']) or float(params['q']) > 1 \
            or float(params['q']) < 0:
        params['q'] = '1'
    return (type, subtype, params)
def fitness_and_quality_parsed(mime_type, parsed_ranges):
    """Find the best match for a mime-type amongst parsed media-ranges.

    Find the best match for a given mime-type against a list of media_ranges
    that have already been parsed by parse_media_range(). Returns a tuple of
    the fitness value and the value of the 'q' quality parameter of the best
    match, or (-1, 0) if no match was found. Just as for quality_parsed(),
    'parsed_ranges' must be a list of parsed media ranges.

    Args:
      mime_type: string, the mime-type to match.
      parsed_ranges: list of (type, subtype, params) tuples from
        parse_media_range().

    Returns:
      (best_fitness, best_q) tuple; (-1, 0.0) when nothing matches.
    """
    best_fitness = -1
    best_fit_q = 0
    (target_type, target_subtype, target_params) = \
        parse_media_range(mime_type)
    for (range_type, range_subtype, range_params) in parsed_ranges:
        type_match = (range_type == target_type or
                      range_type == '*' or
                      target_type == '*')
        subtype_match = (range_subtype == target_subtype or
                         range_subtype == '*' or
                         target_subtype == '*')
        if type_match and subtype_match:
            # Count the target's non-q parameters the range matches exactly.
            # (sum of a generator replaces the original reduce(); 'in' and
            # items() replace the deprecated has_key()/iteritems().)
            param_matches = sum(
                1 for (key, value) in target_params.items()
                if key != 'q' and key in range_params
                and value == range_params[key])
            # Exact type matches dominate subtype matches, which dominate
            # parameter matches.
            fitness = 100 if range_type == target_type else 0
            fitness += 10 if range_subtype == target_subtype else 0
            fitness += param_matches
            if fitness > best_fitness:
                best_fitness = fitness
                best_fit_q = range_params['q']
    return best_fitness, float(best_fit_q)
def quality_parsed(mime_type, parsed_ranges):
    """Find the best match for a mime-type amongst parsed media-ranges.

    Find the best match for a given mime-type against a list of media_ranges
    that have already been parsed by parse_media_range(). Returns the 'q'
    quality parameter of the best match, 0 if no match was found. This function
    behaves the same as quality() except that 'parsed_ranges' must be a list of
    parsed media ranges.
    """
    fitness, q = fitness_and_quality_parsed(mime_type, parsed_ranges)
    return q
def quality(mime_type, ranges):
    """Return the quality ('q') of a mime-type against a list of media-ranges.

    Returns the quality 'q' of a mime-type when compared against the
    media-ranges in ranges. For example:

    >>> quality('text/html', 'text/*;q=0.3, text/html;q=0.7, '
    ...         'text/html;level=1, text/html;level=2;q=0.4, */*;q=0.5')
    0.7
    """
    parsed = [parse_media_range(r) for r in ranges.split(',')]
    return quality_parsed(mime_type, parsed)
def best_match(supported, header):
    """Return mime-type with the highest quality ('q') from list of candidates.

    Takes a list of supported mime-types and finds the best match for all the
    media-ranges listed in header. The value of header must be a string that
    conforms to the format of the HTTP Accept: header. The list of supported
    mime-types should be sorted in order of increasing desirability, in case
    of a situation where there is a tie.

    Args:
        supported: list of strings, the candidate mime-types.
        header: string, an HTTP Accept: header value.

    Returns:
        The best matching supported mime-type, or '' if nothing matched
        with a non-zero quality.

    >>> best_match(['application/xbel+xml', 'text/xml'],
                   'text/*;q=0.5,*/*; q=0.1')
    'text/xml'
    """
    split_header = _filter_blank(header.split(','))
    parsed_header = [parse_media_range(r) for r in split_header]
    # Tag each candidate with its (fitness, quality) and its list position
    # so that sort() breaks ties in favour of later (more desirable) entries.
    weighted_matches = []
    for pos, mime_type in enumerate(supported):
        weighted_matches.append((fitness_and_quality_parsed(mime_type,
                                 parsed_header), pos, mime_type))
    weighted_matches.sort()

    (fitness_and_q, _, mime_type) = weighted_matches[-1]
    if fitness_and_q[1]:
        return mime_type
    return ''
def _filter_blank(i):
for s in i:
if s.strip():
yield s
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Schema processing for discovery based APIs
Schemas holds an API's discovery schemas. It can return those schemas as
deserialized JSON objects, or pretty print them as prototype objects that
conform to the schema.
For example, given the schema:
schema = \"\"\"{
"Foo": {
"type": "object",
"properties": {
"etag": {
"type": "string",
"description": "ETag of the collection."
},
"kind": {
"type": "string",
"description": "Type of the collection ('calendar#acl').",
"default": "calendar#acl"
},
"nextPageToken": {
"type": "string",
"description": "Token used to access the next
page of this result. Omitted if no further results are available."
}
}
}
}\"\"\"
s = Schemas(schema)
print s.prettyPrintByName('Foo')
Produces the following output:
{
"nextPageToken": "A String", # Token used to access the
# next page of this result. Omitted if no further results are available.
"kind": "A String", # Type of the collection ('calendar#acl').
"etag": "A String", # ETag of the collection.
},
The constructor takes a discovery document in which to look up named schema.
"""
# TODO(jcgregorio) support format, enum, minimum, maximum
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import copy
from oauth2client.anyjson import simplejson
class Schemas(object):
  """Schemas for an API."""

  def __init__(self, discovery):
    """Constructor.

    Args:
      discovery: object, Deserialized discovery document from which we pull
        out the named schema.
    """
    self.schemas = discovery.get('schemas', {})

    # Memoized pretty-printed prototypes, keyed by schema name.
    self.pretty = {}

  def _prettyPrintByName(self, name, seen=None, dent=0):
    """Get pretty printed object prototype from the schema name.

    Args:
      name: string, Name of schema in the discovery document.
      seen: list of string, Names of schema already seen. Used to handle
        recursive definitions.
      dent: int, Initial indentation depth.

    Returns:
      string, A string that contains a prototype object with
        comments that conforms to the given schema.
    """
    if seen is None:
      seen = []

    if name in seen:
      # Break the cycle: a recursive definition would otherwise never
      # terminate.
      return '# Object with schema name: %s' % name
    seen.append(name)

    if name not in self.pretty:
      prototype = _SchemaToStruct(self.schemas[name], seen, dent)
      self.pretty[name] = prototype.to_str(self._prettyPrintByName)

    seen.pop()
    return self.pretty[name]

  def prettyPrintByName(self, name):
    """Get pretty printed object prototype from the schema name.

    Args:
      name: string, Name of schema in the discovery document.

    Returns:
      string, A string that contains a prototype object with
        comments that conforms to the given schema.
    """
    # Return with trailing comma and newline removed.
    return self._prettyPrintByName(name, seen=[], dent=1)[:-2]

  def _prettyPrintSchema(self, schema, seen=None, dent=0):
    """Get pretty printed object prototype of schema.

    Args:
      schema: object, Parsed JSON schema.
      seen: list of string, Names of schema already seen. Used to handle
        recursive definitions.
      dent: int, Initial indentation depth.

    Returns:
      string, A string that contains a prototype object with
        comments that conforms to the given schema.
    """
    if seen is None:
      seen = []
    return _SchemaToStruct(schema, seen, dent).to_str(self._prettyPrintByName)

  def prettyPrintSchema(self, schema):
    """Get pretty printed object prototype of schema.

    Args:
      schema: object, Parsed JSON schema.

    Returns:
      string, A string that contains a prototype object with
        comments that conforms to the given schema.
    """
    # Return with trailing comma and newline removed.
    return self._prettyPrintSchema(schema, dent=1)[:-2]

  def get(self, name):
    """Get deserialized JSON schema from the schema name.

    Args:
      name: string, Schema name.
    """
    return self.schemas[name]
class _SchemaToStruct(object):
"""Convert schema to a prototype object."""
def __init__(self, schema, seen, dent=0):
"""Constructor.
Args:
schema: object, Parsed JSON schema.
seen: list, List of names of schema already seen while parsing. Used to
handle recursive definitions.
dent: int, Initial indentation depth.
"""
# The result of this parsing kept as list of strings.
self.value = []
# The final value of the parsing.
self.string = None
# The parsed JSON schema.
self.schema = schema
# Indentation level.
self.dent = dent
# Method that when called returns a prototype object for the schema with
# the given name.
self.from_cache = None
# List of names of schema already seen while parsing.
self.seen = seen
def emit(self, text):
"""Add text as a line to the output.
Args:
text: string, Text to output.
"""
self.value.extend([" " * self.dent, text, '\n'])
def emitBegin(self, text):
"""Add text to the output, but with no line terminator.
Args:
text: string, Text to output.
"""
self.value.extend([" " * self.dent, text])
def emitEnd(self, text, comment):
"""Add text and comment to the output with line terminator.
Args:
text: string, Text to output.
comment: string, Python comment.
"""
if comment:
divider = '\n' + ' ' * (self.dent + 2) + '# '
lines = comment.splitlines()
lines = [x.rstrip() for x in lines]
comment = divider.join(lines)
self.value.extend([text, ' # ', comment, '\n'])
else:
self.value.extend([text, '\n'])
def indent(self):
"""Increase indentation level."""
self.dent += 1
def undent(self):
"""Decrease indentation level."""
self.dent -= 1
def _to_str_impl(self, schema):
"""Prototype object based on the schema, in Python code with comments.
Args:
schema: object, Parsed JSON schema file.
Returns:
Prototype object based on the schema, in Python code with comments.
"""
stype = schema.get('type')
if stype == 'object':
self.emitEnd('{', schema.get('description', ''))
self.indent()
for pname, pschema in schema.get('properties', {}).iteritems():
self.emitBegin('"%s": ' % pname)
self._to_str_impl(pschema)
self.undent()
self.emit('},')
elif '$ref' in schema:
schemaName = schema['$ref']
description = schema.get('description', '')
s = self.from_cache(schemaName, self.seen)
parts = s.splitlines()
self.emitEnd(parts[0], description)
for line in parts[1:]:
self.emit(line.rstrip())
elif stype == 'boolean':
value = schema.get('default', 'True or False')
self.emitEnd('%s,' % str(value), schema.get('description', ''))
elif stype == 'string':
value = schema.get('default', 'A String')
self.emitEnd('"%s",' % str(value), schema.get('description', ''))
elif stype == 'integer':
value = schema.get('default', '42')
self.emitEnd('%s,' % str(value), schema.get('description', ''))
elif stype == 'number':
value = schema.get('default', '3.14')
self.emitEnd('%s,' % str(value), schema.get('description', ''))
elif stype == 'null':
self.emitEnd('None,', schema.get('description', ''))
elif stype == 'any':
self.emitEnd('"",', schema.get('description', ''))
elif stype == 'array':
self.emitEnd('[', schema.get('description'))
self.indent()
self.emitBegin('')
self._to_str_impl(schema['items'])
self.undent()
self.emit('],')
else:
self.emit('Unknown type! %s' % stype)
self.emitEnd('', '')
self.string = ''.join(self.value)
return self.string
def to_str(self, from_cache):
"""Prototype object based on the schema, in Python code with comments.
Args:
from_cache: callable(name, seen), Callable that retrieves an object
prototype for a schema with the given name. Seen is a list of schema
names already seen as we recursively descend the schema definition.
Returns:
Prototype object based on the schema, in Python code with comments.
The lines of the code will all be properly indented.
"""
self.from_cache = from_cache
return self._to_str_impl(self.schema)
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility module to import a JSON module
Hides all the messy details of exactly where
we get a simplejson module from.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
try: # pragma: no cover
import simplejson
except ImportError: # pragma: no cover
try:
# Try to import from django, should work on App Engine
from django.utils import simplejson
except ImportError:
# Should work for Python2.6 and higher.
import json as simplejson
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for OAuth.
Utilities for making it easier to work with OAuth.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import copy
import httplib2
import logging
import oauth2 as oauth
import urllib
import urlparse
from anyjson import simplejson
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
class Error(Exception):
  """Base error for this module.

  All other exceptions raised by this module derive from this class, so
  callers can catch Error to handle any failure raised here.
  """
  pass


class RequestError(Error):
  """Error occurred during request."""
  pass


class MissingParameter(Error):
  """A parameter required for the OAuth dance was not supplied."""
  pass


class CredentialsInvalidError(Error):
  """The credentials are no longer valid, e.g. the access token was revoked."""
  pass
def _abstract():
  """Raise NotImplementedError; body for methods subclasses must override."""
  raise NotImplementedError('You need to override this function')
def _oauth_uri(name, discovery, params):
"""Look up the OAuth URI from the discovery
document and add query parameters based on
params.
name - The name of the OAuth URI to lookup, one
of 'request', 'access', or 'authorize'.
discovery - Portion of discovery document the describes
the OAuth endpoints.
params - Dictionary that is used to form the query parameters
for the specified URI.
"""
if name not in ['request', 'access', 'authorize']:
raise KeyError(name)
keys = discovery[name]['parameters'].keys()
query = {}
for key in keys:
if key in params:
query[key] = params[key]
return discovery[name]['url'] + '?' + urllib.urlencode(query)
class Credentials(object):
  """Base class for all Credentials objects.

  Subclasses must define an authorize() method that applies the credentials
  to an HTTP transport.
  """

  def authorize(self, http):
    """Authorize an HTTP transport with these credentials.

    Takes an httplib2.Http instance (or equivalent) and authorizes it for
    this set of credentials, usually by replacing http.request() with a
    method that adds the appropriate headers before delegating to the
    original Http.request() method.

    Args:
      http: httplib2.Http, the transport to authorize.
    """
    _abstract()
class Flow(object):
  """Base class for all Flow objects.

  Concrete flows (e.g. FlowThreeLegged below) implement the steps of an
  OAuth authorization dance that produces a Credentials object.
  """
  pass
class Storage(object):
  """Base class for all Storage objects.

  Stores and retrieves a single credential.
  """

  def get(self):
    """Retrieve the stored credential.

    Returns:
      apiclient.oauth.Credentials
    """
    _abstract()

  def put(self, credentials):
    """Write a credential to storage.

    Args:
      credentials: Credentials, the credentials to store.
    """
    _abstract()
class OAuthCredentials(Credentials):
  """Credentials object for OAuth 1.0a."""

  def __init__(self, consumer, token, user_agent):
    """Constructor.

    consumer - An instance of oauth.Consumer.
    token - An instance of oauth.Token constructed with
      the access token and secret.
    user_agent - The HTTP User-Agent to provide for this application.
    """
    self.consumer = consumer
    self.token = token
    self.user_agent = user_agent
    self.store = None

    # True if the credentials have been revoked
    self._invalid = False

  @property
  def invalid(self):
    """True if the credentials are invalid, such as being revoked."""
    # getattr() guards against instances unpickled from a state that was
    # saved before _invalid existed.
    return getattr(self, "_invalid", False)

  def set_store(self, store):
    """Set the storage for the credential.

    Args:
      store: callable, a callable that when passed a Credential
        will store the credential back to where it came from.
        This is needed to store the latest access_token if it
        has been revoked.
    """
    self.store = store

  def __getstate__(self):
    """Trim the state down to something that can be pickled."""
    # The store callable is environment-specific and may not be picklable.
    d = copy.copy(self.__dict__)
    del d['store']
    return d

  def __setstate__(self, state):
    """Reconstitute the state of the object from being pickled."""
    self.__dict__.update(state)
    self.store = None

  def authorize(self, http):
    """Authorize an httplib2.Http instance with these Credentials.

    Args:
      http - An instance of httplib2.Http
        or something that acts like it.

    Returns:
      A modified instance of http that was passed in.

    Example:

      h = httplib2.Http()
      h = credentials.authorize(h)

    You can't create a new OAuth subclass of httplib2.Authentication
    because it never gets passed the absolute URI, which is needed for
    signing. So instead we have to overload 'request' with a closure
    that adds in the Authorization header and then calls the original
    version of 'request()'.
    """
    request_orig = http.request
    signer = oauth.SignatureMethod_HMAC_SHA1()

    # The closure that will replace 'httplib2.Http.request'.
    def new_request(uri, method='GET', body=None, headers=None,
                    redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                    connection_type=None):
      """Modify the request headers to add the appropriate
      Authorization header."""
      # Redirects are followed manually so that every hop is re-signed
      # with the redirected URI.
      response_code = 302
      http.follow_redirects = False
      while response_code in [301, 302]:
        req = oauth.Request.from_consumer_and_token(
            self.consumer, self.token, http_method=method, http_url=uri)
        req.sign_request(signer, self.consumer, self.token)
        if headers is None:
          headers = {}
        # NOTE(review): this mutates a caller-supplied headers dict; the
        # OAuth headers persist into later calls with the same dict.
        headers.update(req.to_header())
        if 'user-agent' in headers:
          headers['user-agent'] = self.user_agent + ' ' + headers['user-agent']
        else:
          headers['user-agent'] = self.user_agent
        resp, content = request_orig(uri, method, body, headers,
                                     redirections, connection_type)
        response_code = resp.status
        if response_code in [301, 302]:
          uri = resp['location']

      # Update the stored credential if it becomes invalid.
      if response_code == 401:
        logging.info('Access token no longer valid: %s' % content)
        self._invalid = True
        if self.store is not None:
          self.store(self)
        raise CredentialsInvalidError("Credentials are no longer valid.")

      return resp, content

    http.request = new_request
    return http
class TwoLeggedOAuthCredentials(Credentials):
  """Two Legged Credentials object for OAuth 1.0a.

  The Two Legged object is created directly, not from a flow.  Once you
  authorize an httplib2.Http instance you can change the requestor and that
  change will propagate to the authorized httplib2.Http instance. For example:

    http = httplib2.Http()
    http = credentials.authorize(http)

    credentials.requestor = 'foo@example.info'
    http.request(...)
    credentials.requestor = 'bar@example.info'
    http.request(...)
  """

  def __init__(self, consumer_key, consumer_secret, user_agent):
    """Constructor.

    Args:
      consumer_key: string, An OAuth 1.0 consumer key
      consumer_secret: string, An OAuth 1.0 consumer secret
      user_agent: string, The HTTP User-Agent to provide for this application.
    """
    self.consumer = oauth.Consumer(consumer_key, consumer_secret)
    self.user_agent = user_agent
    self.store = None

    # email address of the user to act on the behalf of.
    self._requestor = None

  @property
  def invalid(self):
    """True if the credentials are invalid, such as being revoked.

    Always returns False for Two Legged Credentials.
    """
    return False

  def getrequestor(self):
    # Accessor backing the 'requestor' property below.
    return self._requestor

  def setrequestor(self, email):
    # Mutator backing the 'requestor' property below.
    self._requestor = email

  requestor = property(getrequestor, setrequestor, None,
      'The email address of the user to act on behalf of')

  def set_store(self, store):
    """Set the storage for the credential.

    Args:
      store: callable, a callable that when passed a Credential
        will store the credential back to where it came from.
        This is needed to store the latest access_token if it
        has been revoked.
    """
    self.store = store

  def __getstate__(self):
    """Trim the state down to something that can be pickled."""
    # The store callable is environment-specific and may not be picklable.
    d = copy.copy(self.__dict__)
    del d['store']
    return d

  def __setstate__(self, state):
    """Reconstitute the state of the object from being pickled."""
    self.__dict__.update(state)
    self.store = None

  def authorize(self, http):
    """Authorize an httplib2.Http instance with these Credentials.

    Args:
      http - An instance of httplib2.Http
        or something that acts like it.

    Returns:
      A modified instance of http that was passed in.

    Example:

      h = httplib2.Http()
      h = credentials.authorize(h)

    You can't create a new OAuth subclass of httplib2.Authentication
    because it never gets passed the absolute URI, which is needed for
    signing. So instead we have to overload 'request' with a closure
    that adds in the Authorization header and then calls the original
    version of 'request()'.
    """
    request_orig = http.request
    signer = oauth.SignatureMethod_HMAC_SHA1()

    # The closure that will replace 'httplib2.Http.request'.
    def new_request(uri, method='GET', body=None, headers=None,
                    redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                    connection_type=None):
      """Modify the request headers to add the appropriate
      Authorization header."""
      # Redirects are followed manually so that every hop is re-signed
      # with the redirected URI.
      response_code = 302
      http.follow_redirects = False
      while response_code in [301, 302]:
        # add in xoauth_requestor_id=self._requestor to the uri
        if self._requestor is None:
          raise MissingParameter(
              'Requestor must be set before using TwoLeggedOAuthCredentials')
        parsed = list(urlparse.urlparse(uri))
        q = parse_qsl(parsed[4])
        q.append(('xoauth_requestor_id', self._requestor))
        parsed[4] = urllib.urlencode(q)
        uri = urlparse.urlunparse(parsed)

        req = oauth.Request.from_consumer_and_token(
            self.consumer, None, http_method=method, http_url=uri)
        req.sign_request(signer, self.consumer, None)
        if headers is None:
          headers = {}
        # NOTE(review): this mutates a caller-supplied headers dict; the
        # OAuth headers persist into later calls with the same dict.
        headers.update(req.to_header())
        if 'user-agent' in headers:
          headers['user-agent'] = self.user_agent + ' ' + headers['user-agent']
        else:
          headers['user-agent'] = self.user_agent
        resp, content = request_orig(uri, method, body, headers,
                                     redirections, connection_type)
        response_code = resp.status
        if response_code in [301, 302]:
          uri = resp['location']

      if response_code == 401:
        logging.info('Access token no longer valid: %s' % content)
        # Do not store the invalid state of the Credentials because
        # being 2LO they could be reinstated in the future.
        raise CredentialsInvalidError("Credentials are invalid.")

      return resp, content

    http.request = new_request
    return http
class FlowThreeLegged(Flow):
  """Does the Three Legged Dance for OAuth 1.0a."""

  def __init__(self, discovery, consumer_key, consumer_secret, user_agent,
               **kwargs):
    """Constructor.

    discovery - Section of the API discovery document that describes
      the OAuth endpoints.
    consumer_key - OAuth consumer key
    consumer_secret - OAuth consumer secret
    user_agent - The HTTP User-Agent that identifies the application.
    **kwargs - The keyword arguments are all optional and required
      parameters for the OAuth calls.

    Raises:
      MissingParameter: if a parameter the discovery document marks as
        required was not supplied in **kwargs.
    """
    self.discovery = discovery
    self.consumer_key = consumer_key
    self.consumer_secret = consumer_secret
    self.user_agent = user_agent
    self.params = kwargs
    self.request_token = {}

    # Collect every non-oauth parameter any endpoint marks as required,
    # then verify each one was supplied.
    required = {}
    for uriinfo in discovery.itervalues():
      for name, value in uriinfo['parameters'].iteritems():
        if value['required'] and not name.startswith('oauth_'):
          required[name] = 1
    for key in required.iterkeys():
      if key not in self.params:
        raise MissingParameter('Required parameter %s not supplied' % key)

  def step1_get_authorize_url(self, oauth_callback='oob'):
    """Returns a URI to redirect to the provider.

    oauth_callback - Either the string 'oob' for a non-web-based application,
      or a URI that handles the callback from the authorization server.

    If oauth_callback is 'oob' then pass in the
    generated verification code to step2_exchange,
    otherwise pass in the query parameters received
    at the callback uri to step2_exchange.

    Raises:
      RequestError: if the server does not answer with HTTP 200.
    """
    consumer = oauth.Consumer(self.consumer_key, self.consumer_secret)
    client = oauth.Client(consumer)

    headers = {
        'user-agent': self.user_agent,
        'content-type': 'application/x-www-form-urlencoded'
    }
    body = urllib.urlencode({'oauth_callback': oauth_callback})
    uri = _oauth_uri('request', self.discovery, self.params)

    resp, content = client.request(uri, 'POST', headers=headers,
                                   body=body)
    if resp['status'] != '200':
      logging.error('Failed to retrieve temporary authorization: %s', content)
      raise RequestError('Invalid response %s.' % resp['status'])

    # Keep the temporary request token for step2_exchange.
    self.request_token = dict(parse_qsl(content))

    auth_params = copy.copy(self.params)
    auth_params['oauth_token'] = self.request_token['oauth_token']

    return _oauth_uri('authorize', self.discovery, auth_params)

  def step2_exchange(self, verifier):
    """Exchanges an authorized request token for OAuthCredentials.

    Args:
      verifier: string, dict - either the verifier token, or a dictionary
        of the query parameters to the callback, which contains
        the oauth_verifier.

    Returns:
      The Credentials object.

    Raises:
      RequestError: if the server does not answer with HTTP 200.
    """
    if not (isinstance(verifier, str) or isinstance(verifier, unicode)):
      verifier = verifier['oauth_verifier']

    token = oauth.Token(
        self.request_token['oauth_token'],
        self.request_token['oauth_token_secret'])
    token.set_verifier(verifier)
    consumer = oauth.Consumer(self.consumer_key, self.consumer_secret)
    client = oauth.Client(consumer, token)

    headers = {
        'user-agent': self.user_agent,
        'content-type': 'application/x-www-form-urlencoded'
    }

    uri = _oauth_uri('access', self.discovery, self.params)
    resp, content = client.request(uri, 'POST', headers=headers)
    if resp['status'] != '200':
      logging.error('Failed to retrieve access token: %s', content)
      raise RequestError('Invalid response %s.' % resp['status'])

    oauth_params = dict(parse_qsl(content))
    token = oauth.Token(
        oauth_params['oauth_token'],
        oauth_params['oauth_token_secret'])

    return OAuthCredentials(consumer, token, self.user_agent)
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for discovery based APIs
A client library for Google's discovery based APIs.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
# Public names exported by this module.  Note: every entry must be followed
# by a comma; the original was missing one after 'build_from_document',
# which silently concatenated two adjacent string literals into the single
# bogus name 'build_from_documentfix_method_name'.
__all__ = [
    'build',
    'build_from_document',
    'fix_method_name',
    'key2param',
    ]
import copy
import httplib2
import logging
import os
import random
import re
import uritemplate
import urllib
import urlparse
import mimeparse
import mimetypes
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
from apiclient.errors import HttpError
from apiclient.errors import InvalidJsonError
from apiclient.errors import MediaUploadSizeError
from apiclient.errors import UnacceptableMimeTypeError
from apiclient.errors import UnknownApiNameOrVersion
from apiclient.errors import UnknownLinkType
from apiclient.http import HttpRequest
from apiclient.http import MediaFileUpload
from apiclient.http import MediaUpload
from apiclient.model import JsonModel
from apiclient.model import MediaModel
from apiclient.model import RawModel
from apiclient.schema import Schemas
from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
from oauth2client.anyjson import simplejson
# Module-level logger for the discovery client.
logger = logging.getLogger(__name__)

# Matches a URI Template expression, e.g. '{apiVersion}'.
URITEMPLATE = re.compile('{[^}]*}')
# Matches a legal variable name inside a URI Template expression.
VARNAME = re.compile('[a-zA-Z0-9_-]+')
# URI Template for the location of a service's discovery document.
DISCOVERY_URI = ('https://www.googleapis.com/discovery/v1/apis/'
                 '{api}/{apiVersion}/rest')
# Fallback docstring for generated methods with no discovery description.
DEFAULT_METHOD_DOC = 'A description of how to use this function'

# Parameters accepted by the stack, but not visible via discovery.
STACK_QUERY_PARAMETERS = ['trace', 'pp', 'userip', 'strict']
RESERVED_WORDS = ['and', 'assert', 'break', 'class', 'continue', 'def', 'del',
'elif', 'else', 'except', 'exec', 'finally', 'for', 'from',
'global', 'if', 'import', 'in', 'is', 'lambda', 'not', 'or',
'pass', 'print', 'raise', 'return', 'try', 'while' ]
def fix_method_name(name):
"""Fix method names to avoid reserved word conflicts.
Args:
name: string, method name.
Returns:
The name with a '_' prefixed if the name is a reserved word.
"""
if name in RESERVED_WORDS:
return name + '_'
else:
return name
def _add_query_parameter(url, name, value):
"""Adds a query parameter to a url.
Replaces the current value if it already exists in the URL.
Args:
url: string, url to add the query parameter to.
name: string, query parameter name.
value: string, query parameter value.
Returns:
Updated query parameter. Does not update the url if value is None.
"""
if value is None:
return url
else:
parsed = list(urlparse.urlparse(url))
q = dict(parse_qsl(parsed[4]))
q[name] = value
parsed[4] = urllib.urlencode(q)
return urlparse.urlunparse(parsed)
def key2param(key):
  """Converts key names into parameter names.

  For example, converting "max-results" -> "max_results"

  Args:
    key: string, the method key name.

  Returns:
    A safe method name based on the key name.
  """
  # A leading non-alphabetic character is prefixed with 'x' so the result
  # is a legal Python identifier.
  prefix = '' if key[0].isalpha() else 'x'
  return prefix + ''.join([c if c.isalnum() else '_' for c in key])
def build(serviceName,
          version,
          http=None,
          discoveryServiceUrl=DISCOVERY_URI,
          developerKey=None,
          model=None,
          requestBuilder=HttpRequest):
  """Construct a Resource for interacting with an API.

  Construct a Resource object for interacting with an API. The serviceName and
  version are the names from the Discovery service.

  Args:
    serviceName: string, name of the service.
    version: string, the version of the service.
    http: httplib2.Http, An instance of httplib2.Http or something that acts
      like it that HTTP requests will be made through.
    discoveryServiceUrl: string, a URI Template that points to the location of
      the discovery service. It should have two parameters {api} and
      {apiVersion} that when filled in produce an absolute URI to the discovery
      document for that service.
    developerKey: string, key obtained from
      https://code.google.com/apis/console.
    model: apiclient.Model, converts to and from the wire format.
    requestBuilder: apiclient.http.HttpRequest, encapsulator for an HTTP
      request.

  Returns:
    A Resource object with methods for interacting with the service.

  Raises:
    UnknownApiNameOrVersion: if the discovery service answers with a 404.
    HttpError: if fetching the discovery document fails in any other way.
    InvalidJsonError: if the retrieved document is not valid JSON.
  """
  params = {
      'api': serviceName,
      'apiVersion': version
      }

  if http is None:
    http = httplib2.Http()

  requested_url = uritemplate.expand(discoveryServiceUrl, params)

  # REMOTE_ADDR is defined by the CGI spec [RFC3875] as the environment
  # variable that contains the network address of the client sending the
  # request. If it exists then add that to the request for the discovery
  # document to avoid exceeding the quota on discovery requests.
  if 'REMOTE_ADDR' in os.environ:
    requested_url = _add_query_parameter(requested_url, 'userIp',
                                         os.environ['REMOTE_ADDR'])
  logger.info('URL being requested: %s' % requested_url)

  resp, content = http.request(requested_url)

  if resp.status == 404:
    raise UnknownApiNameOrVersion("name: %s version: %s" % (serviceName,
                                                            version))
  if resp.status >= 400:
    raise HttpError(resp, content, requested_url)

  try:
    service = simplejson.loads(content)
  except ValueError, e:
    logger.error('Failed to parse as JSON: ' + content)
    raise InvalidJsonError()

  # The raw JSON string (not the parsed object) is handed on, since
  # build_from_document() does its own parsing.
  return build_from_document(content, discoveryServiceUrl, http=http,
      developerKey=developerKey, model=model, requestBuilder=requestBuilder)
def build_from_document(
    service,
    base,
    future=None,
    http=None,
    developerKey=None,
    model=None,
    requestBuilder=HttpRequest):
  """Create a Resource for interacting with an API.

  Same as `build()`, but constructs the Resource object from a discovery
  document that it is given, as opposed to retrieving one over HTTP.

  Args:
    service: string, discovery document.
    base: string, base URI for all HTTP requests, usually the discovery URI.
    future: string, discovery document with future capabilities (deprecated).
    http: httplib2.Http, An instance of httplib2.Http or something that acts
      like it that HTTP requests will be made through.
    developerKey: string, Key for controlling API usage, generated
      from the API Console.
    model: Model class instance that serializes and de-serializes requests and
      responses.
    requestBuilder: Takes an http request and packages it up to be executed.

  Returns:
    A Resource object with methods for interacting with the service.
  """
  # The 'future' argument is deprecated and ignored.
  future = {}

  service = simplejson.loads(service)
  base = urlparse.urljoin(base, service['basePath'])
  schema = Schemas(service)

  if model is None:
    model = JsonModel('dataWrapper' in service.get('features', []))

  return _createResource(http, base, model, requestBuilder, developerKey,
                         service, service, schema)
def _cast(value, schema_type):
"""Convert value to a string based on JSON Schema type.
See http://tools.ietf.org/html/draft-zyp-json-schema-03 for more details on
JSON Schema.
Args:
value: any, the value to convert
schema_type: string, the type that value should be interpreted as
Returns:
A string representation of 'value' based on the schema_type.
"""
if schema_type == 'string':
if type(value) == type('') or type(value) == type(u''):
return value
else:
return str(value)
elif schema_type == 'integer':
return str(int(value))
elif schema_type == 'number':
return str(float(value))
elif schema_type == 'boolean':
return str(bool(value)).lower()
else:
if type(value) == type('') or type(value) == type(u''):
return value
else:
return str(value)
MULTIPLIERS = {
"KB": 2 ** 10,
"MB": 2 ** 20,
"GB": 2 ** 30,
"TB": 2 ** 40,
}
def _media_size_to_long(maxSize):
"""Convert a string media size, such as 10GB or 3TB into an integer.
Args:
maxSize: string, size as a string, such as 2MB or 7GB.
Returns:
The size as an integer value.
"""
if len(maxSize) < 2:
return 0
units = maxSize[-2:].upper()
multiplier = MULTIPLIERS.get(units, 0)
if multiplier:
return int(maxSize[:-2]) * multiplier
else:
return int(maxSize)
def _createResource(http, baseUrl, model, requestBuilder,
                    developerKey, resourceDesc, rootDesc, schema):
  """Build a Resource from the API description.

  Args:
    http: httplib2.Http, Object to make http requests with.
    baseUrl: string, base URL for the API. All requests are relative to this
        URI.
    model: apiclient.Model, converts to and from the wire format.
    requestBuilder: class or callable that instantiates an
        apiclient.HttpRequest object.
    developerKey: string, key obtained from
        https://code.google.com/apis/console
    resourceDesc: object, section of deserialized discovery document that
        describes a resource. Note that the top level discovery document
        is considered a resource.
    rootDesc: object, the entire deserialized discovery document.
    schema: object, mapping of schema names to schema descriptions.

  Returns:
    An instance of Resource with all the methods attached for interacting with
    that resource.
  """

  class Resource(object):
    """A class for interacting with a resource."""

    def __init__(self):
      self._http = http
      self._baseUrl = baseUrl
      self._model = model
      self._developerKey = developerKey
      self._requestBuilder = requestBuilder

  def createMethod(theclass, methodName, methodDesc, rootDesc):
    """Creates a method for attaching to a Resource.

    Args:
      theclass: type, the class to attach methods to.
      methodName: string, name of the method to use.
      methodDesc: object, fragment of deserialized discovery document that
        describes the method.
      rootDesc: object, the entire deserialized discovery document.
    """
    methodName = fix_method_name(methodName)
    pathUrl = methodDesc['path']
    httpMethod = methodDesc['httpMethod']
    methodId = methodDesc['id']

    mediaPathUrl = None
    accept = []
    maxSize = 0
    if 'mediaUpload' in methodDesc:
      mediaUpload = methodDesc['mediaUpload']
      # TODO(jcgregorio) Use URLs from discovery once it is updated.
      parsed = list(urlparse.urlparse(baseUrl))
      basePath = parsed[2]
      mediaPathUrl = '/upload' + basePath + pathUrl
      accept = mediaUpload['accept']
      maxSize = _media_size_to_long(mediaUpload.get('maxSize', ''))

    if 'parameters' not in methodDesc:
      methodDesc['parameters'] = {}

    # Add in the parameters common to all methods.
    for name, desc in rootDesc.get('parameters', {}).iteritems():
      methodDesc['parameters'][name] = desc

    # Add in undocumented query parameters.
    for name in STACK_QUERY_PARAMETERS:
      methodDesc['parameters'][name] = {
          'type': 'string',
          'location': 'query'
          }

    if httpMethod in ['PUT', 'POST', 'PATCH'] and 'request' in methodDesc:
      methodDesc['parameters']['body'] = {
          'description': 'The request body.',
          'type': 'object',
          'required': True,
          }
      # The guard above guarantees 'request' is present, so merge its schema
      # into the synthesized 'body' parameter unconditionally.  (Previously a
      # redundant inner 'if' re-tested the same condition with a dead else.)
      methodDesc['parameters']['body'].update(methodDesc['request'])
    if 'mediaUpload' in methodDesc:
      methodDesc['parameters']['media_body'] = {
          'description': 'The filename of the media request body.',
          'type': 'string',
          'required': False,
          }
      # When media can be uploaded alone, a JSON body becomes optional.
      if 'body' in methodDesc['parameters']:
        methodDesc['parameters']['body']['required'] = False

    argmap = {} # Map from method parameter name to query parameter name
    required_params = [] # Required parameters
    repeated_params = [] # Repeated parameters
    pattern_params = {}  # Parameters that must match a regex
    query_params = [] # Parameters that will be used in the query string
    path_params = {} # Parameters that will be used in the base URL
    param_type = {} # The type of the parameter
    enum_params = {} # Allowable enumeration values for each parameter

    if 'parameters' in methodDesc:
      for arg, desc in methodDesc['parameters'].iteritems():
        param = key2param(arg)
        argmap[param] = arg

        if desc.get('pattern', ''):
          pattern_params[param] = desc['pattern']
        if desc.get('enum', ''):
          enum_params[param] = desc['enum']
        if desc.get('required', False):
          required_params.append(param)
        if desc.get('repeated', False):
          repeated_params.append(param)
        if desc.get('location') == 'query':
          query_params.append(param)
        if desc.get('location') == 'path':
          path_params[param] = param
        param_type[param] = desc.get('type', 'string')

    # Any URI-template variable in the path is a path parameter, even when
    # the discovery document also listed it under 'query'.
    for match in URITEMPLATE.finditer(pathUrl):
      for namematch in VARNAME.finditer(match.group(0)):
        name = key2param(namematch.group(0))
        path_params[name] = name
        if name in query_params:
          query_params.remove(name)

    def method(self, **kwargs):
      # Don't bother with doc string, it will be over-written by createMethod.
      for name in kwargs.iterkeys():
        if name not in argmap:
          raise TypeError('Got an unexpected keyword argument "%s"' % name)

      # Remove args that have a value of None.
      keys = kwargs.keys()
      for name in keys:
        if kwargs[name] is None:
          del kwargs[name]

      for name in required_params:
        if name not in kwargs:
          raise TypeError('Missing required parameter "%s"' % name)

      for name, regex in pattern_params.iteritems():
        if name in kwargs:
          if isinstance(kwargs[name], basestring):
            pvalues = [kwargs[name]]
          else:
            pvalues = kwargs[name]
          for pvalue in pvalues:
            if re.match(regex, pvalue) is None:
              raise TypeError(
                  'Parameter "%s" value "%s" does not match the pattern "%s"' %
                  (name, pvalue, regex))

      for name, enums in enum_params.iteritems():
        if name in kwargs:
          # We need to handle the case of a repeated enum
          # name differently, since we want to handle both
          # arg='value' and arg=['value1', 'value2']
          if (name in repeated_params and
              not isinstance(kwargs[name], basestring)):
            values = kwargs[name]
          else:
            values = [kwargs[name]]
          for value in values:
            if value not in enums:
              raise TypeError(
                  'Parameter "%s" value "%s" is not an allowed value in "%s"' %
                  (name, value, str(enums)))

      actual_query_params = {}
      actual_path_params = {}
      for key, value in kwargs.iteritems():
        to_type = param_type.get(key, 'string')
        # For repeated parameters we cast each member of the list.
        if key in repeated_params and type(value) == type([]):
          cast_value = [_cast(x, to_type) for x in value]
        else:
          cast_value = _cast(value, to_type)
        if key in query_params:
          actual_query_params[argmap[key]] = cast_value
        if key in path_params:
          actual_path_params[argmap[key]] = cast_value
      body_value = kwargs.get('body', None)
      media_filename = kwargs.get('media_body', None)

      if self._developerKey:
        actual_query_params['key'] = self._developerKey

      model = self._model
      # If there is no schema for the response then presume a binary blob.
      if methodName.endswith('_media'):
        model = MediaModel()
      elif 'response' not in methodDesc:
        model = RawModel()

      headers = {}
      headers, params, query, body = model.request(headers,
          actual_path_params, actual_query_params, body_value)

      expanded_url = uritemplate.expand(pathUrl, params)
      url = urlparse.urljoin(self._baseUrl, expanded_url + query)

      resumable = None
      multipart_boundary = ''

      if media_filename:
        # Ensure we end up with a valid MediaUpload object.
        if isinstance(media_filename, basestring):
          (media_mime_type, encoding) = mimetypes.guess_type(media_filename)
          if media_mime_type is None:
            raise UnknownFileType(media_filename)
          if not mimeparse.best_match([media_mime_type], ','.join(accept)):
            raise UnacceptableMimeTypeError(media_mime_type)
          media_upload = MediaFileUpload(media_filename, media_mime_type)
        elif isinstance(media_filename, MediaUpload):
          media_upload = media_filename
        else:
          raise TypeError('media_filename must be str or MediaUpload.')

        # Check the maxSize
        if maxSize > 0 and media_upload.size() > maxSize:
          raise MediaUploadSizeError("Media larger than: %s" % maxSize)

        # Use the media path uri for media uploads
        expanded_url = uritemplate.expand(mediaPathUrl, params)
        url = urlparse.urljoin(self._baseUrl, expanded_url + query)
        if media_upload.resumable():
          url = _add_query_parameter(url, 'uploadType', 'resumable')

        if media_upload.resumable():
          # This is all we need to do for resumable, if the body exists it gets
          # sent in the first request, otherwise an empty body is sent.
          resumable = media_upload
        else:
          # A non-resumable upload
          if body is None:
            # This is a simple media upload
            headers['content-type'] = media_upload.mimetype()
            body = media_upload.getbytes(0, media_upload.size())
            url = _add_query_parameter(url, 'uploadType', 'media')
          else:
            # This is a multipart/related upload.
            msgRoot = MIMEMultipart('related')
            # msgRoot should not write out it's own headers
            setattr(msgRoot, '_write_headers', lambda self: None)

            # attach the body as one part
            msg = MIMENonMultipart(*headers['content-type'].split('/'))
            msg.set_payload(body)
            msgRoot.attach(msg)

            # attach the media as the second part
            msg = MIMENonMultipart(*media_upload.mimetype().split('/'))
            msg['Content-Transfer-Encoding'] = 'binary'
            payload = media_upload.getbytes(0, media_upload.size())
            msg.set_payload(payload)
            msgRoot.attach(msg)

            body = msgRoot.as_string()

            multipart_boundary = msgRoot.get_boundary()
            headers['content-type'] = ('multipart/related; '
                                       'boundary="%s"') % multipart_boundary
            url = _add_query_parameter(url, 'uploadType', 'multipart')

      logger.info('URL being requested: %s' % url)
      return self._requestBuilder(self._http,
                                  model.response,
                                  url,
                                  method=httpMethod,
                                  body=body,
                                  headers=headers,
                                  methodId=methodId,
                                  resumable=resumable)

    docs = [methodDesc.get('description', DEFAULT_METHOD_DOC), '\n\n']
    if len(argmap) > 0:
      docs.append('Args:\n')

    # Skip undocumented params and params common to all methods.
    skip_parameters = rootDesc.get('parameters', {}).keys()
    # BUGFIX: extend(), not append() -- append() added the whole list as a
    # single (never-matching) entry, so stack query parameters were never
    # actually skipped in the generated docstrings.
    skip_parameters.extend(STACK_QUERY_PARAMETERS)
    for arg in argmap.iterkeys():
      if arg in skip_parameters:
        continue
      repeated = ''
      if arg in repeated_params:
        repeated = ' (repeated)'
      required = ''
      if arg in required_params:
        required = ' (required)'
      paramdesc = methodDesc['parameters'][argmap[arg]]
      paramdoc = paramdesc.get('description', 'A parameter')
      if '$ref' in paramdesc:
        docs.append(
            (' %s: object, %s%s%s\n The object takes the'
            ' form of:\n\n%s\n\n') % (arg, paramdoc, required, repeated,
              schema.prettyPrintByName(paramdesc['$ref'])))
      else:
        paramtype = paramdesc.get('type', 'string')
        docs.append(' %s: %s, %s%s%s\n' % (arg, paramtype, paramdoc, required,
                                           repeated))
      enum = paramdesc.get('enum', [])
      enumDesc = paramdesc.get('enumDescriptions', [])
      if enum and enumDesc:
        docs.append(' Allowed values\n')
        for (name, desc) in zip(enum, enumDesc):
          docs.append(' %s - %s\n' % (name, desc))
    if 'response' in methodDesc:
      if methodName.endswith('_media'):
        docs.append('\nReturns:\n The media object as a string.\n\n ')
      else:
        docs.append('\nReturns:\n An object of the form:\n\n ')
        docs.append(schema.prettyPrintSchema(methodDesc['response']))

    setattr(method, '__doc__', ''.join(docs))
    setattr(theclass, methodName, method)

  def createNextMethod(theclass, methodName, methodDesc, rootDesc):
    """Creates any _next methods for attaching to a Resource.

    The _next methods allow for easy iteration through list() responses.

    Args:
      theclass: type, the class to attach methods to.
      methodName: string, name of the method to use.
      methodDesc: object, fragment of deserialized discovery document that
        describes the method.
      rootDesc: object, the entire deserialized discovery document.
    """
    methodName = fix_method_name(methodName)
    methodId = methodDesc['id'] + '.next'

    def methodNext(self, previous_request, previous_response):
      """Retrieves the next page of results.

      Args:
        previous_request: The request for the previous page.
        previous_response: The response from the request for the previous page.

      Returns:
        A request object that you can call 'execute()' on to request the next
        page. Returns None if there are no more items in the collection.
      """
      # Retrieve nextPageToken from previous_response
      # Use as pageToken in previous_request to create new request.
      if 'nextPageToken' not in previous_response:
        return None

      request = copy.copy(previous_request)

      pageToken = previous_response['nextPageToken']
      parsed = list(urlparse.urlparse(request.uri))
      q = parse_qsl(parsed[4])

      # Find and remove old 'pageToken' value from URI
      newq = [(key, value) for (key, value) in q if key != 'pageToken']
      newq.append(('pageToken', pageToken))
      parsed[4] = urllib.urlencode(newq)
      uri = urlparse.urlunparse(parsed)

      request.uri = uri

      logger.info('URL being requested: %s' % uri)

      return request

    setattr(theclass, methodName, methodNext)

  # Add basic methods to Resource
  if 'methods' in resourceDesc:
    for methodName, methodDesc in resourceDesc['methods'].iteritems():
      createMethod(Resource, methodName, methodDesc, rootDesc)
      # Add in _media methods. The functionality of the attached method will
      # change when it sees that the method name ends in _media.
      if methodDesc.get('supportsMediaDownload', False):
        createMethod(Resource, methodName + '_media', methodDesc, rootDesc)

  # Add in nested resources
  if 'resources' in resourceDesc:

    def createResourceMethod(theclass, methodName, methodDesc, rootDesc):
      """Create a method on the Resource to access a nested Resource.

      Args:
        theclass: type, the class to attach methods to.
        methodName: string, name of the method to use.
        methodDesc: object, fragment of deserialized discovery document that
          describes the method.
        rootDesc: object, the entire deserialized discovery document.
      """
      methodName = fix_method_name(methodName)

      def methodResource(self):
        return _createResource(self._http, self._baseUrl, self._model,
                               self._requestBuilder, self._developerKey,
                               methodDesc, rootDesc, schema)

      setattr(methodResource, '__doc__', 'A collection resource.')
      setattr(methodResource, '__is_resource__', True)
      setattr(theclass, methodName, methodResource)

    for methodName, methodDesc in resourceDesc['resources'].iteritems():
      createResourceMethod(Resource, methodName, methodDesc, rootDesc)

  # Add _next() methods
  # Look for response bodies in schema that contain nextPageToken, and methods
  # that take a pageToken parameter.
  if 'methods' in resourceDesc:
    for methodName, methodDesc in resourceDesc['methods'].iteritems():
      if 'response' in methodDesc:
        responseSchema = methodDesc['response']
        if '$ref' in responseSchema:
          responseSchema = schema.get(responseSchema['$ref'])
        hasNextPageToken = 'nextPageToken' in responseSchema.get('properties',
                                                                 {})
        hasPageToken = 'pageToken' in methodDesc.get('parameters', {})
        if hasNextPageToken and hasPageToken:
          # BUGFIX: previously passed methodName where rootDesc is expected;
          # harmless only because createNextMethod never reads rootDesc.
          createNextMethod(Resource, methodName + '_next',
                           resourceDesc['methods'][methodName],
                           rootDesc)

  return Resource()
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Google App Engine
Utilities for making it easier to use the
Google API Client for Python on Google App Engine.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import pickle
from google.appengine.ext import db
from apiclient.oauth import OAuthCredentials
from apiclient.oauth import FlowThreeLegged
class FlowThreeLeggedProperty(db.Property):
  """Utility property that allows easy
  storage and retrieval of an
  apiclient.oauth.FlowThreeLegged"""

  # Tell what the user type is.
  data_type = FlowThreeLegged

  # For writing to datastore.
  def get_value_for_datastore(self, model_instance):
    """Pickle the flow into a db.Blob for storage."""
    flow = super(FlowThreeLeggedProperty,
                 self).get_value_for_datastore(model_instance)
    return db.Blob(pickle.dumps(flow))

  # For reading from datastore.
  def make_value_from_datastore(self, value):
    """Unpickle the stored blob back into a FlowThreeLegged, or None."""
    if value is None:
      return None
    return pickle.loads(value)

  def validate(self, value):
    """Reject values that are neither None nor a FlowThreeLegged."""
    if value is not None and not isinstance(value, FlowThreeLegged):
      # BUGFIX: the bare name BadValueError was never defined in this module
      # (a NameError at raise time); the exception lives on the db module.
      raise db.BadValueError('Property %s must be convertible '
                             'to a FlowThreeLegged instance (%s)' %
                             (self.name, value))
    return super(FlowThreeLeggedProperty, self).validate(value)

  def empty(self, value):
    """Any falsy value counts as empty."""
    return not value
class OAuthCredentialsProperty(db.Property):
  """Utility property that allows easy
  storage and retrieval of
  apiclient.oauth.OAuthCredentials
  """

  # Tell what the user type is.
  data_type = OAuthCredentials

  # For writing to datastore.
  def get_value_for_datastore(self, model_instance):
    """Pickle the credentials into a db.Blob for storage."""
    cred = super(OAuthCredentialsProperty,
                 self).get_value_for_datastore(model_instance)
    return db.Blob(pickle.dumps(cred))

  # For reading from datastore.
  def make_value_from_datastore(self, value):
    """Unpickle the stored blob back into OAuthCredentials, or None."""
    if value is None:
      return None
    return pickle.loads(value)

  def validate(self, value):
    """Reject values that are neither None nor OAuthCredentials."""
    if value is not None and not isinstance(value, OAuthCredentials):
      # BUGFIX: the bare name BadValueError was never defined in this module
      # (a NameError at raise time); the exception lives on the db module.
      raise db.BadValueError('Property %s must be convertible '
                             'to an OAuthCredentials instance (%s)' %
                             (self.name, value))
    return super(OAuthCredentialsProperty, self).validate(value)

  def empty(self, value):
    """Any falsy value counts as empty."""
    return not value
class StorageByKeyName(object):
  """Store and retrieve a single credential to and from
  the App Engine datastore.

  This Storage helper presumes the Credentials
  have been stored as a CredenialsProperty
  on a datastore model class, and that entities
  are stored by key_name.
  """

  def __init__(self, model, key_name, property_name):
    """Constructor for Storage.

    Args:
      model: db.Model, model class
      key_name: string, key name for the entity that has the credentials
      property_name: string, name of the property that is a CredentialsProperty
    """
    self.model = model
    self.key_name = key_name
    self.property_name = property_name

  def get(self):
    """Retrieve Credential from datastore.

    Returns:
      Credentials
    """
    entity = self.model.get_or_insert(self.key_name)
    credential = getattr(entity, self.property_name)
    if not credential or not hasattr(credential, 'set_store'):
      return credential
    # Wire the credential back to this storage so refreshes are persisted.
    credential.set_store(self.put)
    return credential

  def put(self, credentials):
    """Write a Credentials to the datastore.

    Args:
      credentials: Credentials, the credentials to store.
    """
    entity = self.model.get_or_insert(self.key_name)
    setattr(entity, self.property_name, credentials)
    entity.put()
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line tools for authenticating via OAuth 1.0
Do the OAuth 1.0 Three Legged Dance for
a command line application. Stores the generated
credentials in a common file that is used by
other example apps in the same directory.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
__all__ = ["run"]
import BaseHTTPServer
import gflags
import logging
import socket
import sys
from optparse import OptionParser
from apiclient.oauth import RequestError
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
# Command-line flags controlling how the three-legged OAuth dance is run:
# whether to spin up a local redirect web server, and which host/ports to try.
FLAGS = gflags.FLAGS

gflags.DEFINE_boolean('auth_local_webserver', True,
                      ('Run a local web server to handle redirects during '
                       'OAuth authorization.'))

gflags.DEFINE_string('auth_host_name', 'localhost',
                     ('Host name to use when running a local web server to '
                      'handle redirects during OAuth authorization.'))

gflags.DEFINE_multi_int('auth_host_port', [8080, 8090],
                        ('Port to use when running a local web server to '
                         'handle redirects during OAuth authorization.'))
class ClientRedirectServer(BaseHTTPServer.HTTPServer):
  """A server to handle OAuth 1.0 redirects back to localhost.

  Waits for a single request and parses the query parameters
  into query_params and then stops serving.
  """
  # Class-level default; ClientRedirectHandler.do_GET replaces it on the
  # server instance with the parsed query dict of the redirect request.
  query_params = {}
class ClientRedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
  """A handler for OAuth 1.0 redirects back to localhost.

  Waits for a single request and parses the query parameters
  into the servers query_params and then stops serving.
  """

  def do_GET(self):
    """Handle a GET request.

    Stores the parsed query parameters on the server and renders a short
    completion page. Note that we can't detect if an error occurred.
    """
    self.send_response(200)
    self.send_header("Content-type", "text/html")
    self.end_headers()
    raw_query = self.path.split('?', 1)[-1]
    # Expose the redirect's parameters to the code driving this server.
    self.server.query_params = dict(parse_qsl(raw_query))
    for chunk in ("<html><head><title>Authentication Status</title></head>",
                  "<body><p>The authentication flow has completed.</p>",
                  "</body></html>"):
      self.wfile.write(chunk)

  def log_message(self, format, *args):
    """Do not log messages to stdout while running as command line program."""
    pass
def run(flow, storage):
  """Core code for a command-line application.

  Args:
    flow: Flow, an OAuth 1.0 Flow to step through.
    storage: Storage, a Storage to store the credential in.

  Returns:
    Credentials, the obtained credential.

  Exceptions:
    RequestError: if step2 of the flow fails.
  """
  # Try each configured port in turn; fall back to out-of-band ('oob')
  # verification when no local web server could be started.
  if FLAGS.auth_local_webserver:
    success = False
    port_number = 0
    for port in FLAGS.auth_host_port:
      port_number = port
      try:
        httpd = BaseHTTPServer.HTTPServer((FLAGS.auth_host_name, port),
                                          ClientRedirectHandler)
      except socket.error, e:
        # Port in use (or similar) -- try the next candidate.
        pass
      else:
        success = True
        break
    FLAGS.auth_local_webserver = success

  if FLAGS.auth_local_webserver:
    oauth_callback = 'http://%s:%s/' % (FLAGS.auth_host_name, port_number)
  else:
    oauth_callback = 'oob'
  authorize_url = flow.step1_get_authorize_url(oauth_callback)

  print 'Go to the following link in your browser:'
  print authorize_url
  print

  if FLAGS.auth_local_webserver:
    print 'If your browser is on a different machine then exit and re-run this'
    print 'application with the command-line parameter --noauth_local_webserver.'
    print

  if FLAGS.auth_local_webserver:
    # Block until the provider redirects the browser back to us.
    httpd.handle_request()
    if 'error' in httpd.query_params:
      sys.exit('Authentication request was rejected.')
    if 'oauth_verifier' in httpd.query_params:
      code = httpd.query_params['oauth_verifier']
  else:
    # Manual (out-of-band) verification: ask the user for the code.
    accepted = 'n'
    while accepted.lower() == 'n':
      accepted = raw_input('Have you authorized me? (y/n) ')
    code = raw_input('What is the verification code? ').strip()

  try:
    credentials = flow.step2_exchange(code)
  except RequestError:
    sys.exit('The authentication has failed.')

  # Persist the credential, then hook it to the store so future token
  # refreshes are saved automatically.
  storage.put(credentials)
  credentials.set_store(storage.put)
  print "You have successfully authenticated."

  return credentials
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for OAuth.
Utilities for making it easier to work with OAuth 1.0 credentials.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import pickle
import threading
from apiclient.oauth import Storage as BaseStorage
class Storage(BaseStorage):
  """Store and retrieve a single credential to and from a file."""

  def __init__(self, filename):
    self._filename = filename
    # Serializes concurrent get/put calls against the same file.
    self._lock = threading.Lock()

  def get(self):
    """Retrieve Credential from file.

    Returns:
      apiclient.oauth.Credentials
    """
    self._lock.acquire()
    try:
      # Nested try blocks keep compatibility with Python 2.4, which does not
      # allow except and finally clauses on the same try statement.
      try:
        f = open(self._filename, 'r')
        try:
          credentials = pickle.loads(f.read())
        finally:
          # BUGFIX: the file handle previously leaked on a pickle error.
          f.close()
        credentials.set_store(self.put)
      except Exception:
        # Best-effort read: a missing or corrupt credentials file simply
        # means "no credential stored".  (Narrowed from a bare except so
        # KeyboardInterrupt/SystemExit are no longer swallowed.)
        credentials = None
    finally:
      # BUGFIX: the lock previously stayed held if an exception escaped.
      self._lock.release()
    return credentials

  def put(self, credentials):
    """Write a pickled Credentials to file.

    Args:
      credentials: Credentials, the credentials to store.
    """
    self._lock.acquire()
    try:
      f = open(self._filename, 'w')
      try:
        f.write(pickle.dumps(credentials))
      finally:
        # BUGFIX: close the file and release the lock even if the write or
        # the pickling fails, instead of leaking both.
        f.close()
    finally:
      self._lock.release()
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import apiclient
import base64
import pickle
from django.db import models
class OAuthCredentialsField(models.Field):
  """Django model field that stores apiclient OAuth credentials.

  Values are pickled and base64-encoded into a VARCHAR column.
  """

  # SubfieldBase makes Django route attribute assignment through to_python()
  # (legacy Django idiom) -- TODO confirm the targeted Django version.
  __metaclass__ = models.SubfieldBase

  def db_type(self):
    # Serialized credentials are stored as text.
    return 'VARCHAR'

  def to_python(self, value):
    """Convert a stored DB value (or a live object) to Credentials."""
    if value is None:
      return None
    if isinstance(value, apiclient.oauth.Credentials):
      return value
    return pickle.loads(base64.b64decode(value))

  def get_db_prep_value(self, value):
    """Pickle and base64-encode the credentials for storage."""
    return base64.b64encode(pickle.dumps(value))
class FlowThreeLeggedField(models.Field):
  """Django model field that stores an apiclient FlowThreeLegged.

  Values are pickled and base64-encoded into a VARCHAR column.
  """

  # SubfieldBase makes Django route attribute assignment through to_python()
  # (legacy Django idiom) -- TODO confirm the targeted Django version.
  __metaclass__ = models.SubfieldBase

  def db_type(self):
    # Serialized flows are stored as text.
    return 'VARCHAR'

  def to_python(self, value):
    """Convert a stored DB value (or a live object) to a FlowThreeLegged."""
    # BUGFIX: removed leftover debug statement (print "In to_python", value)
    # that wrote to stdout on every field access.
    if value is None:
      return None
    if isinstance(value, apiclient.oauth.FlowThreeLegged):
      return value
    return pickle.loads(base64.b64decode(value))

  def get_db_prep_value(self, value):
    """Pickle and base64-encode the flow for storage."""
    return base64.b64encode(pickle.dumps(value))
| Python |
# Package version string; the "c" suffix marks a release candidate.
__version__ = "1.0c2"
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model objects for requests and responses.
Each API may support one or more serializations, such
as JSON, Atom, etc. The model classes are responsible
for converting between the wire format and the Python
object representation.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import gflags
import logging
import urllib
from errors import HttpError
from oauth2client.anyjson import simplejson
FLAGS = gflags.FLAGS

# When --dump_request_response is set, BaseModel logs every request and
# response it handles via the logging module.
gflags.DEFINE_boolean('dump_request_response', False,
                      'Dump all http server requests and responses. '
                      )
def _abstract():
raise NotImplementedError('You need to override this function')
class Model(object):
  """Abstract interface for wire-format models.

  A Model converts between a serialized wire format (such as JSON) and the
  Python object representation; every concrete model implements both the
  request (serialize) and response (de-serialize) halves of this interface.
  """

  def request(self, headers, path_params, query_params, body_value):
    """Prepare an outgoing request, serializing its body.

    Args:
      headers: dict, request headers
      path_params: dict, parameters that appear in the request path
      query_params: dict, parameters that appear in the query
      body_value: object, the request body as a Python object, which must be
          serializable.

    Returns:
      A tuple of (headers, path_params, query, body) where query is the
      query-string portion of the request URI and body is the payload
      serialized in the desired wire format.
    """
    _abstract()

  def response(self, resp, content):
    """De-serialize an HTTP response body into a Python object.

    Args:
      resp: httplib2.Response, the HTTP response headers and status
      content: string, the body of the HTTP response

    Returns:
      The body de-serialized as a Python object.

    Raises:
      apiclient.errors.HttpError if a non 2xx response is received.
    """
    _abstract()
class BaseModel(Model):
  """Base model class.

  Subclasses should provide implementations for the "serialize" and
  "deserialize" methods, as well as values for the following class attributes.

  Attributes:
    accept: The value to use for the HTTP Accept header.
    content_type: The value to use for the HTTP Content-type header.
    no_content_response: The value to return when deserializing a 204 "No
        Content" response.
    alt_param: The value to supply as the "alt" query parameter for requests.
  """

  # Concrete subclasses override these with wire-format-specific values.
  accept = None
  content_type = None
  no_content_response = None
  alt_param = None

  def _log_request(self, headers, path_params, query, body):
    """Logs debugging information about the request if requested."""
    # Gated on the --dump_request_response command-line flag.
    if FLAGS.dump_request_response:
      logging.info('--request-start--')
      logging.info('-headers-start-')
      for h, v in headers.iteritems():
        logging.info('%s: %s', h, v)
      logging.info('-headers-end-')
      logging.info('-path-parameters-start-')
      for h, v in path_params.iteritems():
        logging.info('%s: %s', h, v)
      logging.info('-path-parameters-end-')
      logging.info('body: %s', body)
      logging.info('query: %s', query)
      logging.info('--request-end--')

  def request(self, headers, path_params, query_params, body_value):
    """Updates outgoing requests with a serialized body.

    Args:
      headers: dict, request headers
      path_params: dict, parameters that appear in the request path
      query_params: dict, parameters that appear in the query
      body_value: object, the request body as a Python object, which must be
          serializable by simplejson.

    Returns:
      A tuple of (headers, path_params, query, body)
        headers: dict, request headers
        path_params: dict, parameters that appear in the request path
        query: string, query part of the request URI
        body: string, the body serialized as JSON
    """
    query = self._build_query(query_params)
    headers['accept'] = self.accept
    headers['accept-encoding'] = 'gzip, deflate'
    # Append the client identifier to any caller-supplied user-agent.
    if 'user-agent' in headers:
      headers['user-agent'] += ' '
    else:
      headers['user-agent'] = ''
    headers['user-agent'] += 'google-api-python-client/1.0'
    if body_value is not None:
      headers['content-type'] = self.content_type
      body_value = self.serialize(body_value)
    self._log_request(headers, path_params, query, body_value)
    return (headers, path_params, query, body_value)

  def _build_query(self, params):
    """Builds a query string.

    Args:
      params: dict, the query parameters

    Returns:
      The query parameters properly encoded into an HTTP URI query string.
    """
    # NOTE(review): this mutates the caller's dict by injecting 'alt';
    # current callers pass throwaway dicts -- confirm before reusing params.
    if self.alt_param is not None:
      params.update({'alt': self.alt_param})
    astuples = []
    for key, value in params.iteritems():
      if type(value) == type([]):
        # Repeated parameter: emit one (key, value) pair per element.
        for x in value:
          x = x.encode('utf-8')
          astuples.append((key, x))
      else:
        # Encode anything string-like to UTF-8 before urlencoding.
        if getattr(value, 'encode', False) and callable(value.encode):
          value = value.encode('utf-8')
        astuples.append((key, value))
    return '?' + urllib.urlencode(astuples)

  def _log_response(self, resp, content):
    """Logs debugging information about the response if requested."""
    if FLAGS.dump_request_response:
      logging.info('--response-start--')
      for h, v in resp.iteritems():
        logging.info('%s: %s', h, v)
      if content:
        logging.info(content)
      logging.info('--response-end--')

  def response(self, resp, content):
    """Convert the response wire format into a Python object.

    Args:
      resp: httplib2.Response, the HTTP response headers and status
      content: string, the body of the HTTP response

    Returns:
      The body de-serialized as a Python object.

    Raises:
      apiclient.errors.HttpError if a non 2xx response is received.
    """
    self._log_response(resp, content)
    # Error handling is TBD, for example, do we retry
    # for some operation/error combinations?
    if resp.status < 300:
      if resp.status == 204:
        # A 204: No Content response should be treated differently
        # to all the other success states
        return self.no_content_response
      return self.deserialize(content)
    else:
      logging.debug('Content from bad request was: %s' % content)
      raise HttpError(resp, content)

  def serialize(self, body_value):
    """Perform the actual Python object serialization.

    Args:
      body_value: object, the request body as a Python object.

    Returns:
      string, the body in serialized form.
    """
    _abstract()

  def deserialize(self, content):
    """Perform the actual deserialization from response string to Python
    object.

    Args:
      content: string, the body of the HTTP response

    Returns:
      The body de-serialized as a Python object.
    """
    _abstract()
class JsonModel(BaseModel):
  """Model class for JSON.

  Serializes and de-serializes between JSON and the Python
  object representation of HTTP request and response bodies.
  """
  accept = 'application/json'
  content_type = 'application/json'
  alt_param = 'json'

  def __init__(self, data_wrapper=False):
    """Construct a JsonModel.

    Args:
      data_wrapper: boolean, wrap requests and responses in a data wrapper
    """
    self._data_wrapper = data_wrapper

  def serialize(self, body_value):
    """Serialize the body, optionally wrapping plain dicts in {'data': ...}."""
    needs_wrapper = (self._data_wrapper and isinstance(body_value, dict)
                     and 'data' not in body_value)
    if needs_wrapper:
      body_value = {'data': body_value}
    return simplejson.dumps(body_value)

  def deserialize(self, content):
    """Parse JSON content, unwrapping a top-level 'data' envelope if present."""
    parsed = simplejson.loads(content)
    if isinstance(parsed, dict):
      return parsed.get('data', parsed)
    return parsed

  @property
  def no_content_response(self):
    """A 204 No Content response de-serializes to an empty dict."""
    return {}
class RawModel(JsonModel):
    """Model class for requests that don't return JSON.

    Serializes and de-serializes between JSON and the Python
    object representation of HTTP request, and returns the raw bytes
    of the response body.
    """
    # Accept any response media type; the body is passed through untouched.
    accept = '*/*'
    content_type = 'application/json'
    alt_param = None

    def deserialize(self, content):
        # Return the response body verbatim; no JSON parsing.
        return content

    @property
    def no_content_response(self):
        # Empty body for 204 No Content responses.
        return ''
class MediaModel(JsonModel):
    """Model class for requests that return Media.

    Serializes and de-serializes between JSON and the Python
    object representation of HTTP request, and returns the raw bytes
    of the response body.
    """
    accept = '*/*'
    content_type = 'application/json'
    # 'media' is the value sent for the alt query parameter.
    alt_param = 'media'

    def deserialize(self, content):
        # Return the response body verbatim; no JSON parsing.
        return content

    @property
    def no_content_response(self):
        # Empty body for 204 No Content responses.
        return ''
class ProtocolBufferModel(BaseModel):
    """Model class for protocol buffers.

    Serializes and de-serializes the binary protocol buffer sent in the HTTP
    request and response bodies.
    """

    accept = 'application/x-protobuf'
    content_type = 'application/x-protobuf'
    alt_param = 'proto'

    def __init__(self, protocol_buffer):
        """Constructs a ProtocolBufferModel.

        The serialized protocol buffer returned in an HTTP response will be
        de-serialized using the given protocol buffer class.

        Args:
          protocol_buffer: The protocol buffer class used to de-serialize a
              response from the API.
        """
        self._protocol_buffer = protocol_buffer

    def serialize(self, body_value):
        """Return the binary wire format of the request message."""
        return body_value.SerializeToString()

    def deserialize(self, content):
        """Parse the response bytes into an instance of the configured class."""
        return self._protocol_buffer.FromString(content)

    @property
    def no_content_response(self):
        """An empty message of the configured protocol buffer class."""
        return self._protocol_buffer()
def makepatch(original, modified):
    """Create a patch object.

    Some methods support PATCH, an efficient way to send updates to a resource.
    This method allows the easy construction of patch bodies by looking at the
    differences between a resource before and after it was modified.

    Args:
      original: object, the original deserialized resource
      modified: object, the modified deserialized resource

    Returns:
      An object that contains only the changes from original to modified, in a
      form suitable to pass to a PATCH method.

    Example usage:
      item = service.activities().get(postid=postid, userid=userid).execute()
      original = copy.deepcopy(item)
      item['object']['content'] = 'This is updated.'
      service.activities.patch(postid=postid, userid=userid,
        body=makepatch(original, item)).execute()
    """
    patch = {}
    # items() instead of the Python-2-only iteritems(): identical behavior on
    # Python 2 and keeps the helper usable under Python 3.
    for key, original_value in original.items():
        modified_value = modified.get(key, None)
        if modified_value is None:
            # Use None to signal that the element is deleted
            patch[key] = None
        elif original_value != modified_value:
            if isinstance(original_value, dict):
                # Recursively descend objects
                patch[key] = makepatch(original_value, modified_value)
            else:
                # In the case of simple types or arrays we just replace
                patch[key] = modified_value
        # Equal values contribute nothing to the patch.
    # Keys that only exist in the modified resource are additions.
    for key in modified:
        if key not in original:
            patch[key] = modified[key]
    return patch
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Errors for the library.
All exceptions defined by the library
should be defined in this file.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
from oauth2client.anyjson import simplejson
# Root of the library's exception hierarchy; all errors below derive from it.
class Error(Exception):
    """Base error for this module."""
    pass
class HttpError(Error):
    """HTTP data was invalid or unexpected."""

    def __init__(self, resp, content, uri=None):
        self.resp = resp
        self.content = content
        self.uri = uri

    def _get_reason(self):
        """Calculate the reason for the error from the response content."""
        content_type = self.resp.get('content-type', '')
        if not content_type.startswith('application/json'):
            return self.resp.reason
        # JSON error bodies carry a structured message; fall back to the raw
        # body if it is unparseable or missing the expected keys.
        try:
            data = simplejson.loads(self.content)
            return data['error']['message']
        except (ValueError, KeyError):
            return self.content

    def __repr__(self):
        reason = self._get_reason()
        if self.uri:
            return '<HttpError %s when requesting %s returned "%s">' % (
                self.resp.status, self.uri, reason)
        return '<HttpError %s "%s">' % (self.resp.status, reason)

    __str__ = __repr__
class InvalidJsonError(Error):
    """The JSON returned could not be parsed."""
    pass

class UnknownLinkType(Error):
    """Link type unknown or unexpected."""
    pass

class UnknownApiNameOrVersion(Error):
    """No API with that name and version exists."""
    pass

class UnacceptableMimeTypeError(Error):
    """That is an unacceptable mimetype for this operation."""
    pass

class MediaUploadSizeError(Error):
    """Media is larger than the method can accept."""
    pass

class ResumableUploadError(Error):
    """Error occurred during resumable upload."""
    pass
class BatchError(HttpError):
    """Error occurred during batch operations."""

    def __init__(self, reason, resp=None, content=None):
        """Constructor for a BatchError.

        Args:
          reason: string, short description of the failure.
          resp: httplib2.Response, the HTTP response, if any. May be None for
              errors detected before any response was received.
          content: string, the body of the HTTP response, if any.
        """
        self.resp = resp
        self.content = content
        self.reason = reason

    def __repr__(self):
        # resp defaults to None, so guard before reading .status; previously
        # this raised AttributeError while formatting the error message.
        if getattr(self.resp, 'status', None) is None:
            return '<BatchError "%s">' % (self.reason)
        return '<BatchError %s "%s">' % (self.resp.status, self.reason)

    __str__ = __repr__
class UnexpectedMethodError(Error):
    """Exception raised by RequestMockBuilder on unexpected calls."""

    def __init__(self, methodId=None):
        """Constructor for an UnexpectedMethodError.

        Args:
          methodId: string, id of the method that was called unexpectedly.
        """
        super(UnexpectedMethodError, self).__init__(
            'Received unexpected call %s' % methodId)
class UnexpectedBodyError(Error):
    """Exception raised by RequestMockBuilder on unexpected bodies."""

    def __init__(self, expected, provided):
        """Constructor for an UnexpectedBodyError.

        Args:
          expected: the request body the mock expected.
          provided: the request body that was actually supplied.
        """
        super(UnexpectedBodyError, self).__init__(
            'Expected: [%s] - Provided: [%s]' % (expected, provided))
| Python |
"""
iri2uri
Converts an IRI to a URI.
"""
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = []
__version__ = "1.0.0"
__license__ = "MIT"
__history__ = """
"""
import urlparse
# Convert an IRI to a URI following the rules in RFC 3987
#
# The characters we need to encode and escape are defined in the spec:
#
# iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD
# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF
# / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD
# / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD
# / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD
# / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD
# / %xD0000-DFFFD / %xE1000-EFFFD
# Codepoint ranges ('ucschar' and 'iprivate' from RFC 3987, listed in the
# comment above) whose members must be percent-encoded in a URI.
escape_range = [
    (0xA0, 0xD7FF),
    (0xE000, 0xF8FF),
    (0xF900, 0xFDCF),
    (0xFDF0, 0xFFEF),
    (0x10000, 0x1FFFD),
    (0x20000, 0x2FFFD),
    (0x30000, 0x3FFFD),
    (0x40000, 0x4FFFD),
    (0x50000, 0x5FFFD),
    (0x60000, 0x6FFFD),
    (0x70000, 0x7FFFD),
    (0x80000, 0x8FFFD),
    (0x90000, 0x9FFFD),
    (0xA0000, 0xAFFFD),
    (0xB0000, 0xBFFFD),
    (0xC0000, 0xCFFFD),
    (0xD0000, 0xDFFFD),
    (0xE1000, 0xEFFFD),
    (0xF0000, 0xFFFFD),
    (0x100000, 0x10FFFD),
]

def encode(c):
    """Percent-encode the single character c if it falls in escape_range.

    Characters outside every range are returned unchanged. Because the
    ranges are sorted by lower bound, scanning stops as soon as the
    codepoint falls below the current range.
    """
    codepoint = ord(c)
    for low, high in escape_range:
        if codepoint < low:
            break
        if low <= codepoint <= high:
            return "".join(["%%%2X" % ord(octet) for octet in c.encode('utf-8')])
    return c
def iri2uri(uri):
    """Convert an IRI to a URI. Note that IRIs must be
    passed in a unicode strings. That is, do not utf-8 encode
    the IRI before passing it into the function.

    Byte strings (already-encoded URIs) are returned unchanged, which also
    makes the function idempotent.
    """
    if isinstance(uri ,unicode):
        (scheme, authority, path, query, fragment) = urlparse.urlsplit(uri)
        # The host portion uses IDNA (punycode) encoding, not percent-escaping.
        authority = authority.encode('idna')
        # For each character in 'ucschar' or 'iprivate'
        # 1. encode as utf-8
        # 2. then %-encode each octet of that utf-8
        uri = urlparse.urlunsplit((scheme, authority, path, query, fragment))
        uri = "".join([encode(c) for c in uri])
    return uri
# Self-test: run this module directly to execute the unit tests below.
if __name__ == "__main__":
    import unittest

    class Test(unittest.TestCase):

        def test_uris(self):
            """Test that URIs are invariant under the transformation."""
            invariant = [
                u"ftp://ftp.is.co.za/rfc/rfc1808.txt",
                u"http://www.ietf.org/rfc/rfc2396.txt",
                u"ldap://[2001:db8::7]/c=GB?objectClass?one",
                u"mailto:John.Doe@example.com",
                u"news:comp.infosystems.www.servers.unix",
                u"tel:+1-816-555-1212",
                u"telnet://192.0.2.16:80/",
                u"urn:oasis:names:specification:docbook:dtd:xml:4.1.2" ]
            for uri in invariant:
                self.assertEqual(uri, iri2uri(uri))

        def test_iri(self):
            """ Test that the right type of escaping is done for each part of the URI."""
            self.assertEqual("http://xn--o3h.com/%E2%98%84", iri2uri(u"http://\N{COMET}.com/\N{COMET}"))
            self.assertEqual("http://bitworking.org/?fred=%E2%98%84", iri2uri(u"http://bitworking.org/?fred=\N{COMET}"))
            self.assertEqual("http://bitworking.org/#%E2%98%84", iri2uri(u"http://bitworking.org/#\N{COMET}"))
            self.assertEqual("#%E2%98%84", iri2uri(u"#\N{COMET}"))
            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"))
            # iri2uri must be idempotent.
            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")))
            self.assertNotEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode('utf-8')))

    unittest.main()
| Python |
from __future__ import generators
"""
httplib2
A caching http interface that supports ETags and gzip
to conserve bandwidth.
Requires Python 2.3 or later
Changelog:
2007-08-18, Rick: Modified so it's able to use a socks proxy if needed.
"""
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = ["Thomas Broyer (t.broyer@ltgt.net)",
"James Antill",
"Xavier Verges Farrero",
"Jonathan Feinberg",
"Blair Zajac",
"Sam Ruby",
"Louis Nyffenegger"]
__license__ = "MIT"
__version__ = "0.7.2"
import re
import sys
import email
import email.Utils
import email.Message
import email.FeedParser
import StringIO
import gzip
import zlib
import httplib
import urlparse
import base64
import os
import copy
import calendar
import time
import random
import errno
# remove deprecated warning in python2.6
try:
from hashlib import sha1 as _sha, md5 as _md5
except ImportError:
import sha
import md5
_sha = sha.new
_md5 = md5.new
import hmac
from gettext import gettext as _
import socket
try:
from httplib2 import socks
except ImportError:
socks = None
# Build the appropriate socket wrapper for ssl.
# Two alternative definitions of _ssl_wrap_socket are installed depending on
# whether the `ssl` module (Python 2.6+) is available.
try:
    import ssl # python 2.6
    ssl_SSLError = ssl.SSLError
    def _ssl_wrap_socket(sock, key_file, cert_file,
                         disable_validation, ca_certs):
        # Wrap `sock` in SSL, optionally enforcing certificate validation.
        if disable_validation:
            cert_reqs = ssl.CERT_NONE
        else:
            cert_reqs = ssl.CERT_REQUIRED
        # We should be specifying SSL version 3 or TLS v1, but the ssl module
        # doesn't expose the necessary knobs. So we need to go with the default
        # of SSLv23.
        return ssl.wrap_socket(sock, keyfile=key_file, certfile=cert_file,
                               cert_reqs=cert_reqs, ca_certs=ca_certs)
except (AttributeError, ImportError):
    # Fallback for interpreters without the ssl module: no validation support.
    ssl_SSLError = None
    def _ssl_wrap_socket(sock, key_file, cert_file,
                         disable_validation, ca_certs):
        if not disable_validation:
            raise CertificateValidationUnsupported(
                "SSL certificate validation is not supported without "
                "the ssl module installed. To avoid this error, install "
                "the ssl module, or explicity disable validation.")
        ssl_sock = socket.ssl(sock, key_file, cert_file)
        return httplib.FakeSocket(sock, ssl_sock)
# iri2uri needs unicode support added in Python 2.3; on older interpreters
# IRIs are passed through untouched.
if sys.version_info >= (2,3):
    from iri2uri import iri2uri
else:
    def iri2uri(uri):
        # Identity fallback: no IRI-to-URI conversion available.
        return uri
def has_timeout(timeout): # python 2.6
    """Return True when *timeout* is an explicit, caller-chosen timeout.

    None and (on Python 2.6+) socket._GLOBAL_DEFAULT_TIMEOUT both mean
    "use the default", so they yield False.
    """
    if timeout is None:
        return False
    if hasattr(socket, '_GLOBAL_DEFAULT_TIMEOUT'):
        return timeout is not socket._GLOBAL_DEFAULT_TIMEOUT
    return True
# Public API of the module.
__all__ = ['Http', 'Response', 'ProxyInfo', 'HttpLib2Error',
  'RedirectMissingLocation', 'RedirectLimit', 'FailedToDecompressContent',
  'UnimplementedDigestAuthOptionError', 'UnimplementedHmacDigestAuthOptionError',
  'debuglevel', 'ProxiesUnavailableError']

# The httplib debug level, set to a non-zero value to get debug output
debuglevel = 0

# Python 2.3 support
if sys.version_info < (2,4):
    def sorted(seq):
        # Shadows the 2.4+ builtin: sorts in place, then returns the list.
        seq.sort()
        return seq

# Python 2.3 support
def HTTPResponse__getheaders(self):
    """Return list of (header, value) tuples."""
    if self.msg is None:
        raise httplib.ResponseNotReady()
    return self.msg.items()

# Monkey-patch getheaders onto HTTPResponse where it is missing (pre-2.4).
if not hasattr(httplib.HTTPResponse, 'getheaders'):
    httplib.HTTPResponse.getheaders = HTTPResponse__getheaders
# All exceptions raised here derive from HttpLib2Error
class HttpLib2Error(Exception): pass

# Some exceptions can be caught and optionally
# be turned back into responses. They carry the failed response and
# content alongside the description.
class HttpLib2ErrorWithResponse(HttpLib2Error):
    def __init__(self, desc, response, content):
        self.response = response
        self.content = content
        HttpLib2Error.__init__(self, desc)

# Redirect/decompression/auth failures that still have a usable response.
class RedirectMissingLocation(HttpLib2ErrorWithResponse): pass
class RedirectLimit(HttpLib2ErrorWithResponse): pass
class FailedToDecompressContent(HttpLib2ErrorWithResponse): pass
class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): pass

# Failures with no response to hand back.
class MalformedHeader(HttpLib2Error): pass
class RelativeURIError(HttpLib2Error): pass
class ServerNotFoundError(HttpLib2Error): pass
class ProxiesUnavailableError(HttpLib2Error): pass
class CertificateValidationUnsupported(HttpLib2Error): pass
class SSLHandshakeError(HttpLib2Error): pass
class NotSupportedOnThisPlatform(HttpLib2Error): pass

class CertificateHostnameMismatch(SSLHandshakeError):
    def __init__(self, desc, host, cert):
        HttpLib2Error.__init__(self, desc)
        self.host = host
        self.cert = cert
# Open Items:
# -----------
# Proxy support
# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?)
# Pluggable cache storage (supports storing the cache in
# flat files by default. We need a plug-in architecture
# that can support Berkeley DB and Squid)
# == Known Issues ==
# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator.
# Does not handle Cache-Control: max-stale
# Does not use Age: headers when calculating cache freshness.
# The number of redirections to follow before giving up.
# Note that only GET redirects are automatically followed.
# Will also honor 301 requests by saving that info and never
# requesting that URI again.
# See the redirect-handling notes in the comment block above.
DEFAULT_MAX_REDIRECTS = 5

# Default CA certificates file bundled with httplib2, resolved relative to
# this module's location.
CA_CERTS = os.path.join(
    os.path.dirname(os.path.abspath(__file__ )), "cacerts.txt")
# Which headers are hop-by-hop headers by default
HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade']
def _get_end2end_headers(response):
hopbyhop = list(HOP_BY_HOP)
hopbyhop.extend([x.strip() for x in response.get('connection', '').split(',')])
return [header for header in response.keys() if header not in hopbyhop]
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")

def parse_uri(uri):
    """Parses a URI using the regex given in Appendix B of RFC 3986.

    Returns (scheme, authority, path, query, fragment); components that
    are absent come back as None.
    """
    # Odd-numbered groups capture the delimiters; pick out the payloads.
    (_s, scheme, _a, authority, path,
     _q, query, _f, fragment) = URI.match(uri).groups()
    return (scheme, authority, path, query, fragment)
def urlnorm(uri):
    """Normalize an absolute URI for use as a cache key.

    Lowercases the scheme and authority, defaults an empty path to "/",
    and drops the fragment.

    Returns:
      (scheme, authority, request_uri, defrag_uri) where request_uri is
      the path plus query and defrag_uri is the full fragment-free URI.

    Raises:
      RelativeURIError: if the URI lacks a scheme or an authority.
    """
    (scheme, authority, path, query, fragment) = parse_uri(uri)
    if not scheme or not authority:
        raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri)
    authority = authority.lower()
    # The original lowered the scheme twice; once is sufficient.
    scheme = scheme.lower()
    if not path:
        path = "/"
    # Could do syntax based normalization of the URI before
    # computing the digest. See Section 6.2.2 of Std 66.
    request_uri = query and "?".join([path, query]) or path
    defrag_uri = scheme + "://" + authority + request_uri
    return scheme, authority, request_uri, defrag_uri
# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/)
re_url_scheme = re.compile(r'^\w+://')
re_slash = re.compile(r'[?/:|]+')

def safename(filename):
    """Return a filename suitable for the cache.
    Strips dangerous and common characters to create a filename we
    can use to store the cache in.

    NOTE(review): relies on Python 2 str/unicode semantics (decode on str,
    the `unicode` builtin) — confirm before porting.
    """
    try:
        # For absolute URLs, IDNA-encode to normalize international hostnames.
        if re_url_scheme.match(filename):
            if isinstance(filename,str):
                filename = filename.decode('utf-8')
                filename = filename.encode('idna')
            else:
                filename = filename.encode('idna')
    except UnicodeError:
        pass
    if isinstance(filename,unicode):
        filename=filename.encode('utf-8')
    # An MD5 digest is appended so names remain unique after the stripping
    # and truncation below.
    filemd5 = _md5(filename).hexdigest()
    filename = re_url_scheme.sub("", filename)
    filename = re_slash.sub(",", filename)
    # limit length of filename
    if len(filename)>200:
        filename=filename[:200]
    return ",".join((filename, filemd5))
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
def _normalize_headers(headers):
return dict([ (key.lower(), NORMALIZE_SPACE.sub(value, ' ').strip()) for (key, value) in headers.iteritems()])
def _parse_cache_control(headers):
retval = {}
if headers.has_key('cache-control'):
parts = headers['cache-control'].split(',')
parts_with_args = [tuple([x.strip().lower() for x in part.split("=", 1)]) for part in parts if -1 != part.find("=")]
parts_wo_args = [(name.strip().lower(), 1) for name in parts if -1 == name.find("=")]
retval = dict(parts_with_args + parts_wo_args)
return retval
# Whether to use a strict mode to parse WWW-Authenticate headers
# Might lead to bad results in case of ill-formed header value,
# so disabled by default, falling back to relaxed parsing.
# Set to true to turn on, useful for testing servers.
USE_WWW_AUTH_STRICT_PARSING = 0

# In regex below:
# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP
# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space
# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both:
# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?
WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$")
WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$")
# Unescapes backslash pairs produced inside quoted-strings.
UNQUOTE_PAIRS = re.compile(r'\\(.)')
def _parse_www_authenticate(headers, headername='www-authenticate'):
    """Returns a dictionary of dictionaries, one dict
    per auth_scheme.

    e.g. {'digest': {'realm': ..., 'nonce': ...}, 'basic': {...}}.
    Raises MalformedHeader when a scheme/params split fails.
    """
    retval = {}
    if headers.has_key(headername):
        try:
            authenticate = headers[headername].strip()
            www_auth = USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED
            while authenticate:
                # Break off the scheme at the beginning of the line
                if headername == 'authentication-info':
                    # Authentication-Info has no scheme token; it is implicitly digest.
                    (auth_scheme, the_rest) = ('digest', authenticate)
                else:
                    (auth_scheme, the_rest) = authenticate.split(" ", 1)
                # Now loop over all the key value pairs that come after the scheme,
                # being careful not to roll into the next scheme
                match = www_auth.search(the_rest)
                auth_params = {}
                while match:
                    if match and len(match.groups()) == 3:
                        (key, value, the_rest) = match.groups()
                        # Unescape quoted-string backslash pairs in the value.
                        auth_params[key.lower()] = UNQUOTE_PAIRS.sub(r'\1', value) # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')])
                    match = www_auth.search(the_rest)
                retval[auth_scheme.lower()] = auth_params
                authenticate = the_rest.strip()
        except ValueError:
            raise MalformedHeader("WWW-Authenticate")
    return retval
def _entry_disposition(response_headers, request_headers):
    """Determine freshness from the Date, Expires and Cache-Control headers.

    Returns "FRESH" (serve from cache), "STALE" (revalidate) or
    "TRANSPARENT" (bypass the cache).

    We don't handle the following:
    1. Cache-Control: max-stale
    2. Age: headers are not used in the calculations.

    Note that this algorithm is simpler than you might think
    because we are operating as a private (non-shared) cache.
    This lets us ignore 's-maxage'. We can also ignore
    'proxy-invalidate' since we aren't a proxy.
    We will never return a stale document as
    fresh as a design decision, and thus the non-implementation
    of 'max-stale'. This also lets us safely ignore 'must-revalidate'
    since we operate as if every server has sent 'must-revalidate'.
    Since we are private we get to ignore both 'public' and
    'private' parameters. We also ignore 'no-transform' since
    we don't do any transformations.
    The 'no-store' parameter is handled at a higher level.
    So the only Cache-Control parameters we look at are:
    no-cache
    only-if-cached
    max-age
    min-fresh
    """
    retval = "STALE"
    cc = _parse_cache_control(request_headers)
    cc_response = _parse_cache_control(response_headers)
    if request_headers.has_key('pragma') and request_headers['pragma'].lower().find('no-cache') != -1:
        retval = "TRANSPARENT"
        # NOTE: side effect — Pragma: no-cache is upgraded to an explicit
        # Cache-Control: no-cache on the caller's request headers.
        if 'cache-control' not in request_headers:
            request_headers['cache-control'] = 'no-cache'
    elif cc.has_key('no-cache'):
        retval = "TRANSPARENT"
    elif cc_response.has_key('no-cache'):
        retval = "STALE"
    elif cc.has_key('only-if-cached'):
        retval = "FRESH"
    elif response_headers.has_key('date'):
        date = calendar.timegm(email.Utils.parsedate_tz(response_headers['date']))
        now = time.time()
        current_age = max(0, now - date)
        # Freshness lifetime: response max-age wins over Expires.
        if cc_response.has_key('max-age'):
            try:
                freshness_lifetime = int(cc_response['max-age'])
            except ValueError:
                freshness_lifetime = 0
        elif response_headers.has_key('expires'):
            expires = email.Utils.parsedate_tz(response_headers['expires'])
            if None == expires:
                freshness_lifetime = 0
            else:
                freshness_lifetime = max(0, calendar.timegm(expires) - date)
        else:
            freshness_lifetime = 0
        # The request's own max-age overrides whatever the response allowed.
        if cc.has_key('max-age'):
            try:
                freshness_lifetime = int(cc['max-age'])
            except ValueError:
                freshness_lifetime = 0
        # min-fresh effectively ages the entry before the comparison below.
        if cc.has_key('min-fresh'):
            try:
                min_fresh = int(cc['min-fresh'])
            except ValueError:
                min_fresh = 0
            current_age += min_fresh
        if freshness_lifetime > current_age:
            retval = "FRESH"
    return retval
def _decompressContent(response, new_content):
    """Decompress *new_content* according to the response's Content-Encoding.

    Supports gzip and deflate. On success, updates content-length and moves
    the encoding to '-content-encoding'. Raises FailedToDecompressContent
    on decompression errors.
    """
    content = new_content
    try:
        encoding = response.get('content-encoding', None)
        if encoding in ['gzip', 'deflate']:
            if encoding == 'gzip':
                content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read()
            if encoding == 'deflate':
                content = zlib.decompress(content)
            response['content-length'] = str(len(content))
            # Record the historical presence of the encoding in a way that won't interfere.
            response['-content-encoding'] = response['content-encoding']
            del response['content-encoding']
    except IOError:
        content = ""
        raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content)
    return content
def _updateCache(request_headers, response_headers, content, cache, cachekey):
    """Store (or purge) a response in *cache* under *cachekey*.

    No-op when cachekey is falsy. Honors no-store on either side by
    deleting any existing entry instead of writing one.
    """
    if cachekey:
        cc = _parse_cache_control(request_headers)
        cc_response = _parse_cache_control(response_headers)
        if cc.has_key('no-store') or cc_response.has_key('no-store'):
            cache.delete(cachekey)
        else:
            info = email.Message.Message()
            for key, value in response_headers.iteritems():
                if key not in ['status','content-encoding','transfer-encoding']:
                    info[key] = value
            # Add annotations to the cache to indicate what headers
            # are variant for this request.
            vary = response_headers.get('vary', None)
            if vary:
                vary_headers = vary.lower().replace(' ', '').split(',')
                for header in vary_headers:
                    key = '-varied-%s' % header
                    try:
                        info[key] = request_headers[header]
                    except KeyError:
                        pass
            # NOTE(review): assumes response_headers is an httplib2 Response
            # (dict-like with a .status attribute) — a plain dict lacks it.
            status = response_headers.status
            if status == 304:
                # A 304 revalidation refreshes the entry; store it as the 200.
                status = 200
            status_header = 'status: %d\r\n' % status
            header_str = info.as_string()
            # Normalize bare CR or LF to CRLF so the stored headers parse back.
            header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str)
            text = "".join([status_header, header_str, content])
            cache.set(cachekey, text)
def _cnonce():
    """Return a fresh 16-character hex client nonce for Digest auth."""
    seed = ["0123456789"[random.randrange(0, 9)] for i in range(20)]
    digest = _md5("%s:%s" % (time.ctime(), seed)).hexdigest()
    return digest[:16]
def _wsse_username_token(cnonce, iso_now, password):
    # WSSE PasswordDigest: Base64(SHA1(nonce + creation timestamp + password)).
    return base64.b64encode(_sha("%s%s%s" % (cnonce, iso_now, password)).digest()).strip()
# For credentials we need two things, first
# a pool of credentials to try (not necessarily tied to Basic, Digest, etc.)
# Then we also need a list of URIs that have already demanded authentication
# That list is tricky since sub-URIs can take the same auth, or the
# auth scheme may change as you descend the tree.
# So we also need each Auth instance to be able to tell us
# how close to the 'top' it is.
class Authentication(object):
    """Base class for one authentication scheme bound to a (host, path) scope.

    Stores the credentials and the URI subtree the scheme was learned for,
    so later requests can be matched against it.
    """

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        parsed = parse_uri(request_uri)
        self.credentials = credentials
        self.host = host
        self.path = parsed[2]
        self.http = http

    def depth(self, request_uri):
        """Number of path segments in request_uri below this entry's path."""
        (scheme, authority, path, query, fragment) = parse_uri(request_uri)
        return request_uri[len(self.path):].count("/")

    def inscope(self, host, request_uri):
        """True when (host, request_uri) falls under this entry's scope."""
        # XXX Should we normalize the request_uri?
        (scheme, authority, path, query, fragment) = parse_uri(request_uri)
        if host != self.host:
            return False
        return path.startswith(self.path)

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header. Override this in sub-classes."""
        pass

    def response(self, response, content):
        """Gives us a chance to update with new nonces
        or such returned from the last authorized response.
        Override this in sub-classes if necessary.

        Return True if the request is to be retried, for
        example Digest may return stale=true.
        """
        return False
class BasicAuthentication(Authentication):
    """RFC 2617 Basic auth: base64 of 'username:password' on every request."""

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)

    def request(self, method, request_uri, headers, content):
        """Attach the Basic Authorization header to the outgoing request."""
        userpass = "%s:%s" % self.credentials
        headers['authorization'] = 'Basic ' + base64.b64encode(userpass).strip()
class DigestAuthentication(Authentication):
    """Only do qop='auth' and MD5, since that
    is all Apache currently implements"""
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = _parse_www_authenticate(response, 'www-authenticate')
        self.challenge = challenge['digest']
        qop = self.challenge.get('qop', 'auth')
        self.challenge['qop'] = ('auth' in [x.strip() for x in qop.split()]) and 'auth' or None
        if self.challenge['qop'] is None:
            raise UnimplementedDigestAuthOptionError( _("Unsupported value for qop: %s." % qop))
        self.challenge['algorithm'] = self.challenge.get('algorithm', 'MD5').upper()
        if self.challenge['algorithm'] != 'MD5':
            raise UnimplementedDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
        # A1 = username ":" realm ":" password (RFC 2617 section 3.2.2.2).
        self.A1 = "".join([self.credentials[0], ":", self.challenge['realm'], ":", self.credentials[1]])
        # nc is the nonce-use counter, incremented on every request.
        self.challenge['nc'] = 1
    def request(self, method, request_uri, headers, content, cnonce = None):
        """Modify the request headers"""
        # H and KD follow the RFC 2617 notation: hash and keyed digest.
        H = lambda x: _md5(x).hexdigest()
        KD = lambda s, d: H("%s:%s" % (s, d))
        A2 = "".join([method, ":", request_uri])
        self.challenge['cnonce'] = cnonce or _cnonce()
        request_digest = '"%s"' % KD(H(self.A1), "%s:%s:%s:%s:%s" % (self.challenge['nonce'],
                '%08x' % self.challenge['nc'],
                self.challenge['cnonce'],
                self.challenge['qop'], H(A2)
                ))
        headers['authorization'] = 'Digest username="%s", realm="%s", nonce="%s", uri="%s", algorithm=%s, response=%s, qop=%s, nc=%08x, cnonce="%s"' % (
                self.credentials[0],
                self.challenge['realm'],
                self.challenge['nonce'],
                request_uri,
                self.challenge['algorithm'],
                request_digest,
                self.challenge['qop'],
                self.challenge['nc'],
                self.challenge['cnonce'],
                )
        if self.challenge.get('opaque'):
            headers['authorization'] += ', opaque="%s"' % self.challenge['opaque']
        self.challenge['nc'] += 1
    def response(self, response, content):
        if not response.has_key('authentication-info'):
            challenge = _parse_www_authenticate(response, 'www-authenticate').get('digest', {})
            # A stale nonce means the credentials were fine: retry with the
            # fresh nonce instead of re-prompting.
            if 'true' == challenge.get('stale'):
                self.challenge['nonce'] = challenge['nonce']
                self.challenge['nc'] = 1
                return True
        else:
            updated_challenge = _parse_www_authenticate(response, 'authentication-info').get('digest', {})
            if updated_challenge.has_key('nextnonce'):
                self.challenge['nonce'] = updated_challenge['nextnonce']
                self.challenge['nc'] = 1
        return False
class HmacDigestAuthentication(Authentication):
    """Adapted from Robert Sayre's code and DigestAuthentication above."""
    __author__ = "Thomas Broyer (t.broyer@ltgt.net)"

    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = _parse_www_authenticate(response, 'www-authenticate')
        self.challenge = challenge['hmacdigest']
        # TODO: self.challenge['domain']
        self.challenge['reason'] = self.challenge.get('reason', 'unauthorized')
        if self.challenge['reason'] not in ['unauthorized', 'integrity']:
            self.challenge['reason'] = 'unauthorized'
        self.challenge['salt'] = self.challenge.get('salt', '')
        if not self.challenge.get('snonce'):
            raise UnimplementedHmacDigestAuthOptionError( _("The challenge doesn't contain a server nonce, or this one is empty."))
        self.challenge['algorithm'] = self.challenge.get('algorithm', 'HMAC-SHA-1')
        if self.challenge['algorithm'] not in ['HMAC-SHA-1', 'HMAC-MD5']:
            raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
        self.challenge['pw-algorithm'] = self.challenge.get('pw-algorithm', 'SHA-1')
        if self.challenge['pw-algorithm'] not in ['SHA-1', 'MD5']:
            raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for pw-algorithm: %s." % self.challenge['pw-algorithm']))
        if self.challenge['algorithm'] == 'HMAC-MD5':
            self.hashmod = _md5
        else:
            self.hashmod = _sha
        if self.challenge['pw-algorithm'] == 'MD5':
            self.pwhashmod = _md5
        else:
            self.pwhashmod = _sha
        # Fix: _md5/_sha are bound to hash *constructors* (hashlib.md5/sha1,
        # or md5.new/sha.new on old interpreters); neither has a `.new`
        # attribute, so the previous self.pwhashmod.new(...) calls raised
        # AttributeError. Call the constructor directly.
        self.key = "".join([self.credentials[0], ":",
                    self.pwhashmod("".join([self.credentials[1], self.challenge['salt']])).hexdigest().lower(),
                    ":", self.challenge['realm']
                    ])
        self.key = self.pwhashmod(self.key).hexdigest().lower()

    def request(self, method, request_uri, headers, content):
        """Modify the request headers"""
        keys = _get_end2end_headers(headers)
        keylist = "".join(["%s " % k for k in keys])
        headers_val = "".join([headers[k] for k in keys])
        created = time.strftime('%Y-%m-%dT%H:%M:%SZ',time.gmtime())
        cnonce = _cnonce()
        request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge['snonce'], headers_val)
        request_digest = hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower()
        headers['authorization'] = 'HMACDigest username="%s", realm="%s", snonce="%s", cnonce="%s", uri="%s", created="%s", response="%s", headers="%s"' % (
                self.credentials[0],
                self.challenge['realm'],
                self.challenge['snonce'],
                cnonce,
                request_uri,
                created,
                request_digest,
                keylist,
                )

    def response(self, response, content):
        challenge = _parse_www_authenticate(response, 'www-authenticate').get('hmacdigest', {})
        if challenge.get('reason') in ['integrity', 'stale']:
            return True
        return False
class WsseAuthentication(Authentication):
    """This is thinly tested and should not be relied upon.
    At this time there isn't any third party server to test against.
    Blogger and TypePad implemented this algorithm at one point
    but Blogger has since switched to Basic over HTTPS and
    TypePad has implemented it wrong, by never issuing a 401
    challenge but instead requiring your client to telepathically know that
    their endpoint is expecting WSSE profile="UsernameToken"."""
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header."""
        headers['authorization'] = 'WSSE profile="UsernameToken"'
        iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
        cnonce = _cnonce()
        # PasswordDigest = Base64(SHA1(nonce + created + password)).
        password_digest = _wsse_username_token(cnonce, iso_now, self.credentials[1])
        headers['X-WSSE'] = 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % (
                self.credentials[0],
                password_digest,
                cnonce,
                iso_now)
class GoogleLoginAuthentication(Authentication):
    """Handles Google's ClientLogin ('googlelogin') challenge scheme.

    Construction trades the username/password for an Auth token by POSTing
    to the ClientLogin endpoint; request() then attaches that token.
    """
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        from urllib import urlencode
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = _parse_www_authenticate(response, 'www-authenticate')
        service = challenge['googlelogin'].get('service', 'xapi')
        # Blogger actually returns the service in the challenge;
        # for the rest we guess based on the URI.
        if service == 'xapi' and request_uri.find("calendar") > 0:
            service = "cl"
        # No point in guessing Base or Spreadsheet
        #elif request_uri.find("spreadsheets") > 0:
        #    service = "wise"
        login_form = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers['user-agent'])
        resp, content = self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(login_form), headers={'Content-Type': 'application/x-www-form-urlencoded'})
        # The response body is a set of name=value lines; collect them.
        tokens = {}
        for line in content.split('\n'):
            if line:
                name, value = line.split("=", 1)
                tokens[name] = value
        # 403 means login failed; leave the token empty in that case.
        self.Auth = "" if resp.status == 403 else tokens['Auth']

    def request(self, method, request_uri, headers, content):
        """Attach the previously obtained ClientLogin token."""
        headers['authorization'] = 'GoogleLogin Auth=' + self.Auth
# Maps each authentication scheme name (as it appears, lowercased, in a
# WWW-Authenticate challenge) to the handler class that implements it.
AUTH_SCHEME_CLASSES = {
    "basic": BasicAuthentication,
    "wsse": WsseAuthentication,
    "digest": DigestAuthentication,
    "hmacdigest": HmacDigestAuthentication,
    "googlelogin": GoogleLoginAuthentication
}

# Order in which schemes are tried when a challenge offers several
# (preferred schemes first); used by Http._auth_from_challenge().
AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"]
class FileCache(object):
    """Uses a local directory as a store for cached files.
    Not really safe to use if multiple threads or processes are going to
    be running on the same cache.
    """
    def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior
        # 'cache' is the directory used for storage (created if missing);
        # 'safe' maps a cache key to a filesystem-safe filename.
        self.cache = cache
        self.safe = safe
        if not os.path.exists(cache):
            os.makedirs(self.cache)

    def get(self, key):
        """Return the cached byte string for 'key', or None if it is
        missing or unreadable."""
        retval = None
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        try:
            # open() instead of the Python-2-only file() builtin; close the
            # handle even if read() raises.
            f = open(cacheFullPath, "rb")
            try:
                retval = f.read()
            finally:
                f.close()
        except IOError:
            pass
        return retval

    def set(self, key, value):
        """Store byte string 'value' under 'key', replacing any old entry."""
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        f = open(cacheFullPath, "wb")
        try:
            f.write(value)
        finally:
            f.close()

    def delete(self, key):
        """Remove the entry for 'key' if one exists."""
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        if os.path.exists(cacheFullPath):
            os.remove(cacheFullPath)
class Credentials(object):
    """A registry of (domain, name, password) triples.

    An empty domain acts as a wildcard that matches every host.
    """
    def __init__(self):
        self.credentials = []

    def add(self, name, password, domain=""):
        """Register a name/password pair, optionally scoped to 'domain'."""
        self.credentials.append((domain.lower(), name, password))

    def clear(self):
        """Forget every stored credential."""
        self.credentials = []

    def iter(self, domain):
        """Yield the (name, password) pairs applicable to 'domain'."""
        for stored_domain, name, password in self.credentials:
            if stored_domain in ("", domain):
                yield (name, password)
class KeyCerts(Credentials):
    """Identical to Credentials except that
    name/password are mapped to key/cert.

    Instantiated as Http.certificates and populated via
    Http.add_certificate(key, cert, domain).
    """
    pass
class ProxyInfo(object):
    """Collect information required to use a proxy."""
    def __init__(self, proxy_type, proxy_host, proxy_port, proxy_rdns=None, proxy_user=None, proxy_pass=None):
        """The parameter proxy_type must be set to one of socks.PROXY_TYPE_XXX
        constants. For example:

        p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP, proxy_host='localhost', proxy_port=8000)
        """
        self.proxy_type, self.proxy_host, self.proxy_port, self.proxy_rdns, self.proxy_user, self.proxy_pass = proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass

    def astuple(self):
        """Return the settings as a tuple, suitable for unpacking into
        socks.socksocket.setproxy(*...)."""
        return (self.proxy_type, self.proxy_host, self.proxy_port, self.proxy_rdns,
                self.proxy_user, self.proxy_pass)

    def isgood(self):
        """Return True when both a host and a port have been configured."""
        # 'is not None' rather than '!= None': identity comparison is the
        # correct (PEP 8) way to test against the None singleton.
        return (self.proxy_host is not None) and (self.proxy_port is not None)
class HTTPConnectionWithTimeout(httplib.HTTPConnection):
    """
    HTTPConnection subclass that supports timeouts

    All timeouts are in seconds. If None is passed for timeout then
    Python's default timeout for sockets will be used. See for example
    the docs of socket.setdefaulttimeout():
    http://docs.python.org/library/socket.html#socket.setdefaulttimeout
    """
    def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None):
        # proxy_info, if given, is a ProxyInfo instance consulted in connect().
        httplib.HTTPConnection.__init__(self, host, port, strict)
        self.timeout = timeout
        self.proxy_info = proxy_info

    def connect(self):
        """Connect to the host and port specified in __init__."""
        # Mostly verbatim from httplib.py.
        if self.proxy_info and socks is None:
            raise ProxiesUnavailableError(
                'Proxy support missing but proxy use was requested!')
        # Fallback error used if getaddrinfo yields no candidates at all.
        msg = "getaddrinfo returns an empty list"
        # Try each resolved address in turn until one connects.
        for res in socket.getaddrinfo(self.host, self.port, 0,
                socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                if self.proxy_info and self.proxy_info.isgood():
                    # Route the connection through the configured proxy.
                    self.sock = socks.socksocket(af, socktype, proto)
                    self.sock.setproxy(*self.proxy_info.astuple())
                else:
                    self.sock = socket.socket(af, socktype, proto)
                    self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
                # Different from httplib: support timeouts.
                if has_timeout(self.timeout):
                    self.sock.settimeout(self.timeout)
                # End of difference from httplib.
                if self.debuglevel > 0:
                    print "connect: (%s, %s)" % (self.host, self.port)
                self.sock.connect(sa)
            except socket.error, msg:
                if self.debuglevel > 0:
                    print 'connect fail:', (self.host, self.port)
                if self.sock:
                    self.sock.close()
                self.sock = None
                continue  # try the next address returned by getaddrinfo
            break
        if not self.sock:
            # Every candidate address failed; re-raise the last error seen.
            raise socket.error, msg
class HTTPSConnectionWithTimeout(httplib.HTTPSConnection):
"""
This class allows communication via SSL.
All timeouts are in seconds. If None is passed for timeout then
Python's default timeout for sockets will be used. See for example
the docs of socket.setdefaulttimeout():
http://docs.python.org/library/socket.html#socket.setdefaulttimeout
"""
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=None, proxy_info=None,
ca_certs=None, disable_ssl_certificate_validation=False):
httplib.HTTPSConnection.__init__(self, host, port=port, key_file=key_file,
cert_file=cert_file, strict=strict)
self.timeout = timeout
self.proxy_info = proxy_info
if ca_certs is None:
ca_certs = CA_CERTS
self.ca_certs = ca_certs
self.disable_ssl_certificate_validation = \
disable_ssl_certificate_validation
# The following two methods were adapted from https_wrapper.py, released
# with the Google Appengine SDK at
# http://googleappengine.googlecode.com/svn-history/r136/trunk/python/google/appengine/tools/https_wrapper.py
# under the following license:
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def _GetValidHostsForCert(self, cert):
"""Returns a list of valid host globs for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
Returns:
list: A list of valid host globs.
"""
if 'subjectAltName' in cert:
return [x[1] for x in cert['subjectAltName']
if x[0].lower() == 'dns']
else:
return [x[0][1] for x in cert['subject']
if x[0][0].lower() == 'commonname']
def _ValidateCertificateHostname(self, cert, hostname):
"""Validates that a given hostname is valid for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
hostname: The hostname to test.
Returns:
bool: Whether or not the hostname is valid for this certificate.
"""
hosts = self._GetValidHostsForCert(cert)
for host in hosts:
host_re = host.replace('.', '\.').replace('*', '[^.]*')
if re.search('^%s$' % (host_re,), hostname, re.I):
return True
return False
def connect(self):
"Connect to a host on a given (SSL) port."
msg = "getaddrinfo returns an empty list"
for family, socktype, proto, canonname, sockaddr in socket.getaddrinfo(
self.host, self.port, 0, socket.SOCK_STREAM):
try:
if self.proxy_info and self.proxy_info.isgood():
sock = socks.socksocket(family, socktype, proto)
sock.setproxy(*self.proxy_info.astuple())
else:
sock = socket.socket(family, socktype, proto)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if has_timeout(self.timeout):
sock.settimeout(self.timeout)
sock.connect((self.host, self.port))
self.sock =_ssl_wrap_socket(
sock, self.key_file, self.cert_file,
self.disable_ssl_certificate_validation, self.ca_certs)
if self.debuglevel > 0:
print "connect: (%s, %s)" % (self.host, self.port)
if not self.disable_ssl_certificate_validation:
cert = self.sock.getpeercert()
hostname = self.host.split(':', 0)[0]
if not self._ValidateCertificateHostname(cert, hostname):
raise CertificateHostnameMismatch(
'Server presented certificate that does not match '
'host %s: %s' % (hostname, cert), hostname, cert)
except ssl_SSLError, e:
if sock:
sock.close()
if self.sock:
self.sock.close()
self.sock = None
# Unfortunately the ssl module doesn't seem to provide any way
# to get at more detailed error information, in particular
# whether the error is due to certificate validation or
# something else (such as SSL protocol mismatch).
if e.errno == ssl.SSL_ERROR_SSL:
raise SSLHandshakeError(e)
else:
raise
except (socket.timeout, socket.gaierror):
raise
except socket.error, msg:
if self.debuglevel > 0:
print 'connect fail:', (self.host, self.port)
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
# Default mapping of URI scheme to the connection class used for it.
SCHEME_TO_CONNECTION = {
    'http': HTTPConnectionWithTimeout,
    'https': HTTPSConnectionWithTimeout
}

# Use a different connection object for Google App Engine
try:
    from google.appengine.api import apiproxy_stub_map
    if apiproxy_stub_map.apiproxy.GetStub('urlfetch') is None:
        raise ImportError # Bail out; we're not actually running on App Engine.
    from google.appengine.api.urlfetch import fetch
    from google.appengine.api.urlfetch import InvalidURLError
    from google.appengine.api.urlfetch import DownloadError
    from google.appengine.api.urlfetch import ResponseTooLargeError
    from google.appengine.api.urlfetch import SSLCertificateError

    class ResponseDict(dict):
        """Is a dictionary that also has a read() method, so
        that it can pass itself off as an httlib.HTTPResponse()."""
        def read(self):
            # Placeholder only; the real body is attached via setattr()
            # in AppEngineHttpConnection.request() below.
            pass

    class AppEngineHttpConnection(object):
        """Emulates an httplib.HTTPConnection object, but actually uses the Google
        App Engine urlfetch library. This allows the timeout to be properly used on
        Google App Engine, and avoids using httplib, which on Google App Engine is
        just another wrapper around urlfetch.
        """
        def __init__(self, host, port=None, key_file=None, cert_file=None,
                     strict=None, timeout=None, proxy_info=None, ca_certs=None,
                     disable_certificate_validation=False):
            self.host = host
            self.port = port
            self.timeout = timeout
            # urlfetch offers no client certs, proxies, or custom CA bundles.
            if key_file or cert_file or proxy_info or ca_certs:
                raise NotSupportedOnThisPlatform()
            self.response = None
            self.scheme = 'http'
            self.validate_certificate = not disable_certificate_validation
            # Pretend to always be connected so callers skip connect().
            self.sock = True

        def request(self, method, url, body, headers):
            # Calculate the absolute URI, which fetch requires
            netloc = self.host
            if self.port:
                netloc = '%s:%s' % (self.host, self.port)
            absolute_uri = '%s://%s%s' % (self.scheme, netloc, url)
            try:
                response = fetch(absolute_uri, payload=body, method=method,
                    headers=headers, allow_truncated=False, follow_redirects=False,
                    deadline=self.timeout,
                    validate_certificate=self.validate_certificate)
                self.response = ResponseDict(response.headers)
                self.response['status'] = str(response.status_code)
                self.response.status = response.status_code
                setattr(self.response, 'read', lambda : response.content)

            # Make sure the exceptions raised match the exceptions expected.
            except InvalidURLError:
                raise socket.gaierror('')
            except (DownloadError, ResponseTooLargeError, SSLCertificateError):
                raise httplib.HTTPException()

        def getresponse(self):
            # Return the response captured by the last request() call.
            if self.response:
                return self.response
            else:
                raise httplib.HTTPException()

        def set_debuglevel(self, level):
            pass

        def connect(self):
            pass

        def close(self):
            pass

    class AppEngineHttpsConnection(AppEngineHttpConnection):
        """Same as AppEngineHttpConnection, but for HTTPS URIs."""
        def __init__(self, host, port=None, key_file=None, cert_file=None,
                     strict=None, timeout=None, proxy_info=None):
            AppEngineHttpConnection.__init__(self, host, port, key_file, cert_file,
                strict, timeout, proxy_info)
            self.scheme = 'https'

    # Update the connection classes to use the Google App Engine specific ones.
    SCHEME_TO_CONNECTION = {
        'http': AppEngineHttpConnection,
        'https': AppEngineHttpsConnection
    }
except ImportError:
    # Not running on App Engine; keep the default connection classes.
    pass
class Http(object):
    """An HTTP client that handles:
- all methods
- caching
- ETags
- compression,
- HTTPS
- Basic
- Digest
- WSSE

and more.
    """
    def __init__(self, cache=None, timeout=None, proxy_info=None,
                 ca_certs=None, disable_ssl_certificate_validation=False):
        """
        The value of proxy_info is a ProxyInfo instance.

        If 'cache' is a string then it is used as a directory name for
        a disk cache. Otherwise it must be an object that supports the
        same interface as FileCache.

        All timeouts are in seconds. If None is passed for timeout
        then Python's default timeout for sockets will be used. See
        for example the docs of socket.setdefaulttimeout():
        http://docs.python.org/library/socket.html#socket.setdefaulttimeout

        ca_certs is the path of a file containing root CA certificates for SSL
        server certificate validation. By default, a CA cert file bundled with
        httplib2 is used.

        If disable_ssl_certificate_validation is true, SSL cert validation will
        not be performed.
        """
        self.proxy_info = proxy_info
        self.ca_certs = ca_certs
        self.disable_ssl_certificate_validation = \
                disable_ssl_certificate_validation

        # Map domain name to an httplib connection
        self.connections = {}
        # The location of the cache, for now a directory
        # where cached responses are held.
        if cache and isinstance(cache, basestring):
            self.cache = FileCache(cache)
        else:
            self.cache = cache

        # Name/password
        self.credentials = Credentials()

        # Key/cert
        self.certificates = KeyCerts()

        # authorization objects
        self.authorizations = []

        # If set to False then no redirects are followed, even safe ones.
        self.follow_redirects = True

        # Which HTTP methods do we apply optimistic concurrency to, i.e.
        # which methods get an "if-match:" etag header added to them.
        self.optimistic_concurrency_methods = ["PUT", "PATCH"]

        # If 'follow_redirects' is True, and this is set to True then
        # all redirecs are followed, including unsafe ones.
        self.follow_all_redirects = False

        self.ignore_etag = False

        self.force_exception_to_status_code = False

        self.timeout = timeout

    def _auth_from_challenge(self, host, request_uri, headers, response, content):
        """A generator that creates Authorization objects
           that can be applied to requests.
        """
        challenges = _parse_www_authenticate(response, 'www-authenticate')
        # Try every stored credential against every offered scheme, in
        # our order of scheme preference.
        for cred in self.credentials.iter(host):
            for scheme in AUTH_SCHEME_ORDER:
                if challenges.has_key(scheme):
                    yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self)

    def add_credentials(self, name, password, domain=""):
        """Add a name and password that will be used
        any time a request requires authentication."""
        self.credentials.add(name, password, domain)

    def add_certificate(self, key, cert, domain):
        """Add a key and cert that will be used
        any time a request requires authentication."""
        self.certificates.add(key, cert, domain)

    def clear_credentials(self):
        """Remove all the names and passwords
        that are used for authentication"""
        self.credentials.clear()
        self.authorizations = []

    def _conn_request(self, conn, request_uri, method, body, headers):
        """Send one request on 'conn' and read the response.

        Retries once (the second loop iteration) when the connection
        appears to have been dropped by the server. Returns a
        (Response, content) tuple; 'content' is decompressed unless the
        method was HEAD.
        """
        for i in range(2):
            try:
                if conn.sock is None:
                    conn.connect()
                conn.request(method, request_uri, body, headers)
            except socket.timeout:
                raise
            except socket.gaierror:
                conn.close()
                raise ServerNotFoundError("Unable to find the server at %s" % conn.host)
            except ssl_SSLError:
                conn.close()
                raise
            except socket.error, e:
                err = 0
                if hasattr(e, 'args'):
                    err = getattr(e, 'args')[0]
                else:
                    err = e.errno
                if err == errno.ECONNREFUSED: # Connection refused
                    raise
            except httplib.HTTPException:
                # Just because the server closed the connection doesn't apparently mean
                # that the server didn't send a response.
                if conn.sock is None:
                    if i == 0:
                        conn.close()
                        conn.connect()
                        continue
                    else:
                        conn.close()
                        raise
                if i == 0:
                    conn.close()
                    conn.connect()
                    continue
            try:
                response = conn.getresponse()
            except (socket.error, httplib.HTTPException):
                # Reading failed; reconnect and retry once, then give up.
                if i == 0:
                    conn.close()
                    conn.connect()
                    continue
                else:
                    raise
            else:
                content = ""
                if method == "HEAD":
                    response.close()
                else:
                    content = response.read()
                response = Response(response)
                if method != "HEAD":
                    content = _decompressContent(response, content)
            break
        return (response, content)

    def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey):
        """Do the actual request using the connection object
        and also follow one level of redirects if necessary"""

        # Pick the most specific (deepest-path) in-scope authorization.
        auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)]
        auth = auths and sorted(auths)[0][1] or None
        if auth:
            auth.request(method, request_uri, headers, body)

        (response, content) = self._conn_request(conn, request_uri, method, body, headers)

        if auth:
            # The auth object may ask for one retry (e.g. stale digest nonce).
            if auth.response(response, body):
                auth.request(method, request_uri, headers, body)
                (response, content) = self._conn_request(conn, request_uri, method, body, headers )
                response._stale_digest = 1

        if response.status == 401:
            # Try each credential/scheme combination until one is accepted.
            for authorization in self._auth_from_challenge(host, request_uri, headers, response, content):
                authorization.request(method, request_uri, headers, body)
                (response, content) = self._conn_request(conn, request_uri, method, body, headers, )
                if response.status != 401:
                    self.authorizations.append(authorization)
                    authorization.response(response, body)
                    break

        if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303):
            if self.follow_redirects and response.status in [300, 301, 302, 303, 307]:
                # Pick out the location header and basically start from the beginning
                # remembering first to strip the ETag header and decrement our 'depth'
                if redirections:
                    if not response.has_key('location') and response.status != 300:
                        raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content)
                    # Fix-up relative redirects (which violate an RFC 2616 MUST)
                    if response.has_key('location'):
                        location = response['location']
                        (scheme, authority, path, query, fragment) = parse_uri(location)
                        if authority == None:
                            response['location'] = urlparse.urljoin(absolute_uri, location)
                    if response.status == 301 and method in ["GET", "HEAD"]:
                        # Cache permanent redirects so future requests can
                        # short-circuit straight to the new URL.
                        response['-x-permanent-redirect-url'] = response['location']
                        if not response.has_key('content-location'):
                            response['content-location'] = absolute_uri
                        _updateCache(headers, response, content, self.cache, cachekey)
                    if headers.has_key('if-none-match'):
                        del headers['if-none-match']
                    if headers.has_key('if-modified-since'):
                        del headers['if-modified-since']
                    if response.has_key('location'):
                        location = response['location']
                        old_response = copy.deepcopy(response)
                        if not old_response.has_key('content-location'):
                            old_response['content-location'] = absolute_uri
                        redirect_method = method
                        if response.status in [302, 303]:
                            # Per RFC, 302/303 redirects are re-fetched as GET.
                            redirect_method = "GET"
                            body = None
                        (response, content) = self.request(location, redirect_method, body=body, headers = headers, redirections = redirections - 1)
                        response.previous = old_response
                else:
                    raise RedirectLimit("Redirected more times than rediection_limit allows.", response, content)
            elif response.status in [200, 203] and method in ["GET", "HEAD"]:
                # Don't cache 206's since we aren't going to handle byte range requests
                if not response.has_key('content-location'):
                    response['content-location'] = absolute_uri
                _updateCache(headers, response, content, self.cache, cachekey)

        return (response, content)

    def _normalize_headers(self, headers):
        # Lower-case header names etc.; delegates to the module-level helper.
        return _normalize_headers(headers)

# Need to catch and rebrand some exceptions
# Then need to optionally turn all exceptions into status codes
# including all socket.* and httplib.* exceptions.

    def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None):
        """ Performs a single HTTP request.
The 'uri' is the URI of the HTTP resource and can begin
with either 'http' or 'https'. The value of 'uri' must be an absolute URI.

The 'method' is the HTTP method to perform, such as GET, POST, DELETE, etc.
There is no restriction on the methods allowed.

The 'body' is the entity body to be sent with the request. It is a string
object.

Any extra headers that are to be sent with the request should be provided in the
'headers' dictionary.

The maximum number of redirect to follow before raising an
exception is 'redirections. The default is 5.

The return value is a tuple of (response, content), the first
being an instance of the 'Response' class, the second being
a string that contains the response entity body.
        """
        try:
            if headers is None:
                headers = {}
            else:
                headers = self._normalize_headers(headers)

            if not headers.has_key('user-agent'):
                headers['user-agent'] = "Python-httplib2/%s (gzip)" % __version__

            uri = iri2uri(uri)

            (scheme, authority, request_uri, defrag_uri) = urlnorm(uri)
            domain_port = authority.split(":")[0:2]
            # A port of 443 forces https even if the scheme says http.
            if len(domain_port) == 2 and domain_port[1] == '443' and scheme == 'http':
                scheme = 'https'
                authority = domain_port[0]

            # Reuse an open connection for this scheme+host if we have one.
            conn_key = scheme+":"+authority
            if conn_key in self.connections:
                conn = self.connections[conn_key]
            else:
                if not connection_type:
                    connection_type = SCHEME_TO_CONNECTION[scheme]
                certs = list(self.certificates.iter(authority))
                if issubclass(connection_type, HTTPSConnectionWithTimeout):
                    if certs:
                        conn = self.connections[conn_key] = connection_type(
                                authority, key_file=certs[0][0],
                                cert_file=certs[0][1], timeout=self.timeout,
                                proxy_info=self.proxy_info,
                                ca_certs=self.ca_certs,
                                disable_ssl_certificate_validation=
                                        self.disable_ssl_certificate_validation)
                    else:
                        conn = self.connections[conn_key] = connection_type(
                                authority, timeout=self.timeout,
                                proxy_info=self.proxy_info,
                                ca_certs=self.ca_certs,
                                disable_ssl_certificate_validation=
                                        self.disable_ssl_certificate_validation)
                else:
                    conn = self.connections[conn_key] = connection_type(
                            authority, timeout=self.timeout,
                            proxy_info=self.proxy_info)
                conn.set_debuglevel(debuglevel)

            if 'range' not in headers and 'accept-encoding' not in headers:
                headers['accept-encoding'] = 'gzip, deflate'

            info = email.Message.Message()
            cached_value = None
            if self.cache:
                cachekey = defrag_uri
                cached_value = self.cache.get(cachekey)
                if cached_value:
                    # info = email.message_from_string(cached_value)
                    #
                    # Need to replace the line above with the kludge below
                    # to fix the non-existent bug not fixed in this
                    # bug report: http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html
                    try:
                        info, content = cached_value.split('\r\n\r\n', 1)
                        feedparser = email.FeedParser.FeedParser()
                        feedparser.feed(info)
                        info = feedparser.close()
                        feedparser._parse = None
                    except IndexError:
                        # Corrupt cache entry; discard it and carry on.
                        self.cache.delete(cachekey)
                        cachekey = None
                        cached_value = None
            else:
                cachekey = None

            if method in self.optimistic_concurrency_methods and self.cache and info.has_key('etag') and not self.ignore_etag and 'if-match' not in headers:
                # http://www.w3.org/1999/04/Editing/
                headers['if-match'] = info['etag']

            if method not in ["GET", "HEAD"] and self.cache and cachekey:
                # RFC 2616 Section 13.10
                self.cache.delete(cachekey)

            # Check the vary header in the cache to see if this request
            # matches what varies in the cache.
            if method in ['GET', 'HEAD'] and 'vary' in info:
                vary = info['vary']
                vary_headers = vary.lower().replace(' ', '').split(',')
                for header in vary_headers:
                    key = '-varied-%s' % header
                    value = info[key]
                    if headers.get(header, None) != value:
                        # A varied header differs; cached entry unusable.
                        cached_value = None
                        break

            if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers:
                if info.has_key('-x-permanent-redirect-url'):
                    # Should cached permanent redirects be counted in our redirection count? For now, yes.
                    if redirections <= 0:
                        raise RedirectLimit("Redirected more times than rediection_limit allows.", {}, "")
                    (response, new_content) = self.request(info['-x-permanent-redirect-url'], "GET", headers = headers, redirections = redirections - 1)
                    response.previous = Response(info)
                    response.previous.fromcache = True
                else:
                    # Determine our course of action:
                    #   Is the cached entry fresh or stale?
                    #   Has the client requested a non-cached response?
                    #
                    # There seems to be three possible answers:
                    # 1. [FRESH] Return the cache entry w/o doing a GET
                    # 2. [STALE] Do the GET (but add in cache validators if available)
                    # 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request
                    entry_disposition = _entry_disposition(info, headers)

                    if entry_disposition == "FRESH":
                        if not cached_value:
                            info['status'] = '504'
                            content = ""
                        response = Response(info)
                        if cached_value:
                            response.fromcache = True
                        return (response, content)

                    if entry_disposition == "STALE":
                        if info.has_key('etag') and not self.ignore_etag and not 'if-none-match' in headers:
                            headers['if-none-match'] = info['etag']
                        if info.has_key('last-modified') and not 'last-modified' in headers:
                            headers['if-modified-since'] = info['last-modified']
                    elif entry_disposition == "TRANSPARENT":
                        pass

                    (response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)

                if response.status == 304 and method == "GET":
                    # Rewrite the cache entry with the new end-to-end headers
                    # Take all headers that are in response
                    # and overwrite their values in info.
                    # unless they are hop-by-hop, or are listed in the connection header.

                    for key in _get_end2end_headers(response):
                        info[key] = response[key]
                    merged_response = Response(info)
                    if hasattr(response, "_stale_digest"):
                        merged_response._stale_digest = response._stale_digest
                    _updateCache(headers, merged_response, content, self.cache, cachekey)
                    response = merged_response
                    response.status = 200
                    response.fromcache = True

                elif response.status == 200:
                    content = new_content
                else:
                    self.cache.delete(cachekey)
                    content = new_content
            else:
                cc = _parse_cache_control(headers)
                if cc.has_key('only-if-cached'):
                    # Client asked for cache-only, but there's no cache entry:
                    # RFC 2616 says respond with 504.
                    info['status'] = '504'
                    response = Response(info)
                    content = ""
                else:
                    (response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
        except Exception, e:
            if self.force_exception_to_status_code:
                # Convert the exception into a synthetic HTTP response
                # instead of propagating it to the caller.
                if isinstance(e, HttpLib2ErrorWithResponse):
                    response = e.response
                    content = e.content
                    response.status = 500
                    response.reason = str(e)
                elif isinstance(e, socket.timeout):
                    content = "Request Timeout"
                    response = Response( {
                            "content-type": "text/plain",
                            "status": "408",
                            "content-length": len(content)
                            })
                    response.reason = "Request Timeout"
                else:
                    content = str(e)
                    response = Response( {
                            "content-type": "text/plain",
                            "status": "400",
                            "content-length": len(content)
                            })
                    response.reason = "Bad Request"
            else:
                raise

        return (response, content)
class Response(dict):
    """An object more like email.Message than httplib.HTTPResponse."""

    """Is this response from our local cache"""
    fromcache = False

    """HTTP protocol version used by server. 10 for HTTP/1.0, 11 for HTTP/1.1. """
    version = 11

    "Status code returned by server. "
    status = 200

    """Reason phrase returned by server."""
    reason = "Ok"

    previous = None

    def __init__(self, info):
        # info is either an email.Message or
        # an httplib.HTTPResponse object.
        if isinstance(info, httplib.HTTPResponse):
            for key, value in info.getheaders():
                self[key.lower()] = value
            self.status = info.status
            self['status'] = str(self.status)
            self.reason = info.reason
            self.version = info.version
        elif isinstance(info, email.Message.Message):
            for key, value in info.items():
                self[key] = value
            self.status = int(self['status'])
        else:
            # Assume a plain mapping of header name to value.
            for key, value in info.iteritems():
                self[key] = value
            self.status = int(self.get('status', self.status))

    def __getattr__(self, name):
        """Allow 'response.dict' as an alias for the response itself,
        for compatibility with callers expecting an httplib-style API."""
        if name == 'dict':
            return self
        else:
            raise AttributeError, name
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.