File size: 5,321 Bytes
c705f71 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 |
# nucle_doc.py
#
# Author: Yuanbin Wu
# National University of Singapore (NUS)
# Date: 12 Mar 2013
# Version: 1.0
#
# Contact: wuyb@comp.nus.edu.sg
#
# This script is distributed to support the CoNLL-2013 Shared Task.
# It is free for research and educational purposes.
import os
import sys
from nltk import word_tokenize
class nucle_doc:
    """Container for one NUCLE essay: writer metadata, paragraph text,
    annotated mistakes, and per-paragraph lists of parsed sentences."""

    def __init__(self):
        # Raw document-level attribute pairs (set by the corpus reader).
        self.docattrs = None
        # Writer metadata fields, filled in from the corpus headers.
        self.matric = ''
        self.email = ''
        self.nationality = ''
        self.firstLanguage = ''
        self.schoolLanguage = ''
        self.englishTests = ''
        # Text and annotation containers.
        self.paragraphs = []
        self.annotation = []
        self.mistakes = []
        # List of paragraphs, each a list of nucle_sent objects.
        self.sentences = []

    def buildSentence(self, sentstr, dpnode, constituentstr, poslist, chunklist):
        """Build a nucle_sent from its pieces and append it to the
        current (last) paragraph's sentence list."""
        new_sent = nucle_sent(sentstr, dpnode, constituentstr, poslist, chunklist)
        self.sentences[-1].append(new_sent)

    def addSentence(self, sent):
        """Append an already-constructed sentence object to the current
        (last) paragraph's sentence list."""
        self.sentences[-1].append(sent)

    def findMistake(self, par, pos):
        """Return the first annotated mistake whose span covers character
        offset *pos* in paragraph *par* (start inclusive, end exclusive),
        or None when no mistake covers that position."""
        hits = (m for m in self.mistakes
                if m['start_par'] == par and m['start_off'] <= pos < m['end_off'])
        return next(hits, None)
class nucle_sent:
    """One sentence plus its NLP annotations: surface tokens, POS tags,
    dependency nodes, a bracketed constituent parse, and chunk labels.

    Fixes vs. the original: `buildConstituentList`'s error path referenced
    an undefined name `words` (NameError); Python 2-only constructs
    (`print >>`, `xrange`) are ported to Python 3 equivalents; `== None`
    replaced with `is None`. All method names and signatures are unchanged.
    """

    def __init__(self, sentstr, dpnode, constituentstr, poslist, chunklist):
        self.sentstr = sentstr
        # Tokenize with NLTK; poslist/chunklist/constituentlist are all
        # expected to align one-to-one with these tokens.
        self.words = word_tokenize(sentstr)
        self.dpnodes = dpnode
        self.constituentstr = constituentstr
        self.constituentlist = []
        self.poslist = poslist
        self.chunklist = chunklist

    def buildConstituentList(self):
        """Split the bracketed parse string into per-token CoNLL-style
        "bits" (e.g. "(S(NP*", "*)") and store them in self.constituentlist.

        Exits the process with status 1 if the number of bits does not
        match the number of tokens.
        """
        # Strip all whitespace so token boundaries are marked only by ')'.
        s = self.constituentstr.strip().replace('\n', '').replace(' ', '')
        r = []
        i = 0
        while i < len(s):
            # j: first ')' closing the current token's preterminal.
            j = i
            while j < len(s) and s[j] != ')':
                j += 1
            # k: end of the run of ')' that follows.
            k = j
            while k < len(s) and s[k] == ')':
                k += 1
            nodeWholeStr = s[i:k]
            # Drop the preterminal label and word (everything after the
            # last '('), replace them with '*', keep the extra closing
            # brackets beyond the first.
            lastLRBIndex = nodeWholeStr.rfind('(')
            nodeStr = nodeWholeStr[:lastLRBIndex] + '*' + s[j + 1:k]
            r.append(nodeStr)
            i = k
        if len(r) != len(self.words):
            print('Error in building constituent tree bits: different length with words.', file=sys.stderr)
            print(len(r), len(self.words), file=sys.stderr)
            print(' '.join(r), file=sys.stderr)
            # Bug fix: the original printed the undefined name `words`.
            print(self.words, file=sys.stderr)
            sys.exit(1)
        self.constituentlist = r

    def setDpNode(self, dpnode):
        self.dpnodes = dpnode

    def setPOSList(self, poslist):
        self.poslist = poslist

    def setConstituentStr(self, constituentstr):
        self.constituentstr = constituentstr

    def setConstituentList(self, constituentlist):
        self.constituentlist = constituentlist

    def setWords(self, words):
        self.words = words

    def setChunkList(self, chunklist):
        self.chunklist = chunklist

    def getDpNode(self):
        return self.dpnodes

    def getPOSList(self):
        return self.poslist

    def getConstituentStr(self):
        return self.constituentstr

    def getConstituentList(self):
        return self.constituentlist

    def getWords(self):
        return self.words

    def getChunkList(self):
        return self.chunklist

    def getConllFormat(self, doc, paragraphIndex, sentIndex):
        """Return this sentence as a CoNLL-style table: one row per token.

        Each row is [doc id, paragraph index, sentence index, word index,
        word] and, when POS information is present, additionally
        [POS, dp parent index, dp label, constituent bit].

        Exits the process with status 1 if poslist is non-empty but does
        not align with the token list.
        """
        table = []
        dpnodes = self.getDpNode()
        poslist = self.getPOSList()
        words = self.getWords()
        constituentlist = self.getConstituentList()
        # Parse info is optional: an empty POS list means only the surface
        # columns are emitted.
        hasParseInfo = 0 if len(poslist) == 0 else 1
        if len(words) != len(poslist) and len(poslist) != 0:
            print('Error in building Conll Format: different length stanford parser postags and words.', file=sys.stderr)
            print('len words:', len(words), words, file=sys.stderr)
            print('len poslist:', len(poslist), poslist, file=sys.stderr)
            sys.exit(1)
        for wdindex in range(len(words)):
            word = words[wdindex]
            row = []
            row.append(doc.docattrs[0][1])  # doc id
            row.append(paragraphIndex)      # paragraph index
            row.append(sentIndex)           # sentence index
            row.append(wdindex)             # word index
            row.append(word)                # word
            if hasParseInfo == 1:
                # poslist entries look like "word/POS"; split on the last '/'
                # so words containing '/' are handled.
                posword = poslist[wdindex]
                splitp = posword.rfind('/')
                pos = posword[splitp + 1:].strip()
                constituentnode = constituentlist[wdindex]
                # Find the dependency node for this token, if any.
                dpnode = None
                for d in dpnodes:
                    if d.index == wdindex:
                        dpnode = d
                        break
                row.append(pos)  # POS
                if dpnode is None:
                    row.append('-')
                    row.append('-')
                else:
                    row.append(dpnode.parent_index)  # dp parent
                    row.append(dpnode.grammarrole)   # dp label
                row.append(constituentnode)  # constituent bit
            table.append(row)
        return table
|