Change to extend list when needed
Browse files — handler.py (+11 −14)
handler.py
CHANGED
|
@@ -39,15 +39,15 @@ class EndpointHandler():
|
|
| 39 |
# eliciting only uses text
|
| 40 |
doc = nlp(utterance.text)
|
| 41 |
if len(doc) > token_limit:
|
| 42 |
-
return self.handle_long_utterances(doc)
|
| 43 |
-
return utterance.text
|
| 44 |
|
| 45 |
def connecting_utterance_to_str(self, utterance: Utterance) -> str:
|
| 46 |
# connecting only uses text
|
| 47 |
doc = nlp(utterance.text)
|
| 48 |
if len(doc) > token_limit:
|
| 49 |
-
return self.handle_long_utterances(doc)
|
| 50 |
-
return utterance.text
|
| 51 |
|
| 52 |
def probing_utterance_to_str(self, utterance: Utterance) -> str:
|
| 53 |
#probing uses prior text and truncates end of the prior text
|
|
@@ -213,21 +213,18 @@ class EndpointHandler():
|
|
| 213 |
for utterance in self.process_vtt_transcript(data_file):
|
| 214 |
#TODO: filter out to only have SL utterances
|
| 215 |
if model_id == 'eliciting':
|
| 216 |
-
|
| 217 |
elif model_id == 'connecting':
|
| 218 |
-
|
| 219 |
elif model_id == 'probing':
|
| 220 |
utterance_str, is_list = self.probing_utterance_to_str(utterance)
|
| 221 |
-
if is_list == 'list':
|
| 222 |
-
utterances_list.extend(utterance_str)
|
| 223 |
-
else:
|
| 224 |
-
utterances_list.append(utterance_str)
|
| 225 |
elif model_id == 'adding_on':
|
| 226 |
utterance_str, is_list = self.adding_on_utterance_to_str(utterance)
|
| 227 |
-
|
| 228 |
-
|
| 229 |
-
|
| 230 |
-
|
|
|
|
| 231 |
|
| 232 |
cuda_available = torch.cuda.is_available()
|
| 233 |
if model_id == 'eliciting':
|
|
|
|
| 39 |
# eliciting only uses text
|
| 40 |
doc = nlp(utterance.text)
|
| 41 |
if len(doc) > token_limit:
|
| 42 |
+
return self.handle_long_utterances(doc), 'list'
|
| 43 |
+
return utterance.text, 'single'
|
| 44 |
|
| 45 |
def connecting_utterance_to_str(self, utterance: Utterance) -> tuple:
    """Convert an utterance into model-input text for the 'connecting' model.

    Connecting only uses the utterance's text (no prior-context window).

    Returns a 2-tuple so the caller knows whether to ``append`` or ``extend``:
        (text, 'single')  -- when the tokenized text fits within token_limit
        (chunks, 'list')  -- when it exceeds token_limit and is split by
                             handle_long_utterances

    NOTE(review): the original annotation said ``-> str`` but both branches
    return a tuple; corrected to ``tuple``. handle_long_utterances is assumed
    to return a list of chunk strings (the caller ``extend``s it) — confirm.
    """
    # connecting only uses text
    doc = nlp(utterance.text)
    if len(doc) > token_limit:
        # too long for one model input: split and tag as a list of chunks
        return self.handle_long_utterances(doc), 'list'
    return utterance.text, 'single'
|
| 51 |
|
| 52 |
def probing_utterance_to_str(self, utterance: Utterance) -> str:
|
| 53 |
#probing uses prior text and truncates end of the prior text
|
|
|
|
| 213 |
for utterance in self.process_vtt_transcript(data_file):
|
| 214 |
#TODO: filter out to only have SL utterances
|
| 215 |
if model_id == 'eliciting':
|
| 216 |
+
utterance_str, is_list = self.eliciting_utterance_to_str(utterance)
|
| 217 |
elif model_id == 'connecting':
|
| 218 |
+
utterance_str, is_list = self.connecting_utterance_to_str(utterance)
|
| 219 |
elif model_id == 'probing':
|
| 220 |
utterance_str, is_list = self.probing_utterance_to_str(utterance)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 221 |
elif model_id == 'adding_on':
|
| 222 |
utterance_str, is_list = self.adding_on_utterance_to_str(utterance)
|
| 223 |
+
|
| 224 |
+
if is_list == 'list':
|
| 225 |
+
utterances_list.extend(utterance_str)
|
| 226 |
+
else:
|
| 227 |
+
utterances_list.append(utterance_str)
|
| 228 |
|
| 229 |
cuda_available = torch.cuda.is_available()
|
| 230 |
if model_id == 'eliciting':
|