Commit: "Update handler.py" — file handler.py changed (+3 lines / −3 lines).
@@ -53,7 +53,7 @@ class EndpointHandler():
(indentation was lost in this page render and is reconstructed approximately)
 53      if (indices_array):
 54          for result_indices in indices_array:
 55              text = self.tokenizer.decode(input_ids[result_indices[0]:result_indices[-1]])
-56              indices = [offset_mapping[result_indices[0]][ …(removed line truncated in the render)
+56              indices = [offset_mapping[result_indices[0]-1][1], offset_mapping[result_indices[-2]][1]]
 57              if text != "" and not text.isspace():
 58                  while True:
 59                      if text[0] == " ":
@@ -74,7 +74,7 @@ class EndpointHandler():
(indentation was lost in this page render and is reconstructed approximately)
 74          if token_logits[2] > label_tolerance:
 75              result_indices.append(index)
 76          else:
-77              result_indices
+77              result_indices.append(index)
 78              labeled_result_indices.append(result_indices)
 79              result_indices = []
 80
@@ -96,7 +96,7 @@ class EndpointHandler():
(indentation was lost in this page render and is reconstructed approximately)
 96              result_indices.append(index)
 97          else:
 98              # Check if backup result overlaps at all with any labeled result. If it does just ignore it
-99              result_indices
+99              result_indices.append(index)
 100             overlaps_labeled_result = False
 101             if (len(labeled_result_indices) > 0):
 102                 for index in result_indices:
Resulting code after the change (lines 53–59; indentation reconstructed approximately):
 53      if (indices_array):
 54          for result_indices in indices_array:
 55              text = self.tokenizer.decode(input_ids[result_indices[0]:result_indices[-1]])
 56              indices = [offset_mapping[result_indices[0]-1][1], offset_mapping[result_indices[-2]][1]]
 57              if text != "" and not text.isspace():
 58                  while True:
 59                      if text[0] == " ":
Resulting code after the change (lines 74–80; indentation reconstructed approximately):
 74          if token_logits[2] > label_tolerance:
 75              result_indices.append(index)
 76          else:
 77              result_indices.append(index)
 78              labeled_result_indices.append(result_indices)
 79              result_indices = []
 80
Resulting code after the change (lines 96–102; indentation reconstructed approximately):
 96              result_indices.append(index)
 97          else:
 98              # Check if backup result overlaps at all with any labeled result. If it does just ignore it
 99              result_indices.append(index)
 100             overlaps_labeled_result = False
 101             if (len(labeled_result_indices) > 0):
 102                 for index in result_indices: