Create app.py
app.py
ADDED
@@ -0,0 +1,89 @@
import torch
import gradio as gr
from transformers import (
    AutoTokenizer,
    AutoModelForSequenceClassification,
    AutoModelForTokenClassification,
    pipeline,
)
import os

# Load the tokenizer and model for the first NER pipeline (extended sequence tags).
# Note: "ner" pipelines come from transformers, not gradio; gr.pipeline does not exist.
tokenizer_ext = AutoTokenizer.from_pretrained("AlGe/deberta-v3-large_token")
model_ext = AutoModelForTokenClassification.from_pretrained("AlGe/deberta-v3-large_token")
tokenizer_ext.model_max_length = 512
pipe_ext = pipeline("ner", model=model_ext, tokenizer=tokenizer_ext)

# Load the tokenizer and model for the second NER pipeline (Autobiographical Interview Scoring).
tokenizer_ais = AutoTokenizer.from_pretrained("AlGe/deberta-v3-large_AIS-token")
model_ais = AutoModelForTokenClassification.from_pretrained("AlGe/deberta-v3-large_AIS-token")
tokenizer_ais.model_max_length = 512
pipe_ais = pipeline("ner", model=model_ais, tokenizer=tokenizer_ais)
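
# Illustrative only: a raw "ner" pipeline call returns one dict per sub-token,
# e.g. pipe_ext("some text") -> [{'entity': 'B-XXX', 'score': 0.97,
# 'start': 0, 'end': 4, ...}, ...] (the tag 'B-XXX' is a placeholder, not the
# checkpoint's actual label set); process_ner below merges these sub-token
# hits into contiguous entity spans.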

# Load the two sequence-classification models (single-logit detail regressors).
# These loads use an auth token, so an HF_TOKEN secret must be set on the Space.
auth_token = os.environ['HF_TOKEN']
model1 = AutoModelForSequenceClassification.from_pretrained(
    "AlGe/deberta-v3-large_Int_segment", num_labels=1, use_auth_token=auth_token
)
tokenizer1 = AutoTokenizer.from_pretrained(
    "AlGe/deberta-v3-large_Int_segment", use_auth_token=auth_token
)

# model2 reuses tokenizer1's encodings; both checkpoints share the same base tokenizer.
model2 = AutoModelForSequenceClassification.from_pretrained(
    "AlGe/deberta-v3-large_seq_ext", num_labels=1, use_auth_token=auth_token
)

# Define functions to process inputs.
# The parameter is named ner_pipe (not pipeline) to avoid shadowing transformers.pipeline.
def process_ner(text, ner_pipe):
    output = ner_pipe(text)
    entities = []
    current_entity = None

    for token in output:
        entity_type = token['entity'][2:]    # strip the "B-"/"I-" prefix
        entity_prefix = token['entity'][:1]  # "B" or "I"

        # Start a new span on the first token, on a type change,
        # or on an explicit B- tag of the same type.
        if current_entity is None or entity_type != current_entity['entity'] or (entity_prefix == 'B' and entity_type == current_entity['entity']):
            if current_entity is not None:
                entities.append(current_entity)
            current_entity = {
                "entity": entity_type,
                "start": token['start'],
                "end": token['end'],
                "score": token['score']
            }
        else:
            # Continuation token: extend the span and keep the best score.
            current_entity['end'] = token['end']
            current_entity['score'] = max(current_entity['score'], token['score'])

    if current_entity is not None:
        entities.append(current_entity)

    return {"text": text, "entities": entities}
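
# Worked example with hypothetical tags: the sub-tokens
#   {'entity': 'B-INT', 'start': 0, 'end': 3, 'score': 0.91}
#   {'entity': 'I-INT', 'start': 3, 'end': 7, 'score': 0.88}
# merge into the single span {'entity': 'INT', 'start': 0, 'end': 7, 'score': 0.91},
# i.e. the {"text", "entities"} format that gr.HighlightedText renders.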

def process_classification(text, model1, model2, tokenizer1):
    inputs1 = tokenizer1(text, max_length=512, return_tensors='pt', truncation=True, padding=True)

    with torch.no_grad():
        outputs1 = model1(**inputs1)
        outputs2 = model2(**inputs1)

    prediction1 = outputs1.logits.item()
    prediction2 = outputs2.logits.item()
    # Guard against a zero denominator when both predictions are 0.
    total = prediction1 + prediction2
    score = prediction1 / total if total != 0 else 0.0

    return f"{round(prediction1, 1)}", f"{round(prediction2, 1)}", f"{round(score, 2)}"
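
# Illustrative arithmetic: predictions of 12.0 (internal) and 4.0 (external)
# yield an approximated internal-detail ratio of 12.0 / (12.0 + 4.0) = 0.75.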

# gr.Interface expects a single callable, not a dict of functions; run all
# five analyses in one function whose return values match `outputs` in order.
def analyze(text):
    ner_ext = process_ner(text, pipe_ext)
    ner_ais = process_ner(text, pipe_ais)
    int_count, ext_count, ratio = process_classification(text, model1, model2, tokenizer1)
    return ner_ext, ner_ais, int_count, ext_count, ratio

# Define Gradio interface
iface = gr.Interface(
    fn=analyze,
    inputs=gr.Textbox(placeholder="Enter sentence here..."),
    outputs=[
        gr.HighlightedText(label="NER - Extended Sequence Classification"),
        gr.HighlightedText(label="NER - Autobiographical Interview Scoring"),
        gr.Label(label="Internal Detail Count"),
        gr.Label(label="External Detail Count"),
        gr.Label(label="Approximated Internal Detail Ratio")
    ],
    title="Combined Demo",
    description="This demo combines two different NER models and two different sequence classification models. Enter a sentence to see the results.",
    theme="monochrome"
)

# Launch the combined interface
iface.launch()
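
# To try this locally (a sketch, not part of the Space runtime): install
# torch, transformers, and gradio; export an HF_TOKEN with read access to
# the private AlGe checkpoints; then run `python app.py` and open the
# local URL that Gradio prints.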