Hugging Face Spaces — diff view for commit "Update app.py" (file: app.py, CHANGED).
Space build status: Runtime error.
|
@@ -1,40 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
def predict(text):
|
| 2 |
greeting_pattern = r"^(Halló|Hæ|Sæl|Góða|Kær|Daginn|Kvöldið|Ágæt|Elsku)"
|
|
|
|
| 3 |
greeting_feedback = ""
|
| 4 |
|
| 5 |
-
#
|
| 6 |
-
|
| 7 |
-
input_ids = inputs['input_ids']
|
| 8 |
-
attention_mask = inputs['attention_mask']
|
| 9 |
-
|
| 10 |
-
# Set model to evaluation mode
|
| 11 |
-
model.eval()
|
| 12 |
-
|
| 13 |
-
# Use the model's forward pass
|
| 14 |
-
output = model(input_ids, attention_mask=attention_mask)
|
| 15 |
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
attributions = ig.attribute(inputs=input_ids,
|
| 23 |
-
additional_forward_args=(attention_mask,),
|
| 24 |
-
target=top_pred_indices)
|
| 25 |
-
|
| 26 |
-
# Sum the attributions across the embedding dimension and normalize
|
| 27 |
-
attributions_sum = attributions.sum(dim=-1).squeeze(0) # Sum across embedding dim
|
| 28 |
-
attributions_sum = attributions_sum / torch.norm(attributions_sum)
|
| 29 |
-
|
| 30 |
-
# Get the most influential token
|
| 31 |
-
most_influential_token_index = attributions_sum.argmax().item()
|
| 32 |
-
most_influential_word = tokenizer.decode([input_ids[0][most_influential_token_index]])
|
| 33 |
-
|
| 34 |
-
# Prepare the response
|
| 35 |
-
response = f"Most influential word: {most_influential_word}\n\n"
|
| 36 |
-
for i, score in enumerate(predictions[0]):
|
| 37 |
-
label = model.config.id2label[i]
|
| 38 |
response += f"{label}: {score:.3f}\n"
|
| 39 |
|
| 40 |
if not re.match(greeting_pattern, text, re.IGNORECASE):
|
|
@@ -43,3 +27,23 @@ def predict(text):
|
|
| 43 |
response += greeting_feedback
|
| 44 |
|
| 45 |
return response
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import gradio as gr
from transformers import pipeline
import re

# Text-classification pipeline backed by the custom fine-tuned model.
# return_all_scores=True makes the pipeline emit a score for every label,
# nested as [[{label, score}, ...]] — predict() unwraps this with results[0].
# NOTE(review): return_all_scores is deprecated in newer transformers releases
# in favour of top_k=None, but top_k=None changes the result nesting that
# predict() depends on — confirm the installed version before migrating.
text_pipe = pipeline("text-classification", model="karalif/myTestModel", return_all_scores=True)
| 8 |
def predict(text):
|
| 9 |
greeting_pattern = r"^(Halló|Hæ|Sæl|Góða|Kær|Daginn|Kvöldið|Ágæt|Elsku)"
|
| 10 |
+
|
| 11 |
greeting_feedback = ""
|
| 12 |
|
| 13 |
+
# Adding the input text at the beginning of the response, followed by a new line
|
| 14 |
+
response = f"Input: {text}\n\n" # Added this line to include the input in the response
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 15 |
|
| 16 |
+
results = text_pipe(text)
|
| 17 |
+
all_scores = results[0]
|
| 18 |
+
|
| 19 |
+
for result in all_scores:
|
| 20 |
+
label = result['label']
|
| 21 |
+
score = result['score']
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 22 |
response += f"{label}: {score:.3f}\n"
|
| 23 |
|
| 24 |
if not re.match(greeting_pattern, text, re.IGNORECASE):
|
|
|
|
| 27 |
response += greeting_feedback
|
| 28 |
|
| 29 |
return response
|
# HTML banner rendered above the interface: a centered institution logo.
# NOTE(review): the logo is loaded over plain http — browsers may block this
# as mixed content when the Space is served over https; confirm and consider
# switching the URL scheme.
description_html = """
<center>
<img src='http://www.ru.is/media/HR_logo_vinstri_transparent.png' width='250' height='auto'>
</center>
"""

# Wire predict() into a Gradio UI: one free-text input box, one text output
# box for the generated feedback, and a set of Icelandic example sentences.
demo = gr.Interface(
    fn=predict,
    inputs=gr.TextArea(label="Enter text here:"),
    outputs=gr.TextArea(label="Feedback on text input:"),
    description=description_html,
    examples=[
        ["Það voru vitni að árásinni sem tilkynntu málið til lögreglu sem kom skjótt á vettvang."],
        ["Ég held þetta sé ekki góður tími fara heimsókn."],
        ["Sæl og blessuð Kristín, hvað er að frella af þér gamla??"],
        ["Hver á þenan bússtað? já eða nei."],
        ["Hafi þau svo látið gólfið þorna vel og síðan flotað það til lagfæringar eftir motturnar."],
    ],
)
demo.launch()
|