Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -10,11 +10,9 @@ tokenizer = RobertaTokenizer.from_pretrained(model_dir)
|
|
| 10 |
model = RobertaForSequenceClassification.from_pretrained(model_dir)
|
| 11 |
#pipe = pipeline("text-classification", model="thugCodeNinja/robertatemp")
|
| 12 |
pipe = pipeline("text-classification",model=model,tokenizer=tokenizer)
|
| 13 |
-
def process_text(input_text
|
| 14 |
if input_text:
|
| 15 |
text = input_text
|
| 16 |
-
elif input_file is not None:
|
| 17 |
-
text = input_file
|
| 18 |
inputs = tokenizer(text, return_tensors="pt")
|
| 19 |
with torch.no_grad():
|
| 20 |
logits = model(**inputs).logits
|
|
@@ -59,9 +57,8 @@ def process_text(input_text, input_file):
|
|
| 59 |
return processed_result, prob, final_label, shap_plot_html,similar_articles
|
| 60 |
|
| 61 |
text_input = gr.Textbox(label="Enter text")
|
| 62 |
-
file_input = gr.File(label="Upload a text file")
|
| 63 |
outputs = [gr.Textbox(label="Processed text"), gr.Textbox(label="Probability"), gr.Textbox(label="Label"), gr.HTML(label="SHAP Plot"),gr.Dataframe(label="Similar Articles", headers=["Title", "Link"],row_count=5)]
|
| 64 |
title = "Group 2- ChatGPT text detection module"
|
| 65 |
description = '''Please upload text files and text input responsibly and await the explainable results. The approach in place includes finetuning a Roberta model for text classification. Once the classifications are done the decision is explained through the SHAP text plot.
|
| 66 |
The probability is particularly explained by the attention plots through SHAP'''
|
| 67 |
-
gr.Interface(fn=process_text,title=title,description=description, inputs=[text_input
|
|
|
|
| 10 |
model = RobertaForSequenceClassification.from_pretrained(model_dir)
|
| 11 |
#pipe = pipeline("text-classification", model="thugCodeNinja/robertatemp")
|
| 12 |
pipe = pipeline("text-classification",model=model,tokenizer=tokenizer)
|
| 13 |
+
def process_text(input_text):
|
| 14 |
if input_text:
|
| 15 |
text = input_text
|
|
|
|
|
|
|
| 16 |
inputs = tokenizer(text, return_tensors="pt")
|
| 17 |
with torch.no_grad():
|
| 18 |
logits = model(**inputs).logits
|
|
|
|
| 57 |
return processed_result, prob, final_label, shap_plot_html,similar_articles
|
| 58 |
|
| 59 |
text_input = gr.Textbox(label="Enter text")
|
|
|
|
| 60 |
outputs = [gr.Textbox(label="Processed text"), gr.Textbox(label="Probability"), gr.Textbox(label="Label"), gr.HTML(label="SHAP Plot"),gr.Dataframe(label="Similar Articles", headers=["Title", "Link"],row_count=5)]
|
| 61 |
title = "Group 2- ChatGPT text detection module"
|
| 62 |
description = '''Please upload text files and text input responsibly and await the explainable results. The approach in place includes finetuning a Roberta model for text classification. Once the classifications are done the decision is explained through the SHAP text plot.
|
| 63 |
The probability is particularly explained by the attention plots through SHAP'''
|
| 64 |
+
gr.Interface(fn=process_text,title=title,description=description, inputs=[text_input], outputs=outputs).launch()
|