Update app.py
app.py CHANGED
@@ -1,94 +1,94 @@
import gradio as gr
from divscore import DivScore
import torch
import os

# Set environment variables for Hugging Face
os.environ["TOKENIZERS_PARALLELISM"] = "false"

# Initialize the DivScore detector with loading state
def load_model():
    try:
        detector = DivScore(
-            generalLM_name_or_path="
+            generalLM_name_or_path="mistral-community/Mistral-7B-v0.2",
            enhancedLM_name_or_path="RichardChenZH/DivScore_combined",
            device="cuda:0" if torch.cuda.is_available() else "cpu",
            use_bfloat16=True  # Use bfloat16 for better memory efficiency
        )
        return detector
    except Exception as e:
        print(f"Error loading model: {str(e)}")
        return None

# Global variable for the detector
detector = None

def detect_ai_text(text):
    """
    Detect if the input text is AI-generated using DivScore.
    Returns a tuple of (result_text, is_ai_generated).
    """
    global detector

    # Initialize detector if not already done
    if detector is None:
        detector = load_model()
        if detector is None:
            return "Error: Failed to load the model. Please try again later.", False

    if not text.strip():
        return "Please enter some text to analyze.", False

    try:
        score, entropy_score, ce_score = detector.compute_score(text)

        # Based on the paper's findings, we use 0.15 as the threshold
        is_ai_generated = score < 0.15

        result = f"DivScore: {score:.4f}\nEntropy Score: {entropy_score:.4f}\nCE Score: {ce_score:.4f}"
        return result, is_ai_generated

    except Exception as e:
        return f"Error occurred: {str(e)}", False

# Create the Gradio interface with loading state
with gr.Blocks(title="DivScore AI Text Detector") as demo:
    gr.Markdown("""
    # DivScore AI Text Detector

    This demo uses the DivScore model to detect if text was generated by an AI model.
    Enter your text below to analyze it.

    **Note:** The model may take a few moments to load on first use.
    """)

    with gr.Row():
        with gr.Column():
            text_input = gr.Textbox(
                label="Input Text",
                placeholder="Enter text to analyze...",
                lines=5
            )
            submit_btn = gr.Button("Analyze Text")

        with gr.Column():
            result_output = gr.Textbox(label="Analysis Results")
            ai_generated = gr.Checkbox(label="AI Generated", interactive=False)

    gr.Examples(
        examples=[
            ["The quick brown fox jumps over the lazy dog."],
            ["Based on the analysis of the data, we can conclude that the implementation of the new protocol has resulted in a statistically significant improvement in patient outcomes."]
        ],
        inputs=text_input
    )

    submit_btn.click(
        fn=detect_ai_text,
        inputs=text_input,
        outputs=[result_output, ai_generated]
    )

if __name__ == "__main__":
    demo.queue()  # Enable queuing for better handling of multiple requests
    demo.launch()
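For quick testing outside the Gradio UI, here is a minimal sketch of calling the detector directly. It assumes only the divscore API already used in app.py above (the DivScore constructor arguments and a compute_score(text) method returning three floats) and reuses the 0.15 threshold from the code's comment; the sample string is illustrative.

import torch
from divscore import DivScore

# Same construction as load_model() in app.py.
detector = DivScore(
    generalLM_name_or_path="mistral-community/Mistral-7B-v0.2",
    enhancedLM_name_or_path="RichardChenZH/DivScore_combined",
    device="cuda:0" if torch.cuda.is_available() else "cpu",
    use_bfloat16=True,
)

sample = "Based on the analysis of the data, we can conclude that the new protocol improved patient outcomes."
score, entropy_score, ce_score = detector.compute_score(sample)

# app.py flags text as AI-generated when the combined DivScore falls below 0.15.
print(f"DivScore: {score:.4f}  Entropy: {entropy_score:.4f}  CE: {ce_score:.4f}")
print("AI-generated" if score < 0.15 else "Likely human-written")

Lower scores indicate AI-generated text under this convention; the 0.15 cutoff comes from the comment in app.py and may need adjustment for other domains.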