import html
import json

import gradio as gr

from detector import analyze_text, get_components
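# Note: `detector` is this Space's companion module (not shown here). Based on
# how it is used below, analyze_text(text, threshold, chunk_size) is assumed to
# return a dict with overall_type, overall_confidence, overall_score,
# has_artifacts, ai_chunks, human_chunks, total_chunks, and a "chunks" list
# whose entries carry "type", "confidence", and "text" keys (plus an "error"
# key on failure); get_components() is assumed to load and cache the model.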
# Pre-load the model once at startup so the first request does not pay the loading cost
print("Starting AI Text Detector...")
try:
    get_components()
    model_status = "✅ Model loaded successfully!"
except Exception as e:
    model_status = f"⚠️ Model loading issue: {e}"
print(model_status)
# Custom CSS for better styling
css = """
.gradio-container {
max-width: 1200px !important;
}
.result-human {
padding: 10px;
border-radius: 5px;
background: #f0f8f0;
border-left: 4px solid #4CAF50;
}
.result-ai {
padding: 10px;
border-radius: 5px;
background: #fff0f0;
border-left: 4px solid #f44336;
}
.chunk-human {
background: #f8fff8;
margin: 5px 0;
padding: 8px;
border-radius: 3px;
border-left: 3px solid #4CAF50;
}
.chunk-ai {
background: #fff8f8;
margin: 5px 0;
padding: 8px;
border-radius: 3px;
border-left: 3px solid #f44336;
}
.confidence-high { color: #388E3C; }
.confidence-medium { color: #F57C00; }
.confidence-low { color: #D32F2F; }
"""
def analyze_text_interface(text, threshold, chunk_size):
    """Gradio callback: run the detector and format the HTML and raw outputs."""
    if not text or not text.strip():
        return "❌ Please enter some text to analyze.", "", ""
    try:
        result = analyze_text(text, threshold=threshold, chunk_size=chunk_size)
        if "error" in result:
            return f"❌ Error: {result['error']}", "", ""

        # Overall result card
        overall_html = f"""
        <div class="result-{result['overall_type'].lower()}">
            <h3>Overall Result: {result['overall_type']}</h3>
            <p><strong>Confidence:</strong> {result['overall_confidence']:.2%}</p>
            <p><strong>AI Score:</strong> {result['overall_score']:.3f}</p>
            <p><strong>AI Artifacts Detected:</strong> {'✅ Yes' if result['has_artifacts'] else '❌ No'}</p>
            <p><strong>Chunk Analysis:</strong> {result['ai_chunks']} AI / {result['human_chunks']} Human</p>
        </div>
        """

        # Per-chunk details, color-coded by confidence band
        chunk_html = "<h3>Detailed Chunk Analysis:</h3>"
        for i, chunk in enumerate(result['chunks']):
            if chunk['confidence'] > 0.8:
                confidence_class = "confidence-high"
            elif chunk['confidence'] > 0.6:
                confidence_class = "confidence-medium"
            else:
                confidence_class = "confidence-low"
            # Escape user-supplied text before embedding it in HTML, and truncate long chunks
            preview = html.escape(chunk['text'][:100]) + ('...' if len(chunk['text']) > 100 else '')
            chunk_html += f"""
            <div class="chunk-{chunk['type'].lower()}">
                <strong>Chunk {i + 1}:</strong> {chunk['type']}
                <br><small>Confidence: <span class="{confidence_class}">{chunk['confidence']:.2%}</span></small>
                <br><small>Text: "{preview}"</small>
            </div>
            """

        # Machine-readable copy of the results (valid JSON, unlike str(dict));
        # default=str guards against non-serializable values such as numpy floats
        raw_data = {
            "overall_type": result['overall_type'],
            "overall_confidence": result['overall_confidence'],
            "overall_score": result['overall_score'],
            "has_artifacts": result['has_artifacts'],
            "chunk_analysis": {
                "ai_chunks": result['ai_chunks'],
                "human_chunks": result['human_chunks'],
                "total_chunks": result['total_chunks'],
            },
            "chunks": result['chunks'],
        }
        return overall_html, chunk_html, json.dumps(raw_data, indent=2, default=str)
    except Exception as e:
        return f"❌ Analysis failed: {e}", "", ""
# Example texts
examples = [
    ["This is a sample text written by a human. It contains natural variations in writing style and occasional imperfections that make it authentic."],
    ["The aforementioned textual content exhibits characteristics consistent with AI-generated material, including syntactic patterns and lexical choices commonly associated with large language models."],
    ["Hello world! This is a test. I hope this works correctly. The weather is nice today."],
]
# Create Gradio interface
with gr.Blocks(css=css, title="AI Text Detector") as demo:
    gr.Markdown(
        f"""
        # 🔍 AI Text Detector

        *Detect AI-generated text using advanced machine learning models*

        **Model Status:** {model_status}
        """
    )
    with gr.Row():
        with gr.Column():
            text_input = gr.Textbox(
                label="Input Text",
                placeholder="Paste or type the text you want to analyze here...",
                lines=8,
                max_lines=20,
            )
            with gr.Row():
                threshold = gr.Slider(
                    minimum=0.1,
                    maximum=0.9,
                    value=0.5,
                    step=0.05,
                    label="Detection Threshold",
                    info="Higher values = stricter AI detection",
                )
                chunk_size = gr.Slider(
                    minimum=40,
                    maximum=200,
                    value=80,
                    step=10,
                    label="Chunk Size (tokens)",
                    info="Smaller chunks = more detailed analysis",
                )
            analyze_btn = gr.Button("Analyze Text", variant="primary")
            gr.Examples(
                examples=examples,
                inputs=text_input,
                label="Try these examples:",
            )
        with gr.Column():
            overall_output = gr.HTML(label="Overall Result")
            chunk_output = gr.HTML(label="Chunk Details")
            raw_output = gr.Textbox(
                label="Raw Data (for download)",
                lines=4,
                max_lines=10,
            )
    # Footer
    gr.Markdown(
        """
        ---
        **How it works:**
        - Text is split into meaningful chunks
        - Each chunk is analyzed by the AI detection model
        - Results are aggregated for overall classification
        - Built with the `abhi099k/ai-text-detector-v-n4.0` model

        **Note:** This tool provides probabilistic estimates and should be used as one of several indicators when evaluating text authenticity.
        """
    )
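    # A minimal sketch of the aggregation step described in the footer above,
    # assuming detector.analyze_text averages per-chunk AI scores and compares
    # the mean against the threshold (score_chunk is hypothetical, and the real
    # detector.py may aggregate differently):
    #
    #   chunk_scores = [score_chunk(c) for c in chunks]        # one AI score per chunk
    #   overall_score = sum(chunk_scores) / len(chunk_scores)  # mean AI score
    #   overall_type = "AI" if overall_score >= threshold else "Human"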
    # Connect the function
    analyze_btn.click(
        fn=analyze_text_interface,
        inputs=[text_input, threshold, chunk_size],
        outputs=[overall_output, chunk_output, raw_output],
    )
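# Hugging Face Spaces expects the app to listen on 0.0.0.0:7860, which is why
# those values are hard-coded below; adjust server_name/server_port when
# deploying elsewhere.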
# Launch the app
if __name__ == "__main__":
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
    )