# AIDetector / app.py
# Hugging Face Space file (author: Jay-Rajput, commit message "ai detector new",
# commit f659ec0, 12.6 kB). Web-viewer chrome removed so the module is runnable.
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import numpy as np
from scipy import stats
import re
from collections import Counter
import math
class AdvancedAITextDetector:
    """Detect AI-generated text by blending a RoBERTa classifier with statistical heuristics.

    When the transformer model can be downloaded, its probability is combined
    (70/30) with a weighted bundle of statistical features; otherwise the
    statistical score is used alone.
    """

    def __init__(self):
        """Initialize the AI Text Detector with multiple detection methods."""
        # Pre-trained ChatGPT-output classifier pulled from the Hugging Face hub.
        self.model_name = "Hello-SimpleAI/chatgpt-detector-roberta"
        try:
            self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
            self.model = AutoModelForSequenceClassification.from_pretrained(self.model_name)
            self.model.eval()
            self.model_loaded = True
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
        # propagate; any model/download failure degrades to statistical-only mode.
        except Exception:
            print("Warning: Could not load transformer model. Using statistical methods only.")
            self.model_loaded = False

    def calculate_perplexity_score(self, text):
        """Return a bigram type/token ratio in [0, 1] (a cheap perplexity proxy).

        AI text tends to show less variation in bigrams, so a LOW value hints
        at machine generation. Returns 0.5 (neutral) for texts under 2 words.
        """
        words = text.split()
        if len(words) < 2:
            return 0.5
        bigrams = [(words[i], words[i + 1]) for i in range(len(words) - 1)]
        total_bigrams = len(bigrams)
        unique_bigrams = len(set(bigrams))
        return unique_bigrams / total_bigrams if total_bigrams > 0 else 0

    def calculate_burstiness(self, text):
        """Return a normalized [0, 1] burstiness score; human text is usually burstier.

        Burstiness here is the variance of sentence lengths scaled by the mean
        length. Returns 0.5 (neutral) when fewer than 2 sentences are found.
        """
        sentences = re.split(r'[.!?]+', text)
        sentence_lengths = [len(s.split()) for s in sentences if s.strip()]
        if len(sentence_lengths) < 2:
            return 0.5
        variance = np.var(sentence_lengths)
        mean_length = np.mean(sentence_lengths)
        # Scale by mean so long-sentence texts aren't automatically "bursty".
        burstiness = variance / (mean_length + 1) if mean_length > 0 else 0
        # The /10 cap is an empirical normalization into [0, 1].
        return min(burstiness / 10, 1.0)

    def calculate_repetition_score(self, text):
        """Return the fraction of 3-grams that occur more than once (AI repeats phrases more).

        Returns 0.5 (neutral) for texts under 3 words.
        """
        words = text.lower().split()
        if len(words) < 3:
            return 0.5
        trigrams = [' '.join(words[i:i + 3]) for i in range(len(words) - 2)]
        trigram_counts = Counter(trigrams)
        repeated_trigrams = sum(1 for count in trigram_counts.values() if count > 1)
        return repeated_trigrams / len(trigrams) if trigrams else 0

    def calculate_vocabulary_diversity(self, text):
        """Return the type-token ratio (unique words / total words) in [0, 1].

        AI text often has less diverse vocabulary, so a LOW value hints at
        machine generation. Returns 0.5 (neutral) when no words are found.
        """
        words = re.findall(r'\b\w+\b', text.lower())
        if not words:
            return 0.5
        return len(set(words)) / len(words)

    def calculate_punctuation_patterns(self, text):
        """Return a [0, 1] score where HIGHER means more regular (AI-like) punctuation.

        Computes the per-sentence density of internal punctuation and inverts
        its variance: consistent density across sentences reads as AI-like.
        Returns 0.5 (neutral) when no sentences are found.
        """
        sentences = re.split(r'[.!?]+', text)
        punct_variance = []
        for sentence in sentences:
            if sentence.strip():
                punct_count = len(re.findall(r'[,;:\-β€”()]', sentence))
                word_count = len(sentence.split())
                if word_count > 0:
                    punct_variance.append(punct_count / word_count)
        if not punct_variance:
            return 0.5
        variance = np.var(punct_variance)
        # Lower variance => more regular punctuation => more likely AI.
        return 1 - min(variance * 10, 1.0)

    def detect_ai_statistical(self, text):
        """Combine the statistical heuristics into one AI-likelihood score.

        Returns:
            (score, details): score in [0, 1] (higher = more AI-like) and a
            dict of the individual feature values; for texts under 50
            characters, returns (0.5, <message string>) instead of a dict.
        """
        if len(text.strip()) < 50:
            return 0.5, "Text too short for accurate analysis"
        perplexity_score = self.calculate_perplexity_score(text)
        burstiness = self.calculate_burstiness(text)
        repetition = self.calculate_repetition_score(text)
        vocab_diversity = self.calculate_vocab_diversity(text) if False else self.calculate_vocabulary_diversity(text)
        punct_patterns = self.calculate_punctuation_patterns(text)
        # Weighted combination; low bigram diversity, low burstiness, high
        # repetition, low vocabulary diversity, and regular punctuation all
        # push the score toward "AI". Weights sum to 1.0.
        ai_score = (
            (1 - perplexity_score) * 0.2 +   # low diversity in bigrams
            (1 - burstiness) * 0.25 +        # low burstiness
            repetition * 0.2 +               # high repetition
            (1 - vocab_diversity) * 0.2 +    # low vocabulary diversity
            punct_patterns * 0.15            # regular punctuation
        )
        return ai_score, {
            "perplexity_score": perplexity_score,
            "burstiness": burstiness,
            "repetition": repetition,
            "vocab_diversity": vocab_diversity,
            "punct_patterns": punct_patterns
        }

    def detect_ai_transformer(self, text):
        """Score the text with the RoBERTa classifier.

        Returns:
            (probability, message): probability in [0, 1] that the text is
            AI-generated, or (0.5, <error message>) if the model is missing
            or inference fails.
        """
        if not self.model_loaded:
            return 0.5, "Model not loaded"
        try:
            inputs = self.tokenizer(text, return_tensors="pt", truncation=True,
                                    max_length=512, padding=True)
            with torch.no_grad():
                outputs = self.model(**inputs)
                logits = outputs.logits
                probabilities = torch.softmax(logits, dim=-1)
            # Assumes class index 1 is "AI-generated" for this checkpoint
            # (per the model card) -- TODO confirm if the model is swapped.
            ai_probability = probabilities[0][1].item()
            return ai_probability, "Transformer model prediction"
        except Exception as e:
            return 0.5, f"Error in transformer model: {str(e)}"

    def detect(self, text):
        """Run the full detection pipeline and return a result dict.

        Returns a dict with keys: ``ai_probability`` (percentage, 0-100),
        ``classification``, ``confidence``, ``explanation`` and
        ``detailed_scores`` (statistical feature dict, possibly empty).
        """
        if not text or len(text.strip()) < 20:
            return {
                # Percentage scale (0-100) to match the normal return path;
                # previously this branch returned the raw 0.5, which rendered
                # as "0.5%" downstream.
                "ai_probability": 50.0,
                "classification": "Undetermined",
                "confidence": "Low",
                "explanation": "Text too short for accurate analysis. Please provide at least 50 characters.",
                "detailed_scores": {}
            }
        stat_score, stat_details = self.detect_ai_statistical(text)
        if self.model_loaded:
            transformer_score, _ = self.detect_ai_transformer(text)
            # Transformer prediction dominates; heuristics refine it.
            final_score = (transformer_score * 0.7 + stat_score * 0.3)
        else:
            final_score = stat_score
        # Map the blended score onto a 5-band classification.
        if final_score >= 0.8:
            classification = "AI-Generated"
            confidence = "High"
        elif final_score >= 0.6:
            classification = "Likely AI-Generated"
            confidence = "Medium"
        elif final_score >= 0.4:
            classification = "Uncertain"
            confidence = "Low"
        elif final_score >= 0.2:
            classification = "Likely Human-Written"
            confidence = "Medium"
        else:
            classification = "Human-Written"
            confidence = "High"
        # stat_details is a string (not a dict) when the text was under 50
        # characters; guard before passing it on.
        details = stat_details if isinstance(stat_details, dict) else {}
        explanation = self._generate_explanation(final_score, details)
        return {
            # float() strips any numpy scalar type leaking from np.var/np.mean.
            "ai_probability": round(float(final_score) * 100, 2),
            "classification": classification,
            "confidence": confidence,
            "explanation": explanation,
            "detailed_scores": details
        }

    def _generate_explanation(self, score, details):
        """Build a human-readable summary of the verdict and notable features."""
        explanations = []
        if score >= 0.7:
            explanations.append("This text shows strong indicators of AI generation.")
        elif score >= 0.3:
            explanations.append("This text shows mixed characteristics.")
        else:
            explanations.append("This text appears to be human-written.")
        if details:
            if details.get('burstiness', 0.5) < 0.3:
                explanations.append("β€’ Low sentence length variation (typical of AI)")
            elif details.get('burstiness', 0.5) > 0.7:
                explanations.append("β€’ High sentence length variation (typical of humans)")
            if details.get('vocab_diversity', 0.5) < 0.4:
                explanations.append("β€’ Limited vocabulary diversity")
            elif details.get('vocab_diversity', 0.5) > 0.6:
                explanations.append("β€’ Rich vocabulary diversity")
            if details.get('repetition', 0) > 0.2:
                explanations.append("β€’ Notable phrase repetition detected")
            if details.get('punct_patterns', 0.5) > 0.7:
                explanations.append("β€’ Regular punctuation patterns (AI-like)")
        return " ".join(explanations)
# Module-level singleton: the model is loaded once at import time so every
# Gradio request reuses the same detector instance.
detector = AdvancedAITextDetector()
def analyze_text(text):
    """Gradio callback: run detection on *text* and render a Markdown report."""
    result = detector.detect(text)

    # Header section of the report.
    report = f"""
## Detection Result
**Classification:** {result['classification']}
**AI Probability:** {result['ai_probability']}%
**Confidence Level:** {result['confidence']}
### Analysis Details
{result['explanation']}
### Detailed Metrics
"""
    # One bullet per statistical feature (empty dict yields no lines).
    report += "".join(
        f"- {metric.replace('_', ' ').title()}: {round(score, 3)}\n"
        for metric, score in result['detailed_scores'].items()
    )

    # ASCII bar chart: 20 cells, each representing 5 percentage points.
    ai_prob = result['ai_probability']
    human_prob = 100 - ai_prob
    ai_cells = int(ai_prob / 5)
    human_cells = int(human_prob / 5)
    chart = f"""
### Probability Distribution
```
AI-Generated: {'β–ˆ' * ai_cells}{'β–‘' * (20 - ai_cells)} {ai_prob}%
Human-Written: {'β–ˆ' * human_cells}{'β–‘' * (20 - human_cells)} {human_prob}%
```
"""
    return report + chart
# --- Gradio UI wiring -------------------------------------------------------
# Single-textbox-in, Markdown-out interface around analyze_text.
interface = gr.Interface(
    fn=analyze_text,
    inputs=gr.Textbox(
        lines=10,
        placeholder="Paste the text you want to analyze here...",
        label="Input Text"
    ),
    outputs=gr.Markdown(label="Analysis Result"),
    title="πŸ” Advanced AI Text Detector",
    description="""
This advanced AI text detector uses multiple techniques to identify AI-generated content:
- **Transformer-based detection** using fine-tuned RoBERTa model
- **Statistical analysis** including burstiness, perplexity, and repetition patterns
- **Linguistic features** such as vocabulary diversity and punctuation patterns
The tool is particularly effective at detecting text from ChatGPT, GPT-4, and similar language models.
For best results, provide at least 100 words of text.
""",
    # Two built-in examples: one AI-styled essay, one conversational human anecdote.
    examples=[
        ["The impact of artificial intelligence on modern society cannot be overstated. From healthcare to transportation, AI systems are revolutionizing how we live and work. Machine learning algorithms process vast amounts of data to identify patterns and make predictions with unprecedented accuracy. In medical diagnosis, AI assists doctors in detecting diseases earlier than ever before. Autonomous vehicles promise to transform our cities and reduce traffic accidents. However, these advancements also raise important ethical questions about privacy, employment, and human autonomy that society must carefully consider."],
        ["So I was walking down the street yesterday, right? And this crazy thing happened - I mean, you won't believe it. There was this dog, just a regular golden retriever, but it was wearing these ridiculous sunglasses. Like, who puts sunglasses on a dog? Anyway, the owner was this old lady, must've been like 80 or something, and she was just chatting away on her phone, completely oblivious. The dog looked so confused! I couldn't help but laugh. Sometimes you see the weirdest stuff when you're just out and about, you know?"]
    ],
    theme=gr.themes.Soft(),
    analytics_enabled=False
)

# Launch the app only when executed as a script (not when imported).
if __name__ == "__main__":
    interface.launch()