yagnik12 committed on
Commit
ae05987
·
verified ·
1 Parent(s): f96aa0f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -22
app.py CHANGED
@@ -1,27 +1,18 @@
1
  import gradio as gr
2
- from transformers import AutoTokenizer, AutoModelForSequenceClassification, GPT2LMHeadModel
3
  import torch
4
  import math
5
 
6
- # -------------------------------
7
- # Load Models
8
- # -------------------------------
9
-
10
- # RoBERTa AI Detector (pretrained model from Hugging Face)
11
- detector_name = "Hello-SimpleAI/AI-Text-Detector-RoBERTa"
12
  detector_tokenizer = AutoTokenizer.from_pretrained(detector_name)
13
  detector_model = AutoModelForSequenceClassification.from_pretrained(detector_name)
14
 
15
- # GPT-2 for perplexity calculation
16
  gpt2_name = "gpt2"
17
  gpt2_tokenizer = AutoTokenizer.from_pretrained(gpt2_name)
18
  gpt2_model = GPT2LMHeadModel.from_pretrained(gpt2_name)
19
 
20
-
21
- # -------------------------------
22
- # Helper Functions
23
- # -------------------------------
24
-
25
  def compute_perplexity(text: str) -> float:
26
  """Calculate perplexity using GPT-2 language model."""
27
  enc = gpt2_tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
@@ -30,12 +21,11 @@ def compute_perplexity(text: str) -> float:
30
  loss = gpt2_model(input_ids, labels=input_ids).loss
31
  return math.exp(loss.item())
32
 
33
-
34
  def analyze_text(user_text: str):
35
  if not user_text.strip():
36
  return {"error": "Please enter some text to analyze."}
37
 
38
- # Model 1: RoBERTa detector
39
  inputs = detector_tokenizer(user_text, return_tensors="pt", truncation=True, max_length=512)
40
  with torch.no_grad():
41
  logits = detector_model(**inputs).logits
@@ -52,17 +42,12 @@ def analyze_text(user_text: str):
52
  final_human = 1 - final_ai
53
 
54
  return {
55
- "RoBERTa AI Probability": round(ai_prob * 100, 2),
56
  "Perplexity-based AI Probability": round(ppl_score * 100, 2),
57
  "Final AI Probability (avg)": round(final_ai * 100, 2),
58
  "Final Human Probability (avg)": round(final_human * 100, 2),
59
  }
60
 
61
-
62
- # -------------------------------
63
- # Gradio UI
64
- # -------------------------------
65
-
66
  with gr.Blocks() as demo:
67
  gr.Markdown("# 🔍 Free AI vs Human Text Detector (Demo)")
68
 
@@ -78,6 +63,5 @@ with gr.Blocks() as demo:
78
 
79
  run_btn.click(analyze_text, inputs=user_input, outputs=output)
80
 
81
-
82
  if __name__ == "__main__":
83
  demo.launch(server_name="0.0.0.0", server_port=7860)
 
1
  import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification, GPT2LMHeadModel
3
  import torch
4
  import math
5
 
6
+ # Load the ChatGPT detector model
7
+ detector_name = "Hello-SimpleAI/chatgpt-detector-roberta"
 
 
 
 
8
  detector_tokenizer = AutoTokenizer.from_pretrained(detector_name)
9
  detector_model = AutoModelForSequenceClassification.from_pretrained(detector_name)
10
 
11
+ # Load GPT-2 for perplexity calculation
12
  gpt2_name = "gpt2"
13
  gpt2_tokenizer = AutoTokenizer.from_pretrained(gpt2_name)
14
  gpt2_model = GPT2LMHeadModel.from_pretrained(gpt2_name)
15
 
 
 
 
 
 
16
  def compute_perplexity(text: str) -> float:
17
  """Calculate perplexity using GPT-2 language model."""
18
  enc = gpt2_tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
 
21
  loss = gpt2_model(input_ids, labels=input_ids).loss
22
  return math.exp(loss.item())
23
 
 
24
  def analyze_text(user_text: str):
25
  if not user_text.strip():
26
  return {"error": "Please enter some text to analyze."}
27
 
28
+ # Model 1: ChatGPT detector
29
  inputs = detector_tokenizer(user_text, return_tensors="pt", truncation=True, max_length=512)
30
  with torch.no_grad():
31
  logits = detector_model(**inputs).logits
 
42
  final_human = 1 - final_ai
43
 
44
  return {
45
+ "ChatGPT AI Probability": round(ai_prob * 100, 2),
46
  "Perplexity-based AI Probability": round(ppl_score * 100, 2),
47
  "Final AI Probability (avg)": round(final_ai * 100, 2),
48
  "Final Human Probability (avg)": round(final_human * 100, 2),
49
  }
50
 
 
 
 
 
 
51
  with gr.Blocks() as demo:
52
  gr.Markdown("# 🔍 Free AI vs Human Text Detector (Demo)")
53
 
 
63
 
64
  run_btn.click(analyze_text, inputs=user_input, outputs=output)
65
 
 
66
  if __name__ == "__main__":
67
  demo.launch(server_name="0.0.0.0", server_port=7860)