onkar-waghmode committed on
Commit
0521579
·
1 Parent(s): 8e421d8
Files changed (1) hide show
  1. app.py +20 -2
app.py CHANGED
@@ -10,6 +10,8 @@ import torch
10
  import numpy as np
11
  from typing import List, Dict, Tuple
12
  import logging
 
 
13
 
14
  # Setup logging
15
  logging.basicConfig(level=logging.INFO)
@@ -32,11 +34,24 @@ t5_tokenizer = AutoTokenizer.from_pretrained("Vamsi/T5_Paraphrase_Paws")
32
  t5_model = AutoModelForSeq2SeqLM.from_pretrained("Vamsi/T5_Paraphrase_Paws")
33
  t5_model.to(device)
34
 
 
35
  similarity_model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2", device=device)
36
  nlp = spacy.load("en_core_web_sm")
37
 
 
 
 
38
  print("Models loaded successfully!")
39
 
 
 
 
 
 
 
 
 
 
40
  # ============================================================================
41
  # STAGE 1: PARAPHRASING WITH T5 MODEL
42
  # ============================================================================
@@ -405,6 +420,7 @@ def humanize_text(
405
 
406
  # Calculate similarity
407
  similarity = calculate_similarity(input_text, result)
 
408
 
409
  # Generate status message
410
  if not stages_applied:
@@ -412,7 +428,7 @@ def humanize_text(
412
  else:
413
  status = f"✅ Successfully applied: {', '.join(stages_applied)}"
414
 
415
- return result, similarity, status
416
 
417
  except Exception as e:
418
  logger.error(f"Error in humanization: {e}")
@@ -455,6 +471,8 @@ def create_gradio_interface():
455
  with gr.Row():
456
  similarity_output = gr.Number(label="Similarity Score", precision=4)
457
  status_output = gr.Textbox(label="Status",interactive=False,lines=2, max_lines=10)
 
 
458
 
459
  with gr.Column(scale=1):
460
  gr.Markdown("## 🎛️ Pipeline Configuration")
@@ -502,7 +520,7 @@ def create_gradio_interface():
502
  hedge_prob, booster_prob, connector_prob, starter_prob,
503
  split_prob, merge_prob, min_split_length, max_merge_length
504
  ],
505
- outputs=[output_text, similarity_output, status_output]
506
  )
507
 
508
  clear_btn.click(
 
10
  import numpy as np
11
  from typing import List, Dict, Tuple
12
  import logging
13
+ from transformers import pipeline
14
+
15
 
16
  # Setup logging
17
  logging.basicConfig(level=logging.INFO)
 
34
  t5_model = AutoModelForSeq2SeqLM.from_pretrained("Vamsi/T5_Paraphrase_Paws")
35
  t5_model.to(device)
36
 
37
+
38
  similarity_model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2", device=device)
39
  nlp = spacy.load("en_core_web_sm")
40
 
41
+
42
+ ai_detector_pipe = pipeline("text-classification", model="Hello-SimpleAI/chatgpt-detector-roberta")
43
+
44
  print("Models loaded successfully!")
45
 
46
+
47
+
48
+ # ============================================================================
49
+ # AI Detection
50
+ # ============================================================================
51
def predict_ai_content(text: str) -> Tuple[str, float]:
    """Classify *text* as human-written or AI-generated.

    Uses the module-level ``ai_detector_pipe`` text-classification
    pipeline (Hello-SimpleAI/chatgpt-detector-roberta).

    Args:
        text: The text to run through the detector.

    Returns:
        A ``(label, score)`` tuple: ``label`` is the predicted class name
        as reported by the model (presumably "Human" vs "ChatGPT" — confirm
        against the model card) and ``score`` is the classifier's confidence
        for that label.
    """
    # truncation=True guards against inputs longer than the model's max
    # sequence length, which would otherwise raise at inference time.
    # The pipeline returns a list of {'label': ..., 'score': ...} dicts;
    # only the top prediction is needed here.
    top = ai_detector_pipe(text, truncation=True)[0]
    return top['label'], top['score']
54
+
55
  # ============================================================================
56
  # STAGE 1: PARAPHRASING WITH T5 MODEL
57
  # ============================================================================
 
420
 
421
  # Calculate similarity
422
  similarity = calculate_similarity(input_text, result)
423
+ ai_content_label, ai_content_score = predict_ai_content(result)
424
 
425
  # Generate status message
426
  if not stages_applied:
 
428
  else:
429
  status = f"✅ Successfully applied: {', '.join(stages_applied)}"
430
 
431
+ return result, similarity, status,ai_content_label, ai_content_score
432
 
433
  except Exception as e:
434
  logger.error(f"Error in humanization: {e}")
 
471
  with gr.Row():
472
  similarity_output = gr.Number(label="Similarity Score", precision=4)
473
  status_output = gr.Textbox(label="Status",interactive=False,lines=2, max_lines=10)
474
+ ai_content_label = gr.Textbox(label="AI Content Detection",interactive=False)
475
+ ai_content_score = gr.Number(label="AI Content Score", precision=4,interactive=False)
476
 
477
  with gr.Column(scale=1):
478
  gr.Markdown("## 🎛️ Pipeline Configuration")
 
520
  hedge_prob, booster_prob, connector_prob, starter_prob,
521
  split_prob, merge_prob, min_split_length, max_merge_length
522
  ],
523
+ outputs=[output_text, similarity_output, status_output,ai_content_label, ai_content_score]
524
  )
525
 
526
  clear_btn.click(