VictorM-Coder committed on
Commit
f18fa47
·
verified ·
1 Parent(s): 2322788

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -11
app.py CHANGED
@@ -1,7 +1,7 @@
1
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
2
  import torch, gradio as gr
3
 
4
-
5
  model_name = "alykassem/FLAN-T5-Paraphraser"
6
  tokenizer = AutoTokenizer.from_pretrained(model_name)
7
  model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
@@ -10,28 +10,36 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
10
  model = model.to(device)
11
  model.eval()
12
 
13
- def paraphrase_dipper(text, diversity=0.5, reordering=0.5):
14
- inputs = tokenizer([text], return_tensors="pt", truncation=True, padding=True).to(device)
 
 
 
 
 
15
  outputs = model.generate(
16
  **inputs,
17
  max_new_tokens=256,
18
- top_p=1.0,
19
- diversity_penalty=float(diversity),
20
- num_reorder=float(reordering),
21
  do_sample=True,
 
 
 
 
22
  )
 
23
  return tokenizer.decode(outputs[0], skip_special_tokens=True)
24
 
 
25
  iface = gr.Interface(
26
- fn=paraphrase_dipper,
27
  inputs=[
28
  gr.Textbox(lines=8, placeholder="Paste full text here..."),
29
- gr.Slider(0.0, 1.0, step=0.1, value=0.5, label="Lexical Diversity"),
30
- gr.Slider(0.0, 1.0, step=0.1, value=0.5, label="Reordering Amount")
31
  ],
32
  outputs=gr.Textbox(label="Paraphrased & Humanized Text"),
33
- title="DIPPER Paraphraser (AI-Detector Evading)",
34
- description="Paraphrase full text with diversity and reordering control to reduce AI detection."
35
  )
36
 
37
  iface.launch()
 
1
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

import gradio as gr
import torch

# Load Model: FLAN-T5 fine-tuned for paraphrasing.
model_name = "alykassem/FLAN-T5-Paraphraser"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# Run on GPU when available. NOTE(review): this line exists in the file but
# was collapsed out of the diff view; restored here so `device` is defined
# before use — confirm against the full file.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
model.eval()  # inference only — disables dropout etc.
12
 
13
# Paraphrasing Function
def paraphrase_flan(text, diversity=0.7, temperature=0.9):
    """Paraphrase *text* with the FLAN-T5 paraphraser model.

    Args:
        text: Input passage to rewrite.
        diversity: 0.0–1.0 slider value controlling lexical variety.
        temperature: Sampling temperature (higher = more creative).

    Returns:
        The paraphrased text, or a warning string when the input is empty.
    """
    # Guard clause: nothing to paraphrase.
    if not text.strip():
        return "⚠️ Please enter some text"

    inputs = tokenizer(
        [f"paraphrase: {text}"],
        return_tensors="pt",
        truncation=True,
        padding=True,
    ).to(device)

    # BUG FIX: `diversity_penalty` only applies to *group beam search*
    # (num_beam_groups > 1 with do_sample=False); combined with
    # do_sample=True it is ignored (and newer transformers versions reject
    # it), so the "Lexical Diversity" slider had no effect. Map the slider
    # onto `repetition_penalty` instead, which does influence sampling.
    with torch.no_grad():  # inference only — skip autograd bookkeeping
        outputs = model.generate(
            **inputs,
            max_new_tokens=256,
            do_sample=True,
            top_p=0.92,                                 # nucleus sampling
            temperature=float(temperature),             # creativity
            repetition_penalty=1.0 + float(diversity),  # lexical variety
            num_return_sequences=1,
        )

    return tokenizer.decode(outputs[0], skip_special_tokens=True)
31
 
32
# Gradio UI: wire the paraphrasing function to a simple web form.
text_input = gr.Textbox(lines=8, placeholder="Paste full text here...")
diversity_slider = gr.Slider(0.0, 1.0, step=0.1, value=0.7, label="Lexical Diversity")
temperature_slider = gr.Slider(0.5, 1.5, step=0.1, value=0.9, label="Temperature")
result_box = gr.Textbox(label="Paraphrased & Humanized Text")

iface = gr.Interface(
    fn=paraphrase_flan,
    inputs=[text_input, diversity_slider, temperature_slider],
    outputs=result_box,
    title="FLAN-T5 Paraphraser (Humanizer)",
    description="High-quality paraphrasing with adjustable diversity and temperature to reduce AI detection.",
)

iface.launch()