SharvNey committed on
Commit
5bc439e
·
verified ·
1 Parent(s): f2ae882

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -5
app.py CHANGED
@@ -4,25 +4,30 @@ import numpy as np
4
  import gradio as gr
5
  from transformers import AutoTokenizer, AutoModelForSequenceClassification
6
 
7
- # Change to your Hugging Face model repo ID
8
  MODEL_ID = "SharvNey/capstone_project"
9
 
10
- tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
11
- model = AutoModelForSequenceClassification.from_pretrained(MODEL_ID)
 
12
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
13
  model.to(device)
14
  model.eval()
15
 
 
16
def classify_text(text):
    """Return a {label: probability} dict classifying *text* as human- or AI-written."""
    if not text.strip():
        # Nothing to classify — zero confidence for both labels.
        return {"🧑 Human-Written": 0.0, "🤖 AI-Generated": 0.0}

    encoded = tokenizer(text, truncation=True, padding=True, max_length=256, return_tensors="pt")
    encoded = {name: tensor.to(device) for name, tensor in encoded.items()}
    with torch.no_grad():
        output = model(**encoded)
        probabilities = torch.nn.functional.softmax(output.logits, dim=-1).cpu().numpy()[0]

    # Index 0 = human, index 1 = AI — assumed label order of the fine-tuned head; TODO confirm.
    return {"🧑 Human-Written": float(probabilities[0]), "🤖 AI-Generated": float(probabilities[1])}
25
 
 
26
  demo = gr.Interface(
27
  fn=classify_text,
28
  inputs=gr.Textbox(lines=8, placeholder="Paste text here..."),
@@ -32,4 +37,12 @@ demo = gr.Interface(
32
  )
33
 
34
  if __name__ == "__main__":
35
- demo.launch(share=True)
 
 
 
 
 
 
 
 
 
4
  import gradio as gr
5
  from transformers import AutoTokenizer, AutoModelForSequenceClassification
6
 
7
# 🔹 Change this to your actual Hugging Face model repo ID
import os  # needed for os.getenv/os.environ; harmless if already imported at the top of the file

MODEL_ID = "SharvNey/capstone_project"

# Load model & tokenizer.
# NOTE: `use_auth_token=` is deprecated in recent transformers releases (removed in v5);
# `token=` is the supported keyword and accepts None for public repos.
_hf_token = os.getenv("HF_TOKEN")
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, token=_hf_token)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_ID, token=_hf_token)

# Run on GPU when available, otherwise CPU; inference only, so switch to eval mode.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.eval()
17
# Prediction function
def classify_text(text):
    """Classify *text* as human- or AI-written; returns a label → probability dict."""
    # Blank/whitespace-only input: report zero confidence for both classes.
    if not text.strip():
        return {"🧑 Human-Written": 0.0, "🤖 AI-Generated": 0.0}

    inputs = tokenizer(text, truncation=True, padding=True, max_length=256, return_tensors="pt")
    inputs = {key: tensor.to(device) for key, tensor in inputs.items()}

    with torch.no_grad():
        logits = model(**inputs).logits
    scores = logits.softmax(dim=-1).cpu().numpy()[0]

    # Index 0 = human, index 1 = AI — assumed label order of the fine-tuned head; TODO confirm.
    return {"🧑 Human-Written": float(scores[0]), "🤖 AI-Generated": float(scores[1])}
29
 
30
+ # Gradio app
31
  demo = gr.Interface(
32
  fn=classify_text,
33
  inputs=gr.Textbox(lines=8, placeholder="Paste text here..."),
 
37
  )
38
 
39
if __name__ == "__main__":
    # On Hugging Face Spaces (SYSTEM=spaces) the platform exposes the app itself,
    # so bind to 0.0.0.0:7860; elsewhere (local/Colab) ask Gradio for a share link.
    if os.environ.get("SYSTEM") == "spaces":
        demo.launch(server_name="0.0.0.0", server_port=7860)
    else:
        demo.launch(share=True)