ai-assist-sh committed on
Commit
b273065
·
verified ·
1 Parent(s): e98be26

Upload 3 files

Browse files
Files changed (3) hide show
  1. README.md +1 -0
  2. app.py +47 -45
  3. requirements.txt +1 -0
README.md CHANGED
@@ -9,3 +9,4 @@ app_file: app.py
9
  pinned: false
10
  ---
11
 
 
 
9
  pinned: false
10
  ---
11
 
12
+
app.py CHANGED
@@ -1,20 +1,17 @@
1
- import os, re, gradio as gr
2
- from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
3
 
 
4
  os.environ.setdefault("TOKENIZERS_PARALLELISM", "false")
5
 
6
  URL_MODEL_ID = "CrabInHoney/urlbert-tiny-v4-malicious-url-classifier"
7
  URL_LABEL_MAP = {"LABEL_0":"benign","LABEL_1":"defacement","LABEL_2":"malware","LABEL_3":"phishing"}
8
  URL_RE = re.compile(r"""(?xi)\b(?:https?://|www\.)[a-z0-9\-._~%]+(?:/[^\s<>"']*)?""")
9
 
10
- _url_pipe = None
11
- def _get_url_pipe():
12
- global _url_pipe
13
- if _url_pipe is None:
14
- tok = AutoTokenizer.from_pretrained(URL_MODEL_ID)
15
- mdl = AutoModelForSequenceClassification.from_pretrained(URL_MODEL_ID)
16
- _url_pipe = pipeline("text-classification", model=mdl, tokenizer=tok, device=-1, top_k=None)
17
- return _url_pipe
18
 
19
  def _pretty(raw, id2label):
20
  if id2label:
@@ -23,55 +20,60 @@ def _pretty(raw, id2label):
23
  if k in id2label: return id2label[k]
24
  return URL_LABEL_MAP.get(raw, raw)
25
 
26
- def _extract_urls(text: str):
27
- return sorted(set(m.group(0) for m in URL_RE.finditer(text or "")))
28
-
29
- def _classify_url(u: str):
30
- pipe = _get_url_pipe()
31
- id2label = getattr(pipe.model.config, "id2label", None)
32
- scores = sorted(pipe(u)[0], key=lambda s: s["score"], reverse=True)
33
- top = scores[0]
34
- lbl = _pretty(top["label"], id2label)
35
- conf_pct = round(100*float(top["score"]), 2)
36
- return lbl, conf_pct
37
-
38
- def analyze(text: str):
39
- text = (text or "").strip()
40
  if not text:
41
  return "Paste an email body or a URL.", "", "", []
 
42
  urls = [text] if (text.lower().startswith(("http://","https://","www.")) and " " not in text) else _extract_urls(text)
43
  if not urls:
44
  return "No URLs detected in the text.", "", "", []
 
 
 
 
 
 
 
 
 
 
45
  rows, unsafe, top_label, top_conf = [], False, "", ""
 
46
  for i, u in enumerate(urls, 1):
47
- try:
48
- lbl, conf = _classify_url(u)
49
- except Exception as e:
50
- rows.append([u, f"Error: {type(e).__name__}", "—"])
51
- continue
52
  rows.append([u, lbl, conf])
53
  if i == 1:
54
  top_label, top_conf = lbl, f"{conf:.2f}%"
55
  if lbl.lower() in {"phishing","malware","defacement"}:
56
  unsafe = True
 
57
  verdict = "🔴 UNSAFE (links flagged)" if unsafe else "🟢 SAFE (all links benign)"
58
  return verdict, top_label, top_conf, rows
59
 
60
- with gr.Blocks() as demo:
61
- gr.Markdown("# 🛡️ Phishing Detector (via Link Analysis)")
62
- gr.Markdown("Paste an **email** or a **URL**. We extract links and classify each one.")
63
- inp = gr.Textbox(label="Email or URL", lines=6)
64
- btn = gr.Button("Analyze", variant="primary")
65
- verdict = gr.Markdown()
66
- label = gr.Textbox(label="Prediction", interactive=False)
67
- conf = gr.Textbox(label="Confidence", interactive=False)
68
- table = gr.Dataframe(headers=["URL","Prediction","Confidence (%)"],
69
- datatype=["str","str","number"],
70
- row_count=(0,"dynamic"),
71
- col_count=(3,"fixed"),
72
- interactive=False,
73
- label="Per-link results")
74
- btn.click(analyze, [inp], [verdict, label, conf, table], show_progress=True)
75
 
76
  if __name__ == "__main__":
77
- demo.launch(server_name="0.0.0.0", server_port=7860, show_error=True)
 
 
1
+ import os, re, time
2
+ import gradio as gr
3
 
4
+ # Keep it light on CPU and quiet
5
  os.environ.setdefault("TOKENIZERS_PARALLELISM", "false")
6
 
7
  URL_MODEL_ID = "CrabInHoney/urlbert-tiny-v4-malicious-url-classifier"
8
  URL_LABEL_MAP = {"LABEL_0":"benign","LABEL_1":"defacement","LABEL_2":"malware","LABEL_3":"phishing"}
9
  URL_RE = re.compile(r"""(?xi)\b(?:https?://|www\.)[a-z0-9\-._~%]+(?:/[^\s<>"']*)?""")
10
 
11
+ _pipe = None # lazy-initialized transformers pipeline
12
+
13
+ def _extract_urls(text: str):
14
+ return sorted(set(m.group(0) for m in URL_RE.finditer(text or "")))
 
 
 
 
15
 
16
  def _pretty(raw, id2label):
17
  if id2label:
 
20
  if k in id2label: return id2label[k]
21
  return URL_LABEL_MAP.get(raw, raw)
22
 
23
def analyze(input_text: str):
    """Classify every URL found in *input_text* with the malicious-URL model.

    Returns a 4-tuple for the Gradio interface:
        verdict   -- overall markdown verdict string (safe / unsafe / guidance)
        top_label -- predicted label for the first URL (e.g. "phishing")
        top_conf  -- confidence for the first URL, formatted as "NN.NN%"
        rows      -- per-URL table rows: [url, label, confidence-in-percent]
    """
    text = (input_text or "").strip()
    if not text:
        return "Paste an email body or a URL.", "", "", []

    # A bare URL is classified directly; otherwise extract links from the text.
    is_single_url = text.lower().startswith(("http://", "https://", "www.")) and " " not in text
    urls = [text] if is_single_url else _extract_urls(text)
    if not urls:
        return "No URLs detected in the text.", "", "", []

    # Lazy import + pipeline creation (keeps app startup instant).
    global _pipe
    if _pipe is None:
        from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
        tok = AutoTokenizer.from_pretrained(URL_MODEL_ID)
        mdl = AutoModelForSequenceClassification.from_pretrained(URL_MODEL_ID)
        _pipe = pipeline("text-classification", model=mdl, tokenizer=tok, device=-1, top_k=None)

    id2label = getattr(_pipe.model.config, "id2label", None)
    rows, unsafe, top_label, top_conf = [], False, "", ""

    for i, u in enumerate(urls, 1):
        # One bad URL must not abort the whole analysis: record an error row
        # and keep going (restores the per-link error handling this app had
        # before the pipeline was inlined here).
        try:
            scores = sorted(_pipe(u)[0], key=lambda s: s["score"], reverse=True)
            top = scores[0]
            lbl = _pretty(top["label"], id2label)
            conf = round(100 * float(top["score"]), 2)
        except Exception as e:
            rows.append([u, f"Error: {type(e).__name__}", "—"])
            continue
        rows.append([u, lbl, conf])
        if i == 1:
            top_label, top_conf = lbl, f"{conf:.2f}%"
        if lbl.lower() in {"phishing", "malware", "defacement"}:
            unsafe = True

    verdict = "🔴 UNSAFE (links flagged)" if unsafe else "🟢 SAFE (all links benign)"
    return verdict, top_label, top_conf, rows
60
 
61
# Build the UI declaratively: one textbox in, four components out
# (verdict markdown, predicted label, confidence, per-link results table).
_inputs = gr.Textbox(lines=6, label="Email or URL", placeholder="Paste a URL or a full email…")
_outputs = [
    gr.Markdown(label="Verdict"),
    gr.Textbox(label="Prediction", interactive=False),
    gr.Textbox(label="Confidence", interactive=False),
    gr.Dataframe(
        headers=["URL", "Prediction", "Confidence (%)"],
        datatype=["str", "str", "number"],
        row_count=(0, "dynamic"),
        col_count=(3, "fixed"),
        interactive=False,
        label="Per-link results",
    ),
]
demo = gr.Interface(
    fn=analyze,
    inputs=_inputs,
    outputs=_outputs,
    title="🛡️ Phishing Detector (via Link Analysis)",
    description="We extract links from your text and classify each with a compact malicious-URL model.",
)

if __name__ == "__main__":
    # Let Spaces decide host/port; keep defaults for maximum compatibility
    demo.launch()
requirements.txt CHANGED
@@ -3,3 +3,4 @@ transformers==4.55.2
3
 
4
  --extra-index-url https://download.pytorch.org/whl/cpu
5
  torch==2.4.0+cpu
 
 
3
 
4
  --extra-index-url https://download.pytorch.org/whl/cpu
5
  torch==2.4.0+cpu
6
+