Gaykar committed on
Commit
c69597c
·
1 Parent(s): b58323f

added utils

Browse files
Files changed (1) hide show
  1. utils.py +162 -0
utils.py ADDED
@@ -0,0 +1,162 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import re
2
+ import pandas as pd
3
+ from urllib.parse import urlparse
4
+
5
# Regex for locating http/https URLs in free text (runs to the next whitespace).
URL_PATTERN = r'https?://\S+'
# Loose phone-number matcher: optional '+', a digit, then 6+ digits/dashes/
# parens/spaces, ending in a digit (8+ characters total).
PHONE_PATTERN = r'(\+?\d[\d\-\(\)\s]{6,}\d)'
7
+
8
+
9
# Compiled matcher for known URL-shortening services.  A hit anywhere inside
# a URL flags it as shortened: plain substring match (no word boundaries),
# case-insensitive.
SHORTENING_PATTERN = re.compile(
    r'bit\.ly|goo\.gl|shorte\.st|go2l\.ink|x\.co|ow\.ly|t\.co|tinyurl|tr\.im|is\.gd|'
    r'cli\.gs|migre\.me|ff\.im|tiny\.cc|url4\.eu|twit\.ac|su\.pr|snipurl\.com|'
    r'short\.to|BudURL\.com|ping\.fm|post\.ly|Just\.as|bkite\.com|snipr\.com|fic\.kr|'
    r'loopt\.us|doiop\.com|short\.ie|kl\.am|wp\.me|rubyurl\.com|om\.ly|to\.ly|bit\.do|'
    r'lnkd\.in|db\.tt|qr\.ae|adf\.ly|bitly\.com|cur\.lv|tinyurl\.com|ity\.im|q\.gs|'
    r'po\.st|bc\.vc|twitthis\.com|u\.to|j\.mp|buzurl\.com|cutt\.us|u\.bb|yourls\.org|'
    r'prettylinkpro\.com|scrnch\.me|filoops\.info|vzturl\.com|qr\.net|1url\.com|tweez\.me|v\.gd|'
    # NOTE: a duplicate 'tr\.im' alternative (already listed above) was removed.
    r'link\.zip\.net',
    re.IGNORECASE
)
20
+
21
# Compiled matcher for tokens commonly found in phishing URLs: credential and
# payment keywords, impersonated brand names, risky file extensions, and
# frequently-abused TLDs.  Plain substring match with no word boundaries
# (so e.g. 'free' also matches inside 'freedom'), case-insensitive.
SUSPICIOUS_PATTERN = re.compile(
    r'PayPal|login|signin|bank|account|update|free|lucky|service|bonus|ebayisapi|webscr'
    r'|verify|secure|password|support|alert|warning|confirm|suspend|action-required'
    r'|activity|limited|access-restricted|authentication|recover|reset'
    r'|invoice|payment|billing|purchase|transaction|refund'
    r'|microsoft|google|amazon|apple|netflix|fedex|dhl|ups'
    r'|redirect|cgi-bin|admin|\.exe|\.zip|\.rar|\.js|\.scr|\.bat|\.php'
    r'|\.xyz|\.top|\.icu|\.biz|\.info|\.live|\.link|\.click',
    re.IGNORECASE
)
31
+
32
+
33
+
34
def extract_urls_from_body(row):
    """Extract all URLs from a row's email body.

    Returns the matched URLs joined by " [NEXT] ", or "" when the body is
    missing, non-string, or contains no URLs.
    """
    # Bind the fetched value once: the original checked row.get('body', '')
    # but then indexed row['body'], which raised KeyError when the key was
    # absent (the .get default passed the isinstance check).
    body = row.get('body', '')
    if not isinstance(body, str):
        return ""
    found = re.findall(URL_PATTERN, body)
    # Join with a sentinel token so downstream code can split/count URLs.
    return " [NEXT] ".join(found) if found else ""
41
+
42
+
43
+ # --- 2. URL COUNT ---
44
+
45
+
46
def count_urls(cell):
    """Return the number of URLs in a ' [NEXT] '-joined cell string (0 for "")."""
    if cell == "":
        return 0
    # N separators means N+1 URLs.
    return cell.count(" [NEXT]") + 1
48
+
49
+ #--- 3. ATTACHMENT EXTRACTION ---
50
+
51
+
52
def extract_attachment_names(body):
    """Return newline-joined attachment filenames parsed from MIME-style
    'filename="..."' markers in *body*; "" for non-string or no matches."""
    if not isinstance(body, str):
        return ""
    names = re.findall(r'filename="([^"]+)"', body)
    # "\n".join([]) is "" — matches the original's empty-result behavior.
    return "\n".join(names)
56
+
57
+
58
+ # --- 4. CLEANED TEXT BUILDING ---
59
+
60
+
61
def create_combined_text(row):
    """Build a single tagged training string from a row's subject and body.

    URLs become "[LINK]" and phone numbers "[PHONE]"; the result is wrapped
    in [SSUB]/[ESUB] and [SBODY]/[EBODY] delimiter tokens.
    """
    subject = "" if pd.isnull(row['subject']) else str(row['subject'])
    body = "" if pd.isnull(row['body']) else str(row['body'])

    # Mask URLs first, then phone numbers, so digits inside URLs are
    # already gone before the phone pattern runs.
    body = re.sub(URL_PATTERN, "[LINK]", body)
    body = re.sub(PHONE_PATTERN, "[PHONE]", body)

    return f"[SSUB] {subject.strip()} [ESUB] [SBODY] {body.strip()} [EBODY]"
69
+
70
+
71
+ #--- 5. IP ADDRESS DETECTION ---
72
+
73
+
74
def having_ip_address(url):
    """Return 1 if *url* contains an IP address literal, else 0.

    Detects dotted-decimal IPv4, hex-octet IPv4 (0xC0.0xA8...), and a full
    8-group IPv6 address.
    """
    found = re.search(
        r'(([01]?\d\d?|2[0-4]\d|25[0-5])\.([01]?\d\d?|2[0-4]\d|25[0-5])\.([01]?\d\d?|2[0-4]\d|25[0-5])\.'
        r'([01]?\d\d?|2[0-4]\d|25[0-5]))|'
        r'((0x[0-9a-fA-F]{1,2})\.(0x[0-9a-fA-F]{1,2})\.(0x[0-9a-fA-F]{1,2})\.(0x[0-9a-fA-F]{1,2}))|'
        r'(?:[a-fA-F0-9]{1,4}:){7}[a-fA-F0-9]{1,4}',
        url,
    )
    return int(bool(found))
83
+
84
+ # --- 6. BASIC URL FEATURES (LENGTH / SUBDOMAIN) ---
85
+
86
+
87
def split_urls(sample):
    """Split a ' [NEXT] '-joined URL cell into a list of stripped URLs.

    Returns [] for any non-string input (None, NaN, lists, numbers, ...).
    """
    # Type-check only: the original called pd.isna(sample) before the
    # isinstance check, which raises ValueError on list-like input (pd.isna
    # returns an array there), and is redundant for strings anyway — NaN is
    # a float, so the isinstance check already rejects it.
    if not isinstance(sample, str):
        return []
    return [part.strip() for part in sample.split('[NEXT]') if part.strip()]
91
+
92
def url_length(url):
    """Return the character length of *url*."""
    return len(url)
93
+
94
def subdomain_count(url):
    """Return the number of subdomain labels in *url*'s hostname.

    e.g. 'http://a.b.example.com' -> 2; a bare 'example.com' host -> 0.
    Returns 0 when the URL has no hostname or cannot be parsed.
    """
    try:
        hostname = urlparse(url).hostname
    # Narrowed from a bare `except:` which also swallowed SystemExit and
    # KeyboardInterrupt; still covers TypeError/ValueError from bad input.
    except Exception:
        return 0
    if hostname is None:
        return 0
    # Everything left of the final 'domain.tld' pair counts as a subdomain.
    return max(len(hostname.split('.')) - 2, 0)
102
+
103
def extract_basic_url_stats(sample):
    """Return a Series of max/avg URL length and max/avg subdomain count
    for the URLs stored in a ' [NEXT] '-joined cell (all zeros if none)."""
    cols = ['url_length_max', 'url_length_avg', 'url_subdom_max', 'url_subdom_avg']
    urls = split_urls(sample)
    if not urls:
        return pd.Series([0, 0, 0, 0], index=cols)

    lengths = [len(u) for u in urls]
    subs = [subdomain_count(u) for u in urls]
    values = [
        max(lengths),
        sum(lengths) / len(lengths),
        max(subs),
        sum(subs) / len(subs),
    ]
    return pd.Series(values, index=cols)
120
+
121
+ # --- 7. SHORT URL COUNT ---
122
+
123
+
124
def count_shortened_urls(sample):
    """Count how many URLs in the cell hit a known URL-shortener domain."""
    total = 0
    for url in split_urls(sample):
        if SHORTENING_PATTERN.search(url):
            total += 1
    return total
127
+
128
+ # --- 8. SUSPICIOUS KEYWORD COUNT ---
129
+
130
+
131
def suspicious_words_count(sample):
    """Count URLs in the cell containing a suspicious keyword, extension,
    or TLD (per SUSPICIOUS_PATTERN)."""
    hits = 0
    for url in split_urls(sample):
        if SUSPICIOUS_PATTERN.search(url):
            hits += 1
    return hits
134
+
135
+
136
+ # --- 9. DOT COUNT FEATURES ---
137
+
138
+
139
def dot_count(url):
    """Return how many '.' characters appear in *url*."""
    return url.count('.')
140
+
141
def extract_dot_features(sample):
    """Return a Series with the max and average '.' counts across the URLs
    in a ' [NEXT] '-joined cell (zeros if the cell holds no URLs)."""
    cols = ['dot_count_max', 'dot_count_avg']
    urls = split_urls(sample)
    if not urls:
        return pd.Series([0, 0], index=cols)
    counts = [dot_count(u) for u in urls]
    return pd.Series([max(counts), sum(counts) / len(counts)], index=cols)
148
+
149
+
150
+ # 10. GENERIC CHARACTER-COUNT FEATURES ---
151
+
152
+
153
def char_count(url, char):
    """Count occurrences of *char* in *url*; 0 for empty/None input."""
    if not url:
        return 0
    return url.count(char)
155
+
156
def extract_char_features(sample, char, name):
    """Return a Series with the max and average counts of *char* across the
    URLs in a cell, labelled '<name>_max' and '<name>_avg'."""
    cols = [f'{name}_max', f'{name}_avg']
    urls = split_urls(sample)
    if not urls:
        return pd.Series([0, 0], index=cols)
    counts = [char_count(u, char) for u in urls]
    return pd.Series([max(counts), sum(counts) / len(counts)], index=cols)