"""
Extract HTML Features - Direct from Files (No Metadata Needed)
Simplified version that scans directories directly
WITH QUALITY FILTERING to remove low-quality HTML files
"""
import pandas as pd
from pathlib import Path
import logging
from tqdm import tqdm
import sys
import re
from bs4 import BeautifulSoup
# Add scripts directory to path
sys.path.append(str(Path(__file__).parent))
from html_features import HTMLFeatureExtractor
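# HTMLFeatureExtractor is assumed to expose extract_features(html, url=None) -> dict
# of per-page features; see the call site in extract_features_from_directory() below.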
# Setup logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    datefmt='%H:%M:%S'
)
logger = logging.getLogger(__name__)
# Quality filter constants
MIN_FILE_SIZE = 1000  # Minimum 1KB
MIN_WORDS = 50        # Minimum 50 words of text content
MIN_TAGS = 10         # Minimum 10 HTML tags
ERROR_PATTERNS = [
    'page not found', '404', '403', 'forbidden', 'access denied',
    'error occurred', 'server error', 'not available', 'suspended',
    'domain for sale', 'this site can', 'website expired',
    'coming soon', 'under construction', 'parked domain',
    'buy this domain', 'this domain', 'domain has expired'
]
def is_quality_html(html_content, filename=""):
    """
    Check whether an HTML document meets the minimum quality criteria.

    Args:
        html_content: Raw HTML string.
        filename: Optional file name (currently unused, kept for diagnostics).

    Returns:
        tuple: (is_valid, reason)
    """
    # Check 1: Minimum file size
    if len(html_content) < MIN_FILE_SIZE:
        return False, f"Too small ({len(html_content)} bytes)"

    try:
        soup = BeautifulSoup(html_content, 'html.parser')

        # Check 2: Has a body tag (basic HTML structure)
        if not soup.find('body'):
            return False, "No body tag"

        # Check 3: Minimum number of tags
        num_tags = len(soup.find_all())
        if num_tags < MIN_TAGS:
            return False, f"Too few tags ({num_tags})"

        # Check 4: Minimum word count in the visible text
        text = soup.get_text(separator=' ', strip=True).lower()
        words = text.split()
        if len(words) < MIN_WORDS:
            return False, f"Too few words ({len(words)})"

        # Check 5: Not an error/parked page (scan the first 2000 characters)
        text_start = text[:2000]
        for pattern in ERROR_PATTERNS:
            if pattern in text_start:
                return False, f"Error page pattern: '{pattern}'"

        # Check 6: Has some interactive elements OR substantial content
        has_links = len(soup.find_all('a')) > 0
        has_forms = len(soup.find_all('form')) > 0
        has_inputs = len(soup.find_all('input')) > 0
        has_images = len(soup.find_all('img')) > 0
        has_divs = len(soup.find_all('div')) > 3
        if not (has_links or has_forms or has_inputs or has_images or has_divs):
            return False, "No interactive elements"

        # Check 7: Not mostly JavaScript (JS-only pages are hard to analyze)
        script_content = ''.join(s.string or '' for s in soup.find_all('script'))
        if len(script_content) > len(text) * 3 and len(text) < 200:
            return False, "Mostly JavaScript, little content"

        return True, "OK"

    except Exception as e:
        return False, f"Parse error: {str(e)[:50]}"
def extract_features_from_directory(html_dir, label, limit=None, apply_filter=True):
    """
    Extract features from all HTML files in a directory.

    Args:
        html_dir: Directory containing HTML files.
        label: Label for these files (0 = legitimate, 1 = phishing).
        limit: Maximum number of files to keep (None = all).
        apply_filter: Apply the quality filter to skip low-quality HTML files.

    Returns:
        List of feature dictionaries.
    """
    html_dir = Path(html_dir)
    logger.info(f"\nProcessing: {html_dir}")
    logger.info(f"  Label: {'Phishing' if label == 1 else 'Legitimate'}")
    logger.info(f"  Quality filter: {'ENABLED' if apply_filter else 'DISABLED'}")

    # Get all HTML files
    html_files = sorted(html_dir.glob('*.html'))
    total_files = len(html_files)
    logger.info(f"  Found {total_files:,} HTML files")

    # Initialize extractor
    extractor = HTMLFeatureExtractor()

    results = []
    errors = 0
    filtered_out = 0
    filter_reasons = {}

    # Process each HTML file
    for html_path in tqdm(html_files,
                          desc=f"Extracting {'Phishing' if label == 1 else 'Legitimate'} features"):
        try:
            # Read HTML content
            with open(html_path, 'r', encoding='utf-8', errors='ignore') as f:
                html_content = f.read()

            # Apply quality filter if enabled
            if apply_filter:
                is_valid, reason = is_quality_html(html_content, html_path.name)
                if not is_valid:
                    filtered_out += 1
                    filter_reasons[reason] = filter_reasons.get(reason, 0) + 1
                    continue

            # Extract features
            features = extractor.extract_features(html_content, url=None)

            # Add metadata
            features['filename'] = html_path.name  # type: ignore
            features['label'] = label
            results.append(features)

            # Stop once we have reached the limit of quality files
            if limit and len(results) >= limit:
                logger.info(f"  Reached limit of {limit:,} quality files")
                break

        except Exception as e:
            errors += 1
            if errors <= 10:  # Show the first 10 errors only
                logger.warning(f"  Error processing {html_path.name}: {e}")

    logger.info(f"  Quality files extracted: {len(results):,}")
    logger.info(f"  Filtered out (low quality): {filtered_out:,} "
                f"({filtered_out / max(total_files, 1) * 100:.1f}%)")

    if filter_reasons and apply_filter:
        logger.info("  Filter reasons (top 5):")
        for reason, count in sorted(filter_reasons.items(), key=lambda x: -x[1])[:5]:
            logger.info(f"    - {reason}: {count:,}")

    if errors > 0:
        logger.warning(f"  Errors: {errors:,}")

    return results
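# Example standalone use (illustrative; assumes the directory layout noted at the top):
#   rows = extract_features_from_directory('data/html/phishing_v1', label=1, limit=100)
#   pd.DataFrame(rows).to_csv('data/features/phishing_sample.csv', index=False)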
def main():
    logger.info("=" * 80)
    logger.info("BALANCED HTML FEATURES EXTRACTION (WITH QUALITY FILTER)")
    logger.info("=" * 80)

    # Quality filter info
    logger.info("\nQuality Filter Criteria:")
    logger.info(f"  - Minimum file size: {MIN_FILE_SIZE} bytes")
    logger.info(f"  - Minimum word count: {MIN_WORDS} words")
    logger.info(f"  - Minimum HTML tags: {MIN_TAGS}")
    logger.info("  - Must have body tag")
    logger.info("  - Not an error/parked page")
    logger.info("  - Has interactive elements (links/forms/images)")

    # Paths
    phishing_html_dir = Path('data/html/phishing_v1')
    legit_html_dir = Path('data/html/legitimate_v1')
    output_path = Path('data/features/html_features_old.csv')

    # Check directories exist
    if not phishing_html_dir.exists():
        logger.error(f"Phishing directory not found: {phishing_html_dir}")
        return
    if not legit_html_dir.exists():
        logger.error(f"Legitimate directory not found: {legit_html_dir}")
        return

    # Count files
    logger.info("\n1. Checking available HTML files...")
    phishing_files = list(phishing_html_dir.glob('*.html'))
    legit_files = list(legit_html_dir.glob('*.html'))
    phishing_count = len(phishing_files)
    legit_count = len(legit_files)
    logger.info(f"  Phishing HTML files: {phishing_count:,}")
    logger.info(f"  Legitimate HTML files: {legit_count:,}")

    # Extract phishing features (with quality filter)
    logger.info("\n2. Extracting PHISHING HTML features (with quality filter)...")
    phishing_features = extract_features_from_directory(
        phishing_html_dir,
        label=1,          # Phishing
        limit=None,       # Get all quality files first
        apply_filter=True
    )

    # Extract legitimate features (with quality filter)
    logger.info("\n3. Extracting LEGITIMATE HTML features (with quality filter)...")
    legit_features = extract_features_from_directory(
        legit_html_dir,
        label=0,          # Legitimate
        limit=None,       # Get all quality files first
        apply_filter=True
    )

    # Balance the dataset
    logger.info("\n4. Balancing dataset...")
    min_count = min(len(phishing_features), len(legit_features))
    logger.info(f"  Quality phishing samples: {len(phishing_features):,}")
    logger.info(f"  Quality legitimate samples: {len(legit_features):,}")
    logger.info(f"  Balancing to: {min_count:,} per class")

    # Truncate to balanced size
    phishing_features = phishing_features[:min_count]
    legit_features = legit_features[:min_count]

    # Combine results
    logger.info("\n5. Combining datasets...")
    all_features = phishing_features + legit_features
    if len(all_features) == 0:
        logger.error("No features extracted! Check error messages above.")
        return

    # Create DataFrame
    logger.info("\n6. Creating features DataFrame...")
    features_df = pd.DataFrame(all_features)

    # Reorder columns (filename and label first, then features)
    feature_cols = [col for col in features_df.columns if col not in ['filename', 'label']]
    features_df = features_df[['filename', 'label'] + feature_cols]

    # Shuffle dataset
    features_df = features_df.sample(frac=1, random_state=42).reset_index(drop=True)
    logger.info(f"  Shape: {features_df.shape}")
    logger.info(f"  Features: {len(feature_cols)}")

    # Show label distribution
    logger.info("\n  Label distribution:")
    label_counts = features_df['label'].value_counts()
    for label, count in label_counts.items():
        label_name = 'Phishing' if label == 1 else 'Legitimate'
        logger.info(f"    {label_name}: {count:,} ({count / len(features_df) * 100:.1f}%)")

    # Save to CSV
    logger.info(f"\n7. Saving features to: {output_path}")
    output_path.parent.mkdir(parents=True, exist_ok=True)
    features_df.to_csv(output_path, index=False)
    logger.info("  ✓ Saved!")

    # Show statistics
    logger.info("\n" + "=" * 80)
    logger.info("EXTRACTION SUMMARY")
    logger.info("=" * 80)
    logger.info(f"\nTotal samples: {len(features_df):,}")
    logger.info(f"  Phishing: {len(phishing_features):,}")
    logger.info(f"  Legitimate: {len(legit_features):,}")
    logger.info(f"\nFeatures extracted: {len(feature_cols)}")
    logger.info(f"Dataset balance: {(label_counts[0] / label_counts[1]) * 100:.1f}%")

    # Show sample statistics
    logger.info("\nFeature statistics (first 10 features):")
    numeric_cols = features_df.select_dtypes(include=['int64', 'float64']).columns[:10]
    stats = features_df[numeric_cols].describe()
    logger.info(f"\n{stats.to_string()}")

    logger.info("\n" + "=" * 80)
    logger.info("✓ QUALITY-FILTERED HTML FEATURES EXTRACTION COMPLETE!")
    logger.info("=" * 80)
    logger.info(f"\nOutput file: {output_path}")
    logger.info(f"Shape: {features_df.shape}")
    logger.info("Quality filter removed low-quality HTML files")
    logger.info("=" * 80)


if __name__ == '__main__':
    main()