Update app.py
Browse files
app.py
CHANGED
|
@@ -1,7 +1,7 @@
|
|
| 1 |
#!/usr/bin/env python3
|
| 2 |
# -*- coding: utf-8 -*-
|
| 3 |
"""
|
| 4 |
-
سیستم
|
| 5 |
"""
|
| 6 |
|
| 7 |
import pandas as pd
|
|
@@ -34,43 +34,36 @@ def convert_to_serializable(obj):
|
|
| 34 |
else:
|
| 35 |
return obj
|
| 36 |
|
| 37 |
-
# ===== کلاس
|
| 38 |
-
class
|
| 39 |
def __init__(self):
|
| 40 |
-
self.
|
| 41 |
-
self.counters = {
|
| 42 |
-
'PERSON': 0, 'MIXED_NAMES': 0, 'ID_NUMBER': 0, 'ENGLISH_TITLES': 0,
|
| 43 |
-
'AMOUNT': 0, 'INTERNATIONAL_CURRENCIES': 0, 'ACCOUNT': 0,
|
| 44 |
-
'FINANCIAL_TERMS': 0, 'STOCK_SYMBOL': 0,
|
| 45 |
-
'DATE': 0, 'ADVANCED_DATE_FORMATS': 0, 'TIME_RANGES': 0,
|
| 46 |
-
'LOCATION': 0, 'COMPLEX_ADDRESSES': 0,
|
| 47 |
-
'TECHNICAL_CODES': 0, 'NETWORK_ADDRESSES': 0, 'TECHNICAL_UNITS': 0,
|
| 48 |
-
'ACRONYMS_ABBREVIATIONS': 0,
|
| 49 |
-
'COMPANY': 0, 'BUSINESS_TERMS': 0, 'PRODUCT': 0, 'PETROCHEMICAL': 0,
|
| 50 |
-
'PERCENTAGE': 0, 'VOLUME': 0, 'RATIOS': 0,
|
| 51 |
-
'PHONE': 0, 'EMAIL': 0
|
| 52 |
-
}
|
| 53 |
-
self.api_key = os.getenv("OPENAI_API_KEY", "")
|
| 54 |
-
|
| 55 |
-
def get_improved_patterns(self):
|
| 56 |
-
"""الگوهای کاملاً بهبود یافته و تست شده"""
|
| 57 |
-
return {
|
| 58 |
'COMPANY': [
|
| 59 |
-
r'شرکت\s+پتروشیمی\s+[\u0600-\u06FF\u0750-\u077F\s\u200C]+?(?=\s|$|،|\.)',
|
| 60 |
-
r'شرکت\s+[\u0600-\u06FF\u0750-\u077F\s\u200C]*(?:پتروشیمی|نفت|گاز|صنایع|تولید)[\u0600-\u06FF\u0750-\u077F\s\u200C]*',
|
| 61 |
-
r'بانک\s+[\u0600-\u06FF\u0750-\u077F\s\u200C]+',
|
| 62 |
r'شرکت\s+[\u0600-\u06FF\u0750-\u077F\s\u200C]+',
|
| 63 |
-
r'
|
| 64 |
-
r'[
|
|
|
|
| 65 |
],
|
| 66 |
|
| 67 |
'LOCATION': [
|
| 68 |
r'بندر\s+[\u0600-\u06FF\u0750-\u077F\s\u200C]+',
|
| 69 |
r'شهر\s+[\u0600-\u06FF\u0750-\u077F\s\u200C]+',
|
| 70 |
r'استان\s+[\u0600-\u06FF\u0750-\u077F\s\u200C]+',
|
| 71 |
-
r'\b(?:تهران|اصفهان|ماهشهر|عسلویه|بندرعباس|اهواز|شیراز|مشهد|تبریز|کرج|قم|رشت|کرمان|یزد|زاهدان|بوشهر|خرمشهر|آبادان|اراک|قزوین)\b',
|
| 72 |
r'\b(?:ایران|عراق|کویت|عربستان|امارات|قطر|عمان|بحرین|ترکیه|پاکستان|افغانستان)\b',
|
| 73 |
-
r'\b(?:London|Paris|Tokyo|New\s+York|Dubai|Singapore|Hong\s+Kong
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 74 |
],
|
| 75 |
|
| 76 |
'DATE': [
|
|
@@ -83,223 +76,213 @@ class LightweightDataAnonymizer:
|
|
| 83 |
r'(?:13[0-9]{2}|14[0-9]{2}|20[0-9]{2}|19[0-9]{2})(?=\s|$|،|\.)'
|
| 84 |
],
|
| 85 |
|
| 86 |
-
'PERSON': [
|
| 87 |
-
r'آقای\s+[\u0600-\u06FF\u0750-\u077F\s\u200C]+?(?=\s|$|،|\.)',
|
| 88 |
-
r'خانم\s+[\u0600-\u06FF\u0750-\u077F\s\u200C]+?(?=\s|$|،|\.)',
|
| 89 |
-
r'مهندس\s+[\u0600-\u06FF\u0750-\u077F\s\u200C]+?(?=\s|$|،|\.)',
|
| 90 |
-
r'دکتر\s+[\u0600-\u06FF\u0750-\u077F\s\u200C]+?(?=\s|$|،|\.)',
|
| 91 |
-
r'استاد\s+[\u0600-\u06FF\u0750-\u077F\s\u200C]+?(?=\s|$|،|\.)',
|
| 92 |
-
r'مدیرعامل\s+[\u0600-\u06FF\u0750-\u077F\s\u200C]+?(?=\s|$|،|\.)',
|
| 93 |
-
r'[\u0600-\u06FF\u0750-\u077F\s\u200C]+\s+مدیرعامل(?=\s|$|،|\.)',
|
| 94 |
-
r'Mr\.\s+[a-zA-Z\s]+?(?=\s|,|\.|$)',
|
| 95 |
-
r'Ms\.\s+[a-zA-Z\s]+?(?=\s|,|\.|$)',
|
| 96 |
-
r'Dr\.\s+[a-zA-Z\s]+?(?=\s|,|\.|$)'
|
| 97 |
-
],
|
| 98 |
-
|
| 99 |
'PHONE': [
|
| 100 |
-
r'(?:
|
| 101 |
-
r'(?:ت
|
| 102 |
-
r'(?:موبایل[\s:]*)?(?:شماره[\s:]*)?(?:0)?9[\u06F0-\u06F90-9]{9}',
|
| 103 |
-
r'[\u06F0-\u06F90-9]{3,4}[-\s][\u06F0-\u06F90-9]{7,8}',
|
| 104 |
r'[\u06F0-\u06F90-9]{11}(?!\d)',
|
| 105 |
-
r'
|
| 106 |
-
r'\+[0-9]{1,3}-[0-9]{3}-[0-9]{3}-[0-9]{4}(?:\s+ext\.\s+[0-9]{3,4})?',
|
| 107 |
-
r'\([0-9]{3}\)\s+[0-9]{3}-[0-9]{4}'
|
| 108 |
],
|
| 109 |
|
| 110 |
'EMAIL': [
|
| 111 |
r'[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}',
|
| 112 |
-
r'ایمیل[\s:]*[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}',
|
| 113 |
-
r'email[\s:]*[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}',
|
| 114 |
-
r'نشانی[\s]*الکترونیکی[\s:]*[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}',
|
| 115 |
-
r'آدرس[\s]*ایمیل[\s:]*[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}'
|
| 116 |
],
|
| 117 |
|
| 118 |
'AMOUNT': [
|
| 119 |
r'\d+(?:,\d{3})*\s*(?:میلیون|میلیارد|هزار)\s*تومان',
|
| 120 |
r'مبلغ\s+\d+(?:,\d{3})*\s*(?:میلیون|میلیارد|هزار)?\s*تومان',
|
| 121 |
r'\d+\s*تومان',
|
| 122 |
-
r'\$\d+(?:,\d{3})*(?:\.\d+)?\s*(?:million|billion|thousand|M|B|K)?',
|
| 123 |
r'\d+(?:,\d{3})*\s*ریال'
|
| 124 |
],
|
| 125 |
|
| 126 |
'ACCOUNT': [
|
| 127 |
-
r'(?:شماره[\s]*
|
| 128 |
-
r'حساب[\s]*
|
| 129 |
-
r'شماره[\s]*حساب[\s:]*(?:[\u06F0-\u06F90-9]{1,3}[-\s]?)*[\u06F0-\u06F90-9]{8,20}',
|
| 130 |
-
r'Account[\s]*(?:Number[\s:]*)?(?:[0-9]{1,3}[-\s]?)*[0-9]{8,20}',
|
| 131 |
-
r'[\u06F0-\u06F90-9]{3}[-\s]?[\u06F0-\u06F90-9]{3}[-\s]?[\u06F0-\u06F90-9]{6,12}'
|
| 132 |
]
|
| 133 |
}
|
| 134 |
-
|
| 135 |
-
def
|
| 136 |
-
"""
|
| 137 |
-
# کلمات
|
| 138 |
-
|
| 139 |
-
|
| 140 |
# حذف فاصلههای اضافی
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
|
|
|
|
|
|
| 145 |
return False
|
| 146 |
|
| 147 |
-
#
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
# بررسیهای خاص بر اساس دستهبندی
|
| 153 |
-
if category == 'COMPANY':
|
| 154 |
-
# شرکت نباید فقط کلمات عمومی باشد
|
| 155 |
-
if entity_text in ['شرکت', 'بانک', 'پتروشیمی']:
|
| 156 |
-
return False
|
| 157 |
|
| 158 |
return True
|
| 159 |
-
|
| 160 |
-
|
| 161 |
-
|
| 162 |
-
def __init__(self):
|
| 163 |
-
self.anonymizer = LightweightDataAnonymizer()
|
| 164 |
-
self.results = []
|
| 165 |
-
|
| 166 |
-
def extract_entities_from_text(self, text: str) -> Dict[str, List[str]]:
|
| 167 |
-
"""استخراج تمام entities موجود در متن اصلی با الگوهای بهبود یافته"""
|
| 168 |
-
entities = defaultdict(list)
|
| 169 |
-
|
| 170 |
if not text or text.strip() == '':
|
| 171 |
-
return
|
| 172 |
|
| 173 |
-
|
| 174 |
-
patterns = self.anonymizer.get_improved_patterns()
|
| 175 |
|
| 176 |
-
for category, pattern_list in patterns.items():
|
|
|
|
|
|
|
| 177 |
for pattern_str in pattern_list:
|
| 178 |
try:
|
| 179 |
-
# تبدیل string به regex object
|
| 180 |
pattern = re.compile(pattern_str, re.IGNORECASE | re.MULTILINE)
|
| 181 |
-
|
| 182 |
matches = pattern.finditer(text)
|
|
|
|
| 183 |
for match in matches:
|
| 184 |
-
|
| 185 |
-
|
| 186 |
-
|
| 187 |
-
|
| 188 |
-
full_match = re.sub(r'\s+', ' ', full_match)
|
| 189 |
-
full_match = re.sub(r'\s*(در|که|با|به|از|را)\s*$', '', full_match).strip()
|
| 190 |
-
|
| 191 |
-
if self.anonymizer.is_valid_entity(full_match, category):
|
| 192 |
-
entities[category].append(full_match)
|
| 193 |
-
|
| 194 |
except re.error as e:
|
| 195 |
logger.error(f"Regex error in pattern {pattern_str}: {e}")
|
| 196 |
continue
|
| 197 |
-
|
| 198 |
-
|
| 199 |
-
|
| 200 |
-
|
| 201 |
-
# حذف duplicates و مرتبسازی
|
| 202 |
-
for category in entities:
|
| 203 |
-
entities[category] = sorted(list(set(entities[category])))
|
| 204 |
|
| 205 |
-
return
|
| 206 |
|
| 207 |
-
def extract_anonymized_codes(self,
|
| 208 |
-
"""استخراج کدهای ناشناسسازی
|
| 209 |
-
|
| 210 |
-
|
| 211 |
-
if not anonymized_text or anonymized_text.strip() == '':
|
| 212 |
-
return dict(anonymized_entities)
|
| 213 |
|
| 214 |
-
|
| 215 |
pattern = r'([a-zA-Z_]+)_(\d{3})'
|
| 216 |
|
| 217 |
try:
|
| 218 |
-
matches = re.finditer(pattern,
|
| 219 |
for match in matches:
|
| 220 |
category = match.group(1).upper()
|
| 221 |
code = match.group(0)
|
| 222 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 223 |
except Exception as e:
|
| 224 |
-
logger.error(f"Error extracting
|
| 225 |
|
| 226 |
-
# حذف
|
| 227 |
-
for category in
|
| 228 |
-
|
| 229 |
|
| 230 |
-
return
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 231 |
|
| 232 |
-
def
|
| 233 |
-
"""
|
|
|
|
|
|
|
|
|
|
| 234 |
|
| 235 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 236 |
|
| 237 |
# استخراج entities از متن اصلی
|
| 238 |
-
|
| 239 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 240 |
|
| 241 |
-
# استخراج کدهای ناشناسسازی
|
| 242 |
-
|
| 243 |
-
|
| 244 |
|
| 245 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 246 |
category_metrics = {}
|
| 247 |
total_tp, total_fp, total_fn = 0, 0, 0
|
| 248 |
|
| 249 |
-
# تمام categories که در هر کدام وجود دارند
|
| 250 |
all_categories = set(original_entities.keys()) | set(anonymized_codes.keys())
|
| 251 |
|
| 252 |
for category in all_categories:
|
| 253 |
original_count = len(original_entities.get(category, []))
|
| 254 |
anonymized_count = len(anonymized_codes.get(category, []))
|
| 255 |
|
| 256 |
-
# True Positives: تعداد entities که درست ناشناسسازی شدند
|
| 257 |
tp = min(original_count, anonymized_count)
|
| 258 |
-
|
| 259 |
-
# False Positives: entities اضافی که اشتباه ناشناسسازی شدند
|
| 260 |
fp = max(0, anonymized_count - original_count)
|
| 261 |
-
|
| 262 |
-
# False Negatives: entities که شناسایی نشدند
|
| 263 |
fn = max(0, original_count - anonymized_count)
|
| 264 |
|
| 265 |
-
# محاسبه متریکهای category
|
| 266 |
precision = tp / (tp + fp) if (tp + fp) > 0 else 0
|
| 267 |
recall = tp / (tp + fn) if (tp + fn) > 0 else 0
|
| 268 |
f1_score = 2 * (precision * recall) / (precision + recall) if (precision + recall) > 0 else 0
|
| 269 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 270 |
category_metrics[category] = {
|
| 271 |
'original_count': original_count,
|
| 272 |
'anonymized_count': anonymized_count,
|
| 273 |
-
'tp': tp,
|
| 274 |
-
'
|
| 275 |
-
'fn': fn,
|
| 276 |
-
'precision': precision,
|
| 277 |
-
'recall': recall,
|
| 278 |
-
'f1_score': f1_score
|
| 279 |
}
|
| 280 |
|
| 281 |
total_tp += tp
|
| 282 |
total_fp += fp
|
| 283 |
total_fn += fn
|
| 284 |
|
| 285 |
-
# م
|
| 286 |
overall_precision = total_tp / (total_tp + total_fp) if (total_tp + total_fp) > 0 else 0
|
| 287 |
overall_recall = total_tp / (total_tp + total_fn) if (total_tp + total_fn) > 0 else 0
|
| 288 |
overall_f1 = 2 * (overall_precision * overall_recall) / (overall_precision + overall_recall) if (overall_precision + overall_recall) > 0 else 0
|
|
|
|
| 289 |
|
| 290 |
-
|
| 291 |
-
|
| 292 |
-
|
| 293 |
-
|
| 294 |
-
|
| 295 |
|
| 296 |
return {
|
| 297 |
'original_entities': original_entities,
|
| 298 |
'anonymized_codes': anonymized_codes,
|
| 299 |
'category_metrics': category_metrics,
|
| 300 |
'overall_metrics': {
|
| 301 |
-
'total_original_entities':
|
| 302 |
-
'total_anonymized_entities':
|
| 303 |
'total_tp': total_tp,
|
| 304 |
'total_fp': total_fp,
|
| 305 |
'total_fn': total_fn,
|
|
@@ -310,393 +293,211 @@ class AnonymizationBenchmark:
|
|
| 310 |
}
|
| 311 |
}
|
| 312 |
|
| 313 |
-
def
|
| 314 |
-
"""
|
| 315 |
-
|
| 316 |
-
# خواندن فایل CSV
|
| 317 |
try:
|
| 318 |
-
#
|
|
|
|
| 319 |
for encoding in ['utf-8', 'utf-8-sig', 'cp1256', 'windows-1256']:
|
| 320 |
try:
|
| 321 |
df = pd.read_csv(csv_file_path, encoding=encoding)
|
| 322 |
-
|
| 323 |
break
|
| 324 |
except UnicodeDecodeError:
|
| 325 |
continue
|
| 326 |
-
else:
|
| 327 |
-
raise Exception("Could not read CSV file with any encoding")
|
| 328 |
-
|
| 329 |
-
except Exception as e:
|
| 330 |
-
logger.error(f"Error loading CSV file: {e}")
|
| 331 |
-
return None
|
| 332 |
-
|
| 333 |
-
# بررسی وجود ستونهای مورد نیاز
|
| 334 |
-
if 'original_text' not in df.columns or 'anonymized_text' not in df.columns:
|
| 335 |
-
logger.error(f"CSV file must contain 'original_text' and 'anonymized_text' columns. Found columns: {df.columns.tolist()}")
|
| 336 |
-
return None
|
| 337 |
-
|
| 338 |
-
# بررسی اینکه آیا داده موجود است
|
| 339 |
-
if len(df) == 0:
|
| 340 |
-
logger.error("CSV file is empty")
|
| 341 |
-
return None
|
| 342 |
-
|
| 343 |
-
results = []
|
| 344 |
-
|
| 345 |
-
for index, row in df.iterrows():
|
| 346 |
-
logger.info(f"Processing row {index + 1}/{len(df)}")
|
| 347 |
|
| 348 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 349 |
original_text = str(row['original_text']) if pd.notna(row['original_text']) else ""
|
| 350 |
anonymized_text = str(row['anonymized_text']) if pd.notna(row['anonymized_text']) else ""
|
| 351 |
|
| 352 |
if original_text.strip() == "" and anonymized_text.strip() == "":
|
| 353 |
-
|
| 354 |
continue
|
| 355 |
|
| 356 |
-
#
|
| 357 |
-
|
|
|
|
| 358 |
|
| 359 |
-
# ذخیره نت
|
| 360 |
result = {
|
| 361 |
'row_id': int(index),
|
| 362 |
'original_text': original_text,
|
| 363 |
'anonymized_text': anonymized_text,
|
| 364 |
-
|
| 365 |
-
'total_anonymized_entities': int(metrics['overall_metrics']['total_anonymized_entities']),
|
| 366 |
-
'tp': int(metrics['overall_metrics']['total_tp']),
|
| 367 |
-
'fp': int(metrics['overall_metrics']['total_fp']),
|
| 368 |
-
'fn': int(metrics['overall_metrics']['total_fn']),
|
| 369 |
-
'precision': float(metrics['overall_metrics']['precision']),
|
| 370 |
-
'recall': float(metrics['overall_metrics']['recall']),
|
| 371 |
-
'f1_score': float(metrics['overall_metrics']['f1_score']),
|
| 372 |
-
'accuracy': float(metrics['overall_metrics']['accuracy'])
|
| 373 |
}
|
| 374 |
|
| 375 |
-
# اضافه کردن متریکهای category
|
| 376 |
-
for category,
|
| 377 |
-
|
| 378 |
-
|
| 379 |
-
result[f'{category.lower()}_f1'] = float(cat_metrics['f1_score'])
|
| 380 |
-
result[f'{category.lower()}_original_count'] = int(cat_metrics['original_count'])
|
| 381 |
-
result[f'{category.lower()}_anonymized_count'] = int(cat_metrics['anonymized_count'])
|
| 382 |
|
| 383 |
results.append(result)
|
| 384 |
-
|
| 385 |
-
except Exception as e:
|
| 386 |
-
logger.error(f"Error processing row {index}: {e}")
|
| 387 |
-
continue
|
| 388 |
-
|
| 389 |
-
if len(results) == 0:
|
| 390 |
-
logger.error("No valid results were generated")
|
| 391 |
-
return None
|
| 392 |
-
|
| 393 |
-
return pd.DataFrame(results)
|
| 394 |
-
|
| 395 |
-
def generate_summary_report(self, results_df: pd.DataFrame) -> Dict:
|
| 396 |
-
"""تولید گزارش خلاصه"""
|
| 397 |
-
|
| 398 |
-
if results_df is None or len(results_df) == 0:
|
| 399 |
-
return {}
|
| 400 |
-
|
| 401 |
-
try:
|
| 402 |
-
summary = {
|
| 403 |
-
'total_texts_processed': len(results_df),
|
| 404 |
-
'average_metrics': {
|
| 405 |
-
'precision': float(results_df['precision'].mean()),
|
| 406 |
-
'recall': float(results_df['recall'].mean()),
|
| 407 |
-
'f1_score': float(results_df['f1_score'].mean()),
|
| 408 |
-
'accuracy': float(results_df['accuracy'].mean())
|
| 409 |
-
},
|
| 410 |
-
'total_entities': {
|
| 411 |
-
'original': int(results_df['total_original_entities'].sum()),
|
| 412 |
-
'anonymized': int(results_df['total_anonymized_entities'].sum()),
|
| 413 |
-
'tp': int(results_df['tp'].sum()),
|
| 414 |
-
'fp': int(results_df['fp'].sum()),
|
| 415 |
-
'fn': int(results_df['fn'].sum())
|
| 416 |
-
}
|
| 417 |
-
}
|
| 418 |
-
|
| 419 |
-
# محاسبه متریکهای کلی بر اساس مجموع
|
| 420 |
-
total_tp = summary['total_entities']['tp']
|
| 421 |
-
total_fp = summary['total_entities']['fp']
|
| 422 |
-
total_fn = summary['total_entities']['fn']
|
| 423 |
-
total_original = summary['total_entities']['original']
|
| 424 |
-
|
| 425 |
-
summary['overall_metrics'] = {
|
| 426 |
-
'precision': total_tp / (total_tp + total_fp) if (total_tp + total_fp) > 0 else 0,
|
| 427 |
-
'recall': total_tp / (total_tp + total_fn) if (total_tp + total_fn) > 0 else 0,
|
| 428 |
-
'accuracy': total_tp / total_original if total_original > 0 else 0
|
| 429 |
-
}
|
| 430 |
|
| 431 |
-
|
| 432 |
-
|
| 433 |
-
overall_recall = summary['overall_metrics']['recall']
|
| 434 |
-
summary['overall_metrics']['f1_score'] = 2 * (overall_precision * overall_recall) / (overall_precision + overall_recall) if (overall_precision + overall_recall) > 0 else 0
|
| 435 |
|
| 436 |
-
|
| 437 |
-
category_columns = [col for col in results_df.columns if col.endswith('_precision')]
|
| 438 |
-
categories = [col.replace('_precision', '').upper() for col in category_columns]
|
| 439 |
-
|
| 440 |
-
category_summary = {}
|
| 441 |
-
for category in categories:
|
| 442 |
-
cat_lower = category.lower()
|
| 443 |
-
if f'{cat_lower}_precision' in results_df.columns:
|
| 444 |
-
# فیلتر کردن ردیفهایی که این category دارند
|
| 445 |
-
mask = results_df[f'{cat_lower}_original_count'] > 0
|
| 446 |
-
if mask.any():
|
| 447 |
-
category_summary[category] = {
|
| 448 |
-
'count_texts_with_category': int(mask.sum()),
|
| 449 |
-
'average_precision': float(results_df.loc[mask, f'{cat_lower}_precision'].mean()),
|
| 450 |
-
'average_recall': float(results_df.loc[mask, f'{cat_lower}_recall'].mean()),
|
| 451 |
-
'average_f1': float(results_df.loc[mask, f'{cat_lower}_f1'].mean()),
|
| 452 |
-
'total_original': int(results_df[f'{cat_lower}_original_count'].sum()),
|
| 453 |
-
'total_anonymized': int(results_df[f'{cat_lower}_anonymized_count'].sum())
|
| 454 |
-
}
|
| 455 |
-
|
| 456 |
-
summary['category_summary'] = category_summary
|
| 457 |
-
|
| 458 |
-
# تبدیل همه مقادیر به serializable types
|
| 459 |
-
summary = convert_to_serializable(summary)
|
| 460 |
-
|
| 461 |
-
return summary
|
| 462 |
|
| 463 |
except Exception as e:
|
| 464 |
-
|
| 465 |
-
return {'error': str(e)}
|
| 466 |
|
| 467 |
-
# ===== رابط
|
| 468 |
-
def
|
| 469 |
-
"""
|
| 470 |
-
|
| 471 |
-
|
| 472 |
-
'original_text': 'مجمع عمومی عادی سالیانه شرکت پتروشیمی کارون در بندر ماهشهر برگزار شد.',
|
| 473 |
-
'anonymized_text': 'مجمع عمومی عادی سالیانه company_001 در location_001 برگزار شد.'
|
| 474 |
-
},
|
| 475 |
-
{
|
| 476 |
-
'original_text': 'آقای احمد محمدی مدیرعامل شرکت با شماره تماس 09123456789 و ایمیل ahmad@company.com قرارداد امضا کرد.',
|
| 477 |
-
'anonymized_text': 'person_001 مدیرعامل شرکت با شماره تماس phone_001 و ایمیل email_001 قرارداد امضا کرد.'
|
| 478 |
-
},
|
| 479 |
-
{
|
| 480 |
-
'original_text': 'بانک ملی ایران مبلغ 500 میلیون تومان به حساب 123-456-789012 واریز کرد.',
|
| 481 |
-
'anonymized_text': 'company_001 مبلغ amount_001 به حساب account_001 واریز کرد.'
|
| 482 |
-
}
|
| 483 |
-
]
|
| 484 |
|
| 485 |
-
|
| 486 |
-
sample_file_path = 'sample_benchmark_data.csv'
|
| 487 |
-
df.to_csv(sample_file_path, index=False, encoding='utf-8-sig')
|
| 488 |
|
| 489 |
-
|
| 490 |
-
|
| 491 |
|
| 492 |
-
|
| 493 |
-
|
| 494 |
-
|
| 495 |
-
"""پردازش فایل CSV آپلود شده"""
|
| 496 |
-
if file is None:
|
| 497 |
-
return "لطفاً فایل CSV را آپلود کنید.", None, None
|
| 498 |
|
| 499 |
-
|
| 500 |
-
|
| 501 |
-
|
| 502 |
-
|
| 503 |
-
|
| 504 |
-
|
| 505 |
-
|
| 506 |
-
|
| 507 |
-
|
| 508 |
-
|
| 509 |
-
|
| 510 |
-
|
| 511 |
-
|
| 512 |
-
|
| 513 |
-
|
| 514 |
-
|
| 515 |
-
|
| 516 |
-
|
| 517 |
-
|
| 518 |
-
|
| 519 |
-
|
| 520 |
-
# اجرای benchmark
|
| 521 |
-
benchmark = AnonymizationBenchmark()
|
| 522 |
-
results_df = benchmark.benchmark_from_csv(file.name)
|
| 523 |
-
|
| 524 |
-
if results_df is None or len(results_df) == 0:
|
| 525 |
-
return "خطا در پردازش فایل CSV یا هیچ نتیجه معتبری تولید نشد!", None, None
|
| 526 |
-
|
| 527 |
-
logger.info(f"Benchmark completed. Results shape: {results_df.shape}")
|
| 528 |
-
|
| 529 |
-
# تولید گزارش خلاصه
|
| 530 |
-
summary = benchmark.generate_summary_report(results_df)
|
| 531 |
-
|
| 532 |
-
if 'error' in summary:
|
| 533 |
-
return f"خطا در تولید گزارش: {summary['error']}", None, None
|
| 534 |
-
|
| 535 |
-
# آمادهسازی نتایج برای نمایش
|
| 536 |
-
metrics_text = f"""
|
| 537 |
-
=== نتایج کلی Benchmark ===
|
| 538 |
-
|
| 539 |
-
تعداد متون پردازش شده: {summary.get('total_texts_processed', 0)}
|
| 540 |
-
|
| 541 |
-
=== متریکهای کلی (بر اساس مجموع) ===
|
| 542 |
-
• Precision: {summary.get('overall_metrics', {}).get('precision', 0):.4f}
|
| 543 |
-
• Recall: {summary.get('overall_metrics', {}).get('recall', 0):.4f}
|
| 544 |
-
• F1-Score: {summary.get('overall_metrics', {}).get('f1_score', 0):.4f}
|
| 545 |
-
• Accuracy: {summary.get('overall_metrics', {}).get('accuracy', 0):.4f}
|
| 546 |
|
| 547 |
-
|
| 548 |
-
• تعداد
|
| 549 |
-
•
|
| 550 |
-
•
|
| 551 |
-
•
|
| 552 |
-
• False
|
|
|
|
| 553 |
|
| 554 |
-
|
| 555 |
-
•
|
| 556 |
-
•
|
| 557 |
-
•
|
| 558 |
-
•
|
| 559 |
-
"""
|
| 560 |
|
| 561 |
-
|
| 562 |
-
if 'category_summary' in summary and summary['category_summary']:
|
| 563 |
-
metrics_text += "\n=== آمار دستهبندیها ===\n"
|
| 564 |
-
for category, stats in summary['category_summary'].items():
|
| 565 |
-
metrics_text += f"""
|
| 566 |
-
{category}:
|
| 567 |
-
• تعداد متون دارای این دسته: {stats.get('count_texts_with_category', 0)}
|
| 568 |
-
• میانگین Precision: {stats.get('average_precision', 0):.4f}
|
| 569 |
-
• میانگین Recall: {stats.get('average_recall', 0):.4f}
|
| 570 |
-
• میانگین F1-Score: {stats.get('average_f1', 0):.4f}
|
| 571 |
-
• کل Entities اصلی: {stats.get('total_original', 0)}
|
| 572 |
-
• کل Entities ناشناسسازی شده: {stats.get('total_anonymized', 0)}
|
| 573 |
"""
|
| 574 |
-
|
| 575 |
-
|
| 576 |
-
|
| 577 |
-
|
| 578 |
-
|
| 579 |
-
results_df.to_csv("benchmark_results_detailed.csv", index=False, encoding='utf-8-sig')
|
| 580 |
-
with open("benchmark_results_summary.json", 'w', encoding='utf-8') as f:
|
| 581 |
-
json.dump(summary, f, ensure_ascii=False, indent=2)
|
| 582 |
-
logger.info("Results saved successfully")
|
| 583 |
-
except Exception as e:
|
| 584 |
-
logger.error(f"Error saving results: {e}")
|
| 585 |
-
|
| 586 |
-
# انتخاب ستونهای مهم برای نمایش
|
| 587 |
-
display_columns = ['row_id', 'precision', 'recall', 'f1_score', 'accuracy', 'total_original_entities', 'total_anonymized_entities']
|
| 588 |
-
display_df = results_df[display_columns] if all(col in results_df.columns for col in display_columns) else results_df
|
| 589 |
-
|
| 590 |
-
return (
|
| 591 |
-
metrics_text,
|
| 592 |
-
display_df,
|
| 593 |
-
summary
|
| 594 |
-
)
|
| 595 |
-
|
| 596 |
except Exception as e:
|
| 597 |
-
|
| 598 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 599 |
|
| 600 |
def download_results():
|
| 601 |
-
"""دانلود
|
| 602 |
-
if os.path.exists("
|
| 603 |
-
return "
|
| 604 |
return None
|
| 605 |
|
| 606 |
-
# ===== رابط اصلی
|
| 607 |
def main():
|
| 608 |
-
"
|
| 609 |
-
|
| 610 |
-
with gr.Blocks(title="Anonymization Benchmark", theme=gr.themes.Soft()) as demo:
|
| 611 |
|
| 612 |
gr.HTML("""
|
| 613 |
<h1 style='text-align: center; color: #2E86AB; margin-bottom: 30px;'>
|
| 614 |
-
|
| 615 |
</h1>
|
| 616 |
""")
|
| 617 |
|
| 618 |
with gr.Row():
|
| 619 |
with gr.Column():
|
| 620 |
-
gr.HTML("<h3>📁 آپلود فایل CSV</h3>")
|
| 621 |
gr.HTML("""
|
| 622 |
-
<div style='background: #
|
| 623 |
-
<
|
| 624 |
-
|
| 625 |
-
|
| 626 |
-
|
|
|
|
|
|
|
|
|
|
| 627 |
</div>
|
| 628 |
""")
|
| 629 |
|
| 630 |
file_input = gr.File(
|
| 631 |
-
label="فایل CSV را
|
| 632 |
file_types=[".csv"],
|
| 633 |
file_count="single"
|
| 634 |
)
|
| 635 |
|
| 636 |
-
|
| 637 |
-
benchmark_btn = gr.Button("🚀 شروع Benchmark", variant="primary")
|
| 638 |
-
sample_btn = gr.Button("📄 ایجاد فایل نمونه", variant="secondary")
|
| 639 |
|
| 640 |
with gr.Row():
|
| 641 |
with gr.Column():
|
| 642 |
-
gr.HTML("<h3>
|
| 643 |
|
| 644 |
-
|
| 645 |
-
label="
|
| 646 |
-
lines=
|
| 647 |
-
max_lines=
|
| 648 |
interactive=False
|
| 649 |
)
|
| 650 |
|
| 651 |
with gr.Row():
|
| 652 |
with gr.Column():
|
| 653 |
-
gr.HTML("<h3>📋 جدول نتایج
|
| 654 |
|
| 655 |
results_table = gr.Dataframe(
|
| 656 |
-
label="
|
| 657 |
interactive=False,
|
| 658 |
wrap=True
|
| 659 |
)
|
| 660 |
|
| 661 |
with gr.Row():
|
| 662 |
with gr.Column():
|
| 663 |
-
download_btn = gr.Button("💾 دانلود نتایج
|
| 664 |
download_file = gr.File(label="فایل نتایج", visible=False)
|
| 665 |
|
| 666 |
-
with gr.Row():
|
| 667 |
-
with gr.Column():
|
| 668 |
-
gr.HTML("""
|
| 669 |
-
<div style='background: #fff8dc; padding: 15px; border-radius: 10px; margin-top: 15px;'>
|
| 670 |
-
<h4>🔍 راهنمای استفاده:</h4>
|
| 671 |
-
<ol>
|
| 672 |
-
<li>ابتدا با دکمه "ایجاد فایل نمونه" یک فایل تست ایجاد کنید</li>
|
| 673 |
-
<li>فایل CSV خود را آپلود کنید (حتماً شامل ستونهای original_text و anonymized_text باشد)</li>
|
| 674 |
-
<li>روی "شروع Benchmark" کلیک کنید</li>
|
| 675 |
-
<li>نتایج را در بخشهای بالا مشاهده کنید</li>
|
| 676 |
-
<li>در صورت نیاز فایل کامل نتایج را دانلود کنید</li>
|
| 677 |
-
</ol>
|
| 678 |
-
</div>
|
| 679 |
-
""")
|
| 680 |
-
|
| 681 |
# Event handlers
|
| 682 |
-
|
| 683 |
-
|
| 684 |
-
return "لطفاً ابتدا فایل CSV را آپلود کنید.", None, gr.update()
|
| 685 |
-
return process_csv_file(file)
|
| 686 |
-
|
| 687 |
-
def handle_sample_creation():
|
| 688 |
-
result = create_sample_csv()
|
| 689 |
-
return gr.update(value=result, visible=True)
|
| 690 |
-
|
| 691 |
-
benchmark_btn.click(
|
| 692 |
-
fn=handle_benchmark_click,
|
| 693 |
inputs=[file_input],
|
| 694 |
-
outputs=[
|
| 695 |
-
)
|
| 696 |
-
|
| 697 |
-
sample_btn.click(
|
| 698 |
-
fn=handle_sample_creation,
|
| 699 |
-
outputs=[gr.Textbox(visible=False)]
|
| 700 |
)
|
| 701 |
|
| 702 |
download_btn.click(
|
|
@@ -712,11 +513,12 @@ def main():
|
|
| 712 |
return demo
|
| 713 |
|
| 714 |
demo = main()
|
|
|
|
| 715 |
if __name__ == "__main__":
|
| 716 |
port = int(os.getenv("PORT", "7860"))
|
| 717 |
demo.launch(
|
| 718 |
share=False,
|
| 719 |
-
server_name="0.0.0.0",
|
| 720 |
server_port=port,
|
| 721 |
show_error=True
|
| 722 |
)
|
|
|
|
| 1 |
#!/usr/bin/env python3
|
| 2 |
# -*- coding: utf-8 -*-
|
| 3 |
"""
|
| 4 |
+
سیستم benchmark برای ناشناسسازی - فقط پردازش فایل آپلودی کاربر
|
| 5 |
"""
|
| 6 |
|
| 7 |
import pandas as pd
|
|
|
|
| 34 |
else:
|
| 35 |
return obj
|
| 36 |
|
| 37 |
+
# ===== کلاس پردازش entities =====
|
| 38 |
+
class EntityExtractor:
|
| 39 |
def __init__(self):
|
| 40 |
+
self.patterns = {
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 41 |
'COMPANY': [
|
|
|
|
|
|
|
|
|
|
| 42 |
r'شرکت\s+[\u0600-\u06FF\u0750-\u077F\s\u200C]+',
|
| 43 |
+
r'بانک\s+[\u0600-\u06FF\u0750-\u077F\s\u200C]+',
|
| 44 |
+
r'[\u0600-\u06FF\u0750-\u077F\s\u200C]*(?:پتروشیمی|بانک|شرکت|صنایع|تولید)[\u0600-\u06FF\u0750-\u077F\s\u200C]*',
|
| 45 |
+
r'[A-Z][a-zA-Z\s]+(?:Inc|Corp|Corporation|Company|Ltd|Limited|LLC)',
|
| 46 |
],
|
| 47 |
|
| 48 |
'LOCATION': [
|
| 49 |
r'بندر\s+[\u0600-\u06FF\u0750-\u077F\s\u200C]+',
|
| 50 |
r'شهر\s+[\u0600-\u06FF\u0750-\u077F\s\u200C]+',
|
| 51 |
r'استان\s+[\u0600-\u06FF\u0750-\u077F\s\u200C]+',
|
| 52 |
+
r'\b(?:تهران|اصفهان|ماهشهر|عسلویه|بندرعباس|اهواز|شیراز|مشهد|تبریز|کرج|قم|رشت|کرمان|یزد|زاهدان|بوشهر|خرمشهر|آبادان|اراک|قزوین|خوزستان)\b',
|
| 53 |
r'\b(?:ایران|عراق|کویت|عربستان|امارات|قطر|عمان|بحرین|ترکیه|پاکستان|افغانستان)\b',
|
| 54 |
+
r'\b(?:London|Paris|Tokyo|New\s+York|Dubai|Singapore|Hong\s+Kong)\b'
|
| 55 |
+
],
|
| 56 |
+
|
| 57 |
+
'PERSON': [
|
| 58 |
+
r'آقای\s+[\u0600-\u06FF\u0750-\u077F\s\u200C]+',
|
| 59 |
+
r'خانم\s+[\u0600-\u06FF\u0750-\u077F\s\u200C]+',
|
| 60 |
+
r'مهندس\s+[\u0600-\u06FF\u0750-\u077F\s\u200C]+',
|
| 61 |
+
r'دکتر\s+[\u0600-\u06FF\u0750-\u077F\s\u200C]+',
|
| 62 |
+
r'مدیرعامل\s+[\u0600-\u06FF\u0750-\u077F\s\u200C]+',
|
| 63 |
+
r'[\u0600-\u06FF\u0750-\u077F\s\u200C]+\s+مدیرعامل',
|
| 64 |
+
r'Mr\.\s+[a-zA-Z\s]+',
|
| 65 |
+
r'Ms\.\s+[a-zA-Z\s]+',
|
| 66 |
+
r'Dr\.\s+[a-zA-Z\s]+'
|
| 67 |
],
|
| 68 |
|
| 69 |
'DATE': [
|
|
|
|
| 76 |
r'(?:13[0-9]{2}|14[0-9]{2}|20[0-9]{2}|19[0-9]{2})(?=\s|$|،|\.)'
|
| 77 |
],
|
| 78 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 79 |
'PHONE': [
|
| 80 |
+
r'(?:شماره[\s]*تماس[\s:]*)?(?:0)?9[\u06F0-\u06F90-9]{9}',
|
| 81 |
+
r'(?:تلفن[\s:]*)?(?:0)?[\u06F0-\u06F90-9]{2,3}[-\s]?[\u06F0-\u06F90-9]{7,8}',
|
|
|
|
|
|
|
| 82 |
r'[\u06F0-\u06F90-9]{11}(?!\d)',
|
| 83 |
+
r'\+[0-9]{1,3}[-\s][0-9]{3}[-\s][0-9]{3}[-\s][0-9]{4}',
|
|
|
|
|
|
|
| 84 |
],
|
| 85 |
|
| 86 |
'EMAIL': [
|
| 87 |
r'[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}',
|
|
|
|
|
|
|
|
|
|
|
|
|
| 88 |
],
|
| 89 |
|
| 90 |
'AMOUNT': [
|
| 91 |
r'\d+(?:,\d{3})*\s*(?:میلیون|میلیارد|هزار)\s*تومان',
|
| 92 |
r'مبلغ\s+\d+(?:,\d{3})*\s*(?:میلیون|میلیارد|هزار)?\s*تومان',
|
| 93 |
r'\d+\s*تومان',
|
| 94 |
+
r'\$\d+(?:,\d{3})*(?:\.\d+)?(?:\s*(?:million|billion|thousand|M|B|K))?',
|
| 95 |
r'\d+(?:,\d{3})*\s*ریال'
|
| 96 |
],
|
| 97 |
|
| 98 |
'ACCOUNT': [
|
| 99 |
+
r'(?:شماره[\s]*حساب[\s:]*)?[\u06F0-\u06F90-9]{3}[-\s]?[\u06F0-\u06F90-9]{3}[-\s]?[\u06F0-\u06F90-9]{6,12}',
|
| 100 |
+
r'(?:حساب[\s]*شماره[\s:]*)?[\u06F0-\u06F90-9]{8,20}',
|
|
|
|
|
|
|
|
|
|
| 101 |
]
|
| 102 |
}
|
| 103 |
+
|
| 104 |
+
def clean_entity(self, text):
|
| 105 |
+
"""تمیز کردن entity استخراج شده"""
|
| 106 |
+
# حذف کلمات اضافی در انتها
|
| 107 |
+
text = re.sub(r'\s*(در|که|با|به|از|را|و|یا)\s*$', '', text).strip()
|
|
|
|
| 108 |
# حذف فاصلههای اضافی
|
| 109 |
+
text = re.sub(r'\s+', ' ', text).strip()
|
| 110 |
+
return text
|
| 111 |
+
|
| 112 |
+
def is_valid_entity(self, text):
|
| 113 |
+
"""بررسی معتبر بودن entity"""
|
| 114 |
+
if len(text) < 2:
|
| 115 |
return False
|
| 116 |
|
| 117 |
+
# کلمات ممنوع
|
| 118 |
+
forbidden = ['شد', 'کرد', 'است', 'بود', 'در', 'که', 'با', 'از', 'به', 'را', 'و', 'یا']
|
| 119 |
+
if text.lower() in forbidden:
|
| 120 |
+
return False
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 121 |
|
| 122 |
return True
|
| 123 |
+
|
| 124 |
+
def extract_entities(self, text):
|
| 125 |
+
"""استخراج entities از متن"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 126 |
if not text or text.strip() == '':
|
| 127 |
+
return {}
|
| 128 |
|
| 129 |
+
entities = {}
|
|
|
|
| 130 |
|
| 131 |
+
for category, pattern_list in self.patterns.items():
|
| 132 |
+
found_entities = []
|
| 133 |
+
|
| 134 |
for pattern_str in pattern_list:
|
| 135 |
try:
|
|
|
|
| 136 |
pattern = re.compile(pattern_str, re.IGNORECASE | re.MULTILINE)
|
|
|
|
| 137 |
matches = pattern.finditer(text)
|
| 138 |
+
|
| 139 |
for match in matches:
|
| 140 |
+
entity = self.clean_entity(match.group(0))
|
| 141 |
+
if self.is_valid_entity(entity):
|
| 142 |
+
found_entities.append(entity)
|
| 143 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 144 |
except re.error as e:
|
| 145 |
logger.error(f"Regex error in pattern {pattern_str}: {e}")
|
| 146 |
continue
|
| 147 |
+
|
| 148 |
+
# حذف تکراریها و مرتبسازی
|
| 149 |
+
if found_entities:
|
| 150 |
+
entities[category] = sorted(list(set(found_entities)))
|
|
|
|
|
|
|
|
|
|
| 151 |
|
| 152 |
+
return entities
|
| 153 |
|
| 154 |
+
def extract_anonymized_codes(self, text):
|
| 155 |
+
"""استخراج کدهای ناشناسسازی"""
|
| 156 |
+
if not text or text.strip() == '':
|
| 157 |
+
return {}
|
|
|
|
|
|
|
| 158 |
|
| 159 |
+
codes = {}
|
| 160 |
pattern = r'([a-zA-Z_]+)_(\d{3})'
|
| 161 |
|
| 162 |
try:
|
| 163 |
+
matches = re.finditer(pattern, text, re.IGNORECASE)
|
| 164 |
for match in matches:
|
| 165 |
category = match.group(1).upper()
|
| 166 |
code = match.group(0)
|
| 167 |
+
|
| 168 |
+
if category not in codes:
|
| 169 |
+
codes[category] = []
|
| 170 |
+
codes[category].append(code)
|
| 171 |
+
|
| 172 |
except Exception as e:
|
| 173 |
+
logger.error(f"Error extracting codes: {e}")
|
| 174 |
|
| 175 |
+
# حذف تکراریها
|
| 176 |
+
for category in codes:
|
| 177 |
+
codes[category] = sorted(list(set(codes[category])))
|
| 178 |
|
| 179 |
+
return codes
|
| 180 |
+
|
| 181 |
+
# ===== کلاس Benchmark =====
|
| 182 |
+
class AnonymizationBenchmark:
|
| 183 |
+
def __init__(self):
|
| 184 |
+
self.extractor = EntityExtractor()
|
| 185 |
|
| 186 |
+
def analyze_single_row(self, original_text, anonymized_text):
|
| 187 |
+
"""تحلیل یک ردیف از CSV"""
|
| 188 |
+
print(f"\n{'='*60}")
|
| 189 |
+
print("تحلیل دقیق ردیف:")
|
| 190 |
+
print(f"{'='*60}")
|
| 191 |
|
| 192 |
+
print(f"\n📝 متن اصلی:")
|
| 193 |
+
print(f"'{original_text}'")
|
| 194 |
+
print(f"طول: {len(original_text)} کاراکتر")
|
| 195 |
+
|
| 196 |
+
print(f"\n🔒 متن ناشناسسازی شده:")
|
| 197 |
+
print(f"'{anonymized_text}'")
|
| 198 |
+
print(f"طول: {len(anonymized_text)} کاراکتر")
|
| 199 |
|
| 200 |
# استخراج entities از متن اصلی
|
| 201 |
+
print(f"\n🔍 استخراج Entities از متن اصلی:")
|
| 202 |
+
original_entities = self.extractor.extract_entities(original_text)
|
| 203 |
+
|
| 204 |
+
total_original_entities = 0
|
| 205 |
+
for category, entities in original_entities.items():
|
| 206 |
+
print(f" {category}: {len(entities)} عدد")
|
| 207 |
+
for i, entity in enumerate(entities, 1):
|
| 208 |
+
print(f" {i}. '{entity}'")
|
| 209 |
+
total_original_entities += len(entities)
|
| 210 |
+
|
| 211 |
+
if not original_entities:
|
| 212 |
+
print(" هیچ entity ای یافت نشد!")
|
| 213 |
+
else:
|
| 214 |
+
print(f"\n✅ مجموع entities یافت شده: {total_original_entities}")
|
| 215 |
|
| 216 |
+
# استخراج کدهای ناشناسسازی
|
| 217 |
+
print(f"\n🔍 استخراج کدهای ناشناسسازی:")
|
| 218 |
+
anonymized_codes = self.extractor.extract_anonymized_codes(anonymized_text)
|
| 219 |
|
| 220 |
+
total_anonymized_codes = 0
|
| 221 |
+
for category, codes in anonymized_codes.items():
|
| 222 |
+
print(f" {category}: {len(codes)} عدد")
|
| 223 |
+
for i, code in enumerate(codes, 1):
|
| 224 |
+
print(f" {i}. '{code}'")
|
| 225 |
+
total_anonymized_codes += len(codes)
|
| 226 |
+
|
| 227 |
+
if not anonymized_codes:
|
| 228 |
+
print(" هیچ کد ناشناسسازی یافت نشد!")
|
| 229 |
+
else:
|
| 230 |
+
print(f"\n✅ مجموع کدهای ناشناسسازی: {total_anonymized_codes}")
|
| 231 |
+
|
| 232 |
+
# محاسبه متریکها
|
| 233 |
+
print(f"\n📊 محاسبه متریکها:")
|
| 234 |
category_metrics = {}
|
| 235 |
total_tp, total_fp, total_fn = 0, 0, 0
|
| 236 |
|
|
|
|
| 237 |
all_categories = set(original_entities.keys()) | set(anonymized_codes.keys())
|
| 238 |
|
| 239 |
for category in all_categories:
|
| 240 |
original_count = len(original_entities.get(category, []))
|
| 241 |
anonymized_count = len(anonymized_codes.get(category, []))
|
| 242 |
|
|
|
|
| 243 |
tp = min(original_count, anonymized_count)
|
|
|
|
|
|
|
| 244 |
fp = max(0, anonymized_count - original_count)
|
|
|
|
|
|
|
| 245 |
fn = max(0, original_count - anonymized_count)
|
| 246 |
|
|
|
|
| 247 |
precision = tp / (tp + fp) if (tp + fp) > 0 else 0
|
| 248 |
recall = tp / (tp + fn) if (tp + fn) > 0 else 0
|
| 249 |
f1_score = 2 * (precision * recall) / (precision + recall) if (precision + recall) > 0 else 0
|
| 250 |
|
| 251 |
+
print(f"\n {category}:")
|
| 252 |
+
print(f" Original: {original_count}, Anonymized: {anonymized_count}")
|
| 253 |
+
print(f" TP: {tp}, FP: {fp}, FN: {fn}")
|
| 254 |
+
print(f" Precision: {precision:.4f}, Recall: {recall:.4f}, F1: {f1_score:.4f}")
|
| 255 |
+
|
| 256 |
category_metrics[category] = {
|
| 257 |
'original_count': original_count,
|
| 258 |
'anonymized_count': anonymized_count,
|
| 259 |
+
'tp': tp, 'fp': fp, 'fn': fn,
|
| 260 |
+
'precision': precision, 'recall': recall, 'f1_score': f1_score
|
|
|
|
|
|
|
|
|
|
|
|
|
| 261 |
}
|
| 262 |
|
| 263 |
total_tp += tp
|
| 264 |
total_fp += fp
|
| 265 |
total_fn += fn
|
| 266 |
|
| 267 |
+
# متریکهای کلی
|
| 268 |
overall_precision = total_tp / (total_tp + total_fp) if (total_tp + total_fp) > 0 else 0
|
| 269 |
overall_recall = total_tp / (total_tp + total_fn) if (total_tp + total_fn) > 0 else 0
|
| 270 |
overall_f1 = 2 * (overall_precision * overall_recall) / (overall_precision + overall_recall) if (overall_precision + overall_recall) > 0 else 0
|
| 271 |
+
accuracy = total_tp / total_original_entities if total_original_entities > 0 else 0
|
| 272 |
|
| 273 |
+
print(f"\n🎯 متریکهای کلی:")
|
| 274 |
+
print(f" Precision: {overall_precision:.4f}")
|
| 275 |
+
print(f" Recall: {overall_recall:.4f}")
|
| 276 |
+
print(f" F1-Score: {overall_f1:.4f}")
|
| 277 |
+
print(f" Accuracy: {accuracy:.4f}")
|
| 278 |
|
| 279 |
return {
|
| 280 |
'original_entities': original_entities,
|
| 281 |
'anonymized_codes': anonymized_codes,
|
| 282 |
'category_metrics': category_metrics,
|
| 283 |
'overall_metrics': {
|
| 284 |
+
'total_original_entities': total_original_entities,
|
| 285 |
+
'total_anonymized_entities': total_anonymized_codes,
|
| 286 |
'total_tp': total_tp,
|
| 287 |
'total_fp': total_fp,
|
| 288 |
'total_fn': total_fn,
|
|
|
|
| 293 |
}
|
| 294 |
}
|
| 295 |
|
| 296 |
+
def process_csv(self, csv_file_path):
    """Benchmark every row of a CSV file.

    The file must contain the columns ``original_text`` and
    ``anonymized_text``.  On success returns a tuple
    ``(results_dataframe, analyses)``; on any failure a human-readable
    error string is returned instead (callers check ``isinstance(..., str)``).
    """
    try:
        # Persian CSVs come in several encodings; probe until one decodes.
        frame = None
        for enc in ('utf-8', 'utf-8-sig', 'cp1256', 'windows-1256'):
            try:
                frame = pd.read_csv(csv_file_path, encoding=enc)
                print(f"✅ فایل با encoding {enc} خوانده شد")
                break
            except UnicodeDecodeError:
                continue

        if frame is None:
            return "❌ خطا: نمیتوان فایل را با هیچ encoding خواند"

        print(f"📋 اطلاعات فایل CSV:")
        print(f" تعداد ردیفها: {len(frame)}")
        print(f" ستونها: {frame.columns.tolist()}")

        # Guard clauses: schema first, then emptiness.
        if any(col not in frame.columns for col in ('original_text', 'anonymized_text')):
            return f"❌ خطا: فایل باید شامل ستونهای 'original_text' و 'anonymized_text' باشد. ستونهای موجود: {frame.columns.tolist()}"

        if len(frame) == 0:
            return "❌ خطا: فایل خالی است"

        row_records = []
        analyses = []

        for row_idx, record in frame.iterrows():
            print(f"\n🔄 پردازش ردیف {row_idx + 1} از {len(frame)}")

            source_text = str(record['original_text']) if pd.notna(record['original_text']) else ""
            masked_text = str(record['anonymized_text']) if pd.notna(record['anonymized_text']) else ""

            # Skip rows where both cells are blank.
            if not source_text.strip() and not masked_text.strip():
                print("⚠️ ردیف خالی است، رد میشود")
                continue

            # Detailed per-row analysis (also prints its own report).
            analysis = self.analyze_single_row(source_text, masked_text)
            analyses.append(analysis)

            # Flatten the analysis into one CSV-friendly record.
            entry = {
                'row_id': int(row_idx),
                'original_text': source_text,
                'anonymized_text': masked_text,
            }
            entry.update(
                (key, convert_to_serializable(value))
                for key, value in analysis['overall_metrics'].items()
            )
            for cat, cat_metrics in analysis['category_metrics'].items():
                for metric_name, value in cat_metrics.items():
                    entry[f'{cat.lower()}_{metric_name}'] = convert_to_serializable(value)

            row_records.append(entry)

        if not row_records:
            return "❌ خطا: هیچ ردیف معتبری برای پردازش یافت نشد"

        return pd.DataFrame(row_records), analyses

    except Exception as e:
        return f"❌ خطا در پردازش: {str(e)}"
|
|
|
|
| 363 |
|
| 364 |
+
# ===== رابط Gradio =====
|
| 365 |
+
def process_uploaded_file(file):
    """Handle a CSV upload from the Gradio UI.

    Runs the benchmark over the uploaded file and returns a tuple
    ``(report_text, metrics_dataframe)``.  On any error the first element
    is the error message and the second is ``None``.
    """
    if file is None:
        return "❌ لطفاً ابتدا فایل CSV را آپلود کنید.", None

    print(f"\n🚀 شروع پردازش فایل: {file.name}")

    benchmark = AnonymizationBenchmark()
    result = benchmark.process_csv(file.name)

    if isinstance(result, str):
        # process_csv signals failure with an error string.
        return result, None

    results_df, all_analysis = result

    # Aggregate figures for the final report; guard each column in case a
    # metric is absent from the results frame.
    total_rows = len(results_df)
    avg_precision = results_df['precision'].mean() if 'precision' in results_df.columns else 0
    avg_recall = results_df['recall'].mean() if 'recall' in results_df.columns else 0
    avg_f1 = results_df['f1_score'].mean() if 'f1_score' in results_df.columns else 0
    avg_accuracy = results_df['accuracy'].mean() if 'accuracy' in results_df.columns else 0

    total_original = results_df['total_original_entities'].sum() if 'total_original_entities' in results_df.columns else 0
    total_anonymized = results_df['total_anonymized_entities'].sum() if 'total_anonymized_entities' in results_df.columns else 0
    total_tp = results_df['total_tp'].sum() if 'total_tp' in results_df.columns else 0
    total_fp = results_df['total_fp'].sum() if 'total_fp' in results_df.columns else 0
    total_fn = results_df['total_fn'].sum() if 'total_fn' in results_df.columns else 0

    report = f"""
{'='*60}
📊 گزارش نهایی Benchmark
{'='*60}

📈 آمار کلی:
• تعداد ردیفهای پردازش شده: {total_rows}
• مجموع Entities اصلی: {total_original}
• مجموع کدهای ناشناسسازی: {total_anonymized}
• True Positives: {total_tp}
• False Positives: {total_fp}
• False Negatives: {total_fn}

🎯 متریکهای میانگین:
• Precision: {avg_precision:.4f}
• Recall: {avg_recall:.4f}
• F1-Score: {avg_f1:.4f}
• Accuracy: {avg_accuracy:.4f}

📋 جزئیات هر ردیف در جدول زیر نمایش داده شده است.
"""

    # Persist results for the download button; a write failure is non-fatal.
    try:
        results_df.to_csv("benchmark_results.csv", index=False, encoding='utf-8-sig')
        print("✅ نتایج در فایل benchmark_results.csv ذخیره شد")
    except Exception as e:
        print(f"⚠️ خطا در ذخیره فایل: {e}")

    # FIX: the TP/FP/FN columns in results_df are named 'total_tp',
    # 'total_fp' and 'total_fn' (see process_csv / overall_metrics); the old
    # names 'tp'/'fp'/'fn' never matched, so the guard filter below silently
    # dropped those columns from the displayed table.
    display_columns = ['row_id', 'total_original_entities', 'total_anonymized_entities',
                       'total_tp', 'total_fp', 'total_fn',
                       'precision', 'recall', 'f1_score', 'accuracy']

    display_df = results_df[[col for col in display_columns if col in results_df.columns]]

    return report, display_df
|
| 431 |
|
| 432 |
def download_results():
    """Return the path of the saved results CSV, or None if it was never written."""
    path = "benchmark_results.csv"
    return path if os.path.exists(path) else None
|
| 437 |
|
| 438 |
+
# ===== رابط اصلی =====
|
| 439 |
def main():
|
| 440 |
+
with gr.Blocks(title="Benchmark System", theme=gr.themes.Soft()) as demo:
|
|
|
|
|
|
|
| 441 |
|
| 442 |
gr.HTML("""
|
| 443 |
<h1 style='text-align: center; color: #2E86AB; margin-bottom: 30px;'>
|
| 444 |
+
🎯 سیستم Benchmark ناشناسسازی - پردازش فایل آپلودی
|
| 445 |
</h1>
|
| 446 |
""")
|
| 447 |
|
| 448 |
with gr.Row():
|
| 449 |
with gr.Column():
|
|
|
|
| 450 |
gr.HTML("""
|
| 451 |
+
<div style='background: #e8f4fd; padding: 15px; border-radius: 10px; margin-bottom: 15px;'>
|
| 452 |
+
<h3>📋 نحوه کارکرد:</h3>
|
| 453 |
+
<ol>
|
| 454 |
+
<li><b>ستون اول (original_text)</b>: سیستم تمام entities را پیدا میکند</li>
|
| 455 |
+
<li><b>ستون دوم (anonymized_text)</b>: کدهای ناشناسسازی را پیدا میکند</li>
|
| 456 |
+
<li><b>مقایسه</b>: متریکهای Precision, Recall, F1-Score, Accuracy محاسبه میشود</li>
|
| 457 |
+
<li><b>نتیجه</b>: تحلیل دقیق هر ردیف نمایش داده میشود</li>
|
| 458 |
+
</ol>
|
| 459 |
</div>
|
| 460 |
""")
|
| 461 |
|
| 462 |
file_input = gr.File(
|
| 463 |
+
label="📁 فایل CSV خود را آپلود کنید",
|
| 464 |
file_types=[".csv"],
|
| 465 |
file_count="single"
|
| 466 |
)
|
| 467 |
|
| 468 |
+
process_btn = gr.Button("🚀 شروع پردازش فایل آپلودی", variant="primary", size="lg")
|
|
|
|
|
|
|
| 469 |
|
| 470 |
with gr.Row():
|
| 471 |
with gr.Column():
|
| 472 |
+
gr.HTML("<h3>📊 نتایج تحلیل</h3>")
|
| 473 |
|
| 474 |
+
results_output = gr.Textbox(
|
| 475 |
+
label="گزارش کامل",
|
| 476 |
+
lines=25,
|
| 477 |
+
max_lines=30,
|
| 478 |
interactive=False
|
| 479 |
)
|
| 480 |
|
| 481 |
with gr.Row():
|
| 482 |
with gr.Column():
|
| 483 |
+
gr.HTML("<h3>📋 جدول نتایج</h3>")
|
| 484 |
|
| 485 |
results_table = gr.Dataframe(
|
| 486 |
+
label="متریکهای هر ردیف",
|
| 487 |
interactive=False,
|
| 488 |
wrap=True
|
| 489 |
)
|
| 490 |
|
| 491 |
with gr.Row():
|
| 492 |
with gr.Column():
|
| 493 |
+
download_btn = gr.Button("💾 دانلود نتایج", variant="secondary")
|
| 494 |
download_file = gr.File(label="فایل نتایج", visible=False)
|
| 495 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 496 |
# Event handlers
|
| 497 |
+
process_btn.click(
|
| 498 |
+
fn=process_uploaded_file,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 499 |
inputs=[file_input],
|
| 500 |
+
outputs=[results_output, results_table]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 501 |
)
|
| 502 |
|
| 503 |
download_btn.click(
|
|
|
|
| 513 |
return demo
|
| 514 |
|
| 515 |
# Build the Gradio app at import time so hosting platforms can serve `demo`.
demo = main()

if __name__ == "__main__":
    # Honor a platform-supplied PORT, defaulting to Gradio's standard 7860.
    demo.launch(
        share=False,
        server_name="0.0.0.0",
        server_port=int(os.getenv("PORT", "7860")),
        show_error=True,
    )
|