hajimammad committed on
Commit c5a3c0b · verified · 1 Parent(s): c08cf48

Upload golden_builder.py

Files changed (1)
  1. golden_builder.py +186 -0
golden_builder.py ADDED
@@ -0,0 +1,186 @@
# golden_builder.py
# -*- coding: utf-8 -*-
import json
import re
import logging
import hashlib
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, List, Optional
from collections import Counter

import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

log = logging.getLogger("golden-builder")
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")

# ========= Utilities =========
PERSIAN_MAP = {'ك': 'ک', 'ى': 'ی', 'ﻲ': 'ی', 'ﯽ': 'ی', 'أ': 'ا', 'إ': 'ا'}
NOISE = [r"http[s]?://\S+", r"www\.\S+", r"\d{10,}", r"(.)\1{4,}", r"[^\u0600-\u06FF\s\d\.,;:!?()\"'\-]+"]


def clean_text(s: str) -> str:
    """Normalize Arabic-presentation characters, strip noisy spans, and fix punctuation spacing."""
    if not isinstance(s, str):
        return ""
    for a, b in PERSIAN_MAP.items():
        s = s.replace(a, b)
    for pat in NOISE:
        s = re.sub(pat, " ", s)
    s = re.sub(r"\s+", " ", s)                      # collapse whitespace
    s = re.sub(r"\.{2,}", "...", s)                 # normalize long ellipses
    s = re.sub(r"\s+([،.;:!?])", r"\1", s)          # no space before punctuation
    s = re.sub(r"([،.;:!?])(?=[^\s])", r"\1 ", s)   # ensure a space after punctuation
    return s.strip()


def md5(s: str) -> str:
    return hashlib.md5(s.encode("utf-8")).hexdigest()


def lex_diversity(s: str) -> float:
    """Type/token ratio: unique tokens over total tokens."""
    toks = s.split()
    return 0.0 if not toks else len(set(toks)) / len(toks)


def has_repetition(s: str, n: int = 3) -> bool:
    """True if any n-gram occurs more than twice (a sign of degenerate output)."""
    toks = s.split()
    if len(toks) < n:
        return False
    grams = [tuple(toks[i:i + n]) for i in range(len(toks) - n + 1)]
    return any(c > 2 for c in Counter(grams).values())

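# Quick sanity check of the normalizer (hypothetical input; the URL is removed by the
# first NOISE pattern, then whitespace is collapsed):
#   clean_text("تماس: http://example.com با ماده 10")  ->  "تماس: با ماده 10"
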
# ========= Lightweight NER (regex spans for metadata) =========
@dataclass
class LegalEntity:
    text: str
    category: str
    start: int
    end: int
    weight: float


class LegalEntityExtractor:
    def __init__(self):
        # Each category maps to (regex patterns, weight); the weight marks how strongly
        # a match signals legal content.
        self._defs = {
            "STATUTE": ([r"قانون\s+(?:اساسی|مدنی|کیفری|کار|تجارت|مجازات)",
                         r"آیین\s+دادرسی\s+(?:مدنی|کیفری)",
                         r"ماده\s+\d+", r"تبصره\s+\d+"], 1.0),
            "COURT": ([r"دیوان\s+(?:عالی|عدالت)", r"دادگاه\s+(?:عمومی|تجدیدنظر|انقلاب)", r"شعبه\s+\d+"], 0.9),
            "CRIME": ([r"کلاهبرداری|اختلاس|ارتشا|خیانت\s+در\s+امانت|جعل|سرقت|قتل"], 1.2),
            "PENALTY": ([r"حبس|جزای\s+نقدی|شلاق|قصاص|دیه|محرومیت\s+از\s+حقوق\s+اجتماعی"], 1.1),
            "CIVIL": ([r"قرارداد|عقد\s+(?:بیع|اجاره|رهن|نکاح)|خسارت|تعهد|ضمان|مطالبه"], 0.8),
            "PROCED": ([r"دادخواست|لایحه|شکواییه|ابلاغ|جلسه\s+دادرسی|کارشناسی|دلایل\s+اثباتی"], 0.7),
            "PARTY": ([r"خواهان|خوانده|شاکی|متهم|وکیل\s+دادگستری|دادستان|قاضی"], 0.6),
            "BUSINESS": ([r"شرکت\s+(?:سهامی|مسئولیت\s+محدود)|ورشکستگی|سهام|چک|سفته|برات"], 0.6),
        }
        self._patterns = []
        for cat, (pats, w) in self._defs.items():
            for p in pats:
                self._patterns.append((re.compile(p, re.IGNORECASE), cat, w))

    def extract(self, text: str) -> List[LegalEntity]:
        out, seen = [], set()
        for rgx, cat, w in self._patterns:
            for m in rgx.finditer(text):
                s, e = m.span()
                if (s, e) in seen:  # skip spans already claimed by another pattern
                    continue
                seen.add((s, e))
                out.append(LegalEntity(m.group(), cat, s, e, w))
        out.sort(key=lambda x: x.start)
        return out

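# Usage sketch (hypothetical sentence; the categories follow the regex table above):
#   ner = LegalEntityExtractor()
#   for e in ner.extract("دادگاه تجدیدنظر به استناد ماده 10 قانون مدنی حکم به حبس صادر کرد"):
#       print(e.category, e.text)
#   # -> COURT "دادگاه تجدیدنظر", STATUTE "ماده 10", STATUTE "قانون مدنی", PENALTY "حبس"
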
# ========= Builder =========
class GoldenBuilder:
    def __init__(self, model_name: str = "google/mt5-base", device: Optional[str] = None,
                 min_len: int = 40, max_len: int = 160, min_entities: int = 2):
        self.device = device or ("cuda" if torch.cuda.is_available() else "cpu")
        log.info("Device: %s", self.device)
        self.tok = AutoTokenizer.from_pretrained(model_name, use_fast=True)
        self.model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(self.device)
        self.model.eval()
        self.min_len, self.max_len = min_len, max_len
        self.min_entities = min_entities
        self._seen_hashes = set()            # md5 hashes of already-processed sources
        self.ner = LegalEntityExtractor()

    def _summarize_batch(self, texts: List[str], num_beams: int = 6) -> List[str]:
        if not texts:
            return []
        inputs = self.tok(texts, return_tensors="pt", truncation=True, padding=True,
                          max_length=512).to(self.device)
        with torch.no_grad():
            ids = self.model.generate(
                **inputs,
                max_length=self.max_len, min_length=self.min_len,
                num_beams=num_beams, early_stopping=True,
                length_penalty=2.5, no_repeat_ngram_size=3, do_sample=False,
            )
        return self.tok.batch_decode(ids, skip_special_tokens=True)

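    # Note: decoding is deterministic beam search (do_sample=False, num_beams=6);
    # length_penalty=2.5 biases beam scoring toward longer summaries, and
    # no_repeat_ngram_size=3 blocks any 3-gram from repeating in the output.
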
    def _quality_gate(self, src: str, tgt: str, ents: List[LegalEntity]) -> bool:
        s_len, t_len = len(src.split()), len(tgt.split())
        if not (30 <= s_len and 20 <= t_len <= 220):
            return False
        comp = t_len / (s_len + 1e-8)                 # compression ratio
        if not (0.12 <= comp <= 0.65):
            return False
        if lex_diversity(tgt) < 0.4:
            return False
        if has_repetition(tgt, 3):
            return False
        if len(ents) < self.min_entities:
            return False
        # Percentage of source characters covered by entity spans.
        ent_density = (sum((e.end - e.start) for e in ents) / max(len(src), 1)) * 100
        if ent_density < 4.0:
            return False
        return True

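    # Worked example (illustrative numbers only): a 300-word source with a 60-word
    # summary gives comp = 60/300 = 0.2, inside the [0.12, 0.65] band; if entity spans
    # cover 50 of 1000 source characters, ent_density = 5.0, clearing the 4.0 floor.
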
    def build(self, raw_items: List[Dict], text_key: str = "متن_کامل", batch_size: int = 4) -> List[Dict]:
        rows, i = [], 0
        N = len(raw_items)
        while i < N:
            chunk = raw_items[i:i + batch_size]
            cleaned = [clean_text(str(it.get(text_key, ""))) for it in chunk]
            # De-duplicate and drop texts that are too short.
            todo = []
            for c in cleaned:
                if len(c.split()) < 30:
                    todo.append("")
                    continue
                h = md5(c)
                if h in self._seen_hashes:
                    todo.append("")
                    continue
                self._seen_hashes.add(h)
                todo.append(c)
            # Summarize only the surviving items.
            inputs = [f"summarize: {t}" for t in todo if t]
            outputs = self._summarize_batch(inputs) if inputs else []
            k = 0
            for c in todo:
                if not c:
                    continue
                tgt = clean_text(outputs[k])
                k += 1
                ents = self.ner.extract(c)
                if not self._quality_gate(c, tgt, ents):
                    continue
                ents_payload = [{"text": e.text, "category": e.category, "start": e.start,
                                 "end": e.end, "weight": e.weight}
                                for e in ents[:20]]
                rows.append({
                    "input": f"summarize: {c}",
                    "output": tgt,
                    "metadata": {
                        "input_length": len(c.split()),
                        "target_length": len(tgt.split()),
                    },
                    "legal_entities": {
                        "total_entities": len(ents),
                        "categories": dict(Counter(e.category for e in ents)),
                        "entities": ents_payload,
                    },
                })
            i += batch_size
        return rows

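    # Shape of one emitted row (illustrative values, abbreviated):
    #   {"input": "summarize: <cleaned source>",
    #    "output": "<model summary>",
    #    "metadata": {"input_length": 120, "target_length": 45},
    #    "legal_entities": {"total_entities": 6,
    #                       "categories": {"STATUTE": 2, "COURT": 1},
    #                       "entities": [{"text": "ماده 10", "category": "STATUTE",
    #                                     "start": 11, "end": 18, "weight": 1.0}]}}
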
# ========= I/O =========
def load_json_or_jsonl(path: str) -> List[Dict]:
    """Load a JSON array/object, falling back to JSONL (one object per line)."""
    p = Path(path)
    raw = p.read_text(encoding="utf-8").strip()
    try:
        data = json.loads(raw)
        return data if isinstance(data, list) else [data]
    except json.JSONDecodeError:
        out = []
        for ln in raw.splitlines():
            ln = ln.strip()
            if not ln:
                continue
            try:
                out.append(json.loads(ln))
            except json.JSONDecodeError:
                pass  # skip malformed lines
        return out


def save_jsonl(rows: List[Dict], out_path: str):
    p = Path(out_path)
    p.parent.mkdir(parents=True, exist_ok=True)
    with p.open("w", encoding="utf-8") as f:
        for r in rows:
            f.write(json.dumps(r, ensure_ascii=False) + "\n")
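

A minimal end-to-end sketch of how the pieces fit together; the input and output file
names here are hypothetical, and the uploaded file does not itself ship a `__main__`
entry point:

if __name__ == "__main__":
    # Hypothetical paths; adjust to your data layout.
    items = load_json_or_jsonl("raw_cases.json")
    builder = GoldenBuilder(model_name="google/mt5-base")
    rows = builder.build(items, text_key="متن_کامل", batch_size=4)
    save_jsonl(rows, "golden_dataset.jsonl")
    log.info("Kept %d of %d raw items", len(rows), len(items))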