hajimammad committed
Commit 9195200 · verified · 1 Parent(s): a1df4d8

Update app.py

Files changed (1): app.py +1006 −181

app.py CHANGED
@@ -1,208 +1,1033 @@
  # -*- coding: utf-8 -*-
- # Mahoon — Minimal RAG + Generation (ZeroGPU-ready, no training)
-
- import os
- import json
  import gradio as gr
-
- # =========================
- # ZeroGPU shim & marker
- # =========================
- try:
-     import spaces  # provided by HF Spaces runtime
- except Exception:
-     class _NoSpaces:
-         @staticmethod
-         def GPU(*a, **k):
-             def w(fn): return fn
-             return w
-     spaces = _NoSpaces()
-
- @spaces.GPU(duration=180)  # having this function prevents the "No @spaces.GPU" error
- def _zgpu_marker():
-     return "ok"
-
- # =========================
- # RAG (Chroma)
- # =========================
  import chromadb
  from chromadb.config import Settings
-
- CHROMA_DIR = os.environ.get("CHROMA_DIR", "./chroma_db")
- CHROMA_COLLECTION = os.environ.get("CHROMA_COLLECTION", "legal_articles")
-
- def _norm_id(x: str) -> str:
-     x = (x or "").replace("\u064A","ی").replace("\u0643","ک")
-     trans = {ord(a): b for a,b in zip("٠١٢٣٤٥٦٧٨٩۰۱۲۳۴۵۶۷۸۹","01234567890123456789")}
-     return x.translate(trans).replace(" ", "")
-
- def build_rag():
-     client = chromadb.PersistentClient(
-         path=CHROMA_DIR,
-         settings=Settings(anonymized_telemetry=False)
-     )
      try:
-         col = client.get_or_create_collection(CHROMA_COLLECTION)
-     except Exception:
-         col = client.get_collection(CHROMA_COLLECTION)
-     return col
-
- def retrieve(col, query: str, top_k: int, thr: float):
      try:
-         res = col.query(
-             query_texts=[query],
-             n_results=int(top_k),
-             include=["documents","metadatas","distances"]
-         )
-         docs = res.get("documents",[[]])[0]
-         metas= res.get("metadatas",[[]])[0]
-         dists= res.get("distances",[[]])[0]
-         out=[]
-         for i,(d,m,dist) in enumerate(zip(docs, metas, dists)):
-             sim = 1.0 - float(dist)
-             if sim >= float(thr):
-                 out.append({
-                     "article_id": _norm_id((m or {}).get("article_id", f"unk_{i}")),
-                     "text": d,
-                     "similarity": sim
-                 })
-         return out
      except Exception:
-         return []
-
- def build_context(arts, limit=320):
-     if not arts: return ""
-     bullets = [f"• ماده {a['article_id']}: {a['text'][:limit]}..." for a in arts]
-     return "مواد مرتبط:\n" + "\n".join(bullets)
-
- # =========================
- # Generation (Transformers)
- # =========================
- # To avoid requiring torch too early, imports happen inside the functions.
- MODEL_CHOICES = {
-     "Qwen2.5-7B Instruct": "Qwen/Qwen2.5-7B-Instruct",
-     "Llama 3.2 3B Instruct": "meta-llama/Llama-3.2-3B-Instruct",
-     "Mistral 7B Instruct v0.2": "mistralai/Mistral-7B-Instruct-v0.2"
- }
- DEFAULT_MODEL_KEY = os.environ.get("DEFAULT_MODEL_KEY", "Llama 3.2 3B Instruct")
-
- _loader = {"tk": None, "model_id": None}
- _rag_col = None
-
- def lazy_bootstrap(selected_key: str):
-     """Connect to the RAG index and warm the tokenizer. Model weights are loaded later, inside the GPU function."""
-     global _rag_col, _loader
-     # RAG
-     if _rag_col is None:
-         try:
-             _rag_col = build_rag()
-         except Exception as e:
-             return f"❌ خطا در اتصال RAG: {e}"
-
-     # Tokenizer
-     wanted = MODEL_CHOICES.get(selected_key, MODEL_CHOICES[DEFAULT_MODEL_KEY])
-     if _loader["model_id"] != wanted or _loader["tk"] is None:
-         from transformers import AutoTokenizer
-         tk = AutoTokenizer.from_pretrained(wanted)
-         if tk.pad_token is None and tk.eos_token:
-             tk.pad_token = tk.eos_token
-         _loader.update({"tk": tk, "model_id": wanted})
-
-     return f"✅ آماده · ایندکس: {CHROMA_COLLECTION} · مدل: {wanted}"
-
- def _format_prompt(context: str, question: str) -> str:
-     if context:
-         return f"{context}\nسوال: {question}\nپاسخ:"
-     return f"سوال: {question}\nپاسخ:"
-
- @spaces.GPU(duration=240)
- def answer_gpu(model_key, question, use_rag, top_k, thr, max_new_tokens, temperature, top_p):
-     """Inference on the GPU (ZeroGPU, per call)."""
-     try:
-         if not question or not question.strip():
-             return "لطفاً سؤال را وارد کنید.", ""
-
          # RAG
-         arts = retrieve(_rag_col, question, int(top_k), float(thr)) if use_rag else []
-         ctx = build_context(arts) if arts else ""
-         prompt = _format_prompt(ctx, question)
-
-         # Load the weights onto the reserved GPU
-         from transformers import AutoTokenizer, AutoModelForCausalLM
-         model_id = _loader["model_id"] or MODEL_CHOICES.get(model_key) or MODEL_CHOICES[DEFAULT_MODEL_KEY]
-         tk = _loader["tk"] or AutoTokenizer.from_pretrained(model_id)
-         mdl = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")  # ZeroGPU → GPU attach
-
-         enc = tk(prompt, return_tensors="pt")
-         enc = {k: v.to(mdl.device) for k,v in enc.items()}
-         out = mdl.generate(
-             **enc,
-             max_new_tokens=int(max_new_tokens),
-             do_sample=True,
-             temperature=float(temperature),
-             top_p=float(top_p),
-             pad_token_id=tk.pad_token_id or tk.eos_token_id
          )
-         text = tk.decode(out[0], skip_special_tokens=True)

          refs = ""
          if arts:
-             refs = "\n\n" + "\n".join([
-                 f"**ماده {a['article_id']}** (شباهت: {a['similarity']:.2f})\n{a['text'][:380]}..."
-                 for a in arts
-             ])
-         return text, refs
-     except Exception as e:
-         return f"❌ خطای اینفرنس: {e}", ""
-
- # =========================
- # UI (Gradio 5.47)
- # =========================
- with gr.Blocks(title="Mahoon — Minimal RAG+Gen", theme=gr.themes.Soft()) as demo:
-     gr.Markdown("""
-     <div style='text-align:center;padding:14px'>
-       <h2 style='margin:0'>ماحون (مینیمال) — پاسخ حقوقی با RAG</h2>
-       <p style='color:#666'>اینفرنس ZeroGPU · ایندکس آماده · بدون آموزش</p>
-     </div>
-     """)
-
-     with gr.Row():
-         model_dd = gr.Dropdown(choices=list(MODEL_CHOICES.keys()),
-                                value=DEFAULT_MODEL_KEY,
-                                label="مدل تولید")
-         use_rag = gr.Checkbox(value=True, label="استفاده از RAG؟")
-         top_k = gr.Slider(1, 10, value=5, step=1, label="Top-K")
-         thr = gr.Slider(0.50, 0.95, value=0.60, step=0.01, label="آستانه شباهت")
-
-     with gr.Accordion("پارامترهای تولید", open=False):
-         max_new_tokens = gr.Slider(64, 1024, value=256, step=16, label="max_new_tokens")
-         temperature = gr.Slider(0.0, 1.5, value=0.7, step=0.05, label="temperature")
-         top_p = gr.Slider(0.1, 1.0, value=0.9, step=0.05, label="top_p")
-
-     question = gr.Textbox(lines=3, label="سؤال")
-     ask_btn = gr.Button("پرسش", variant="primary")
-     answer = gr.Markdown(label="پاسخ")
-     refs = gr.Markdown(label="مواد مرتبط")
-
-     status = gr.Markdown("⏳ آماده‌سازی…")
-
-     def _warmup(mkey):
-         try:
-             return lazy_bootstrap(mkey)
          except Exception as e:
-             return f"Bootstrap error: {e}"

-     demo.load(_warmup, inputs=[model_dd], outputs=status)

-     ask_btn.click(
-         answer_gpu,
-         inputs=[model_dd, question, use_rag, top_k, thr, max_new_tokens, temperature, top_p],
-         outputs=[answer, refs]
-     )

  if __name__ == "__main__":
      try:
-         demo = demo.queue()  # more stable on Gradio 5.x
      except TypeError:
          pass
-     demo.launch(ssr_mode=False)
  # -*- coding: utf-8 -*-
+ """
+ Mahoon Legal AI — Causal-only Generation + Hybrid RAG + W&B + ZeroGPU + Role Gating
+
+ - The "Consultation" tab is interactive for everyone.
+ - The "Indexing", "Dataset Builder", "Cleaning", "Training", and "Weight Tuning" tabs are
+   view-only for visitors; the server side also enforces a role gate (admin/visitor).
+
+ Prerequisites:
+ - golden_builder.py, weights_sweep.py
+ - Settings → Secrets: WANDB_API_KEY (if W&B is used)
+ - Settings → Environment Variables: ADMIN_USERS (e.g.: haji-mammad, teammate1)
+ - requirements.txt (ZeroGPU-ready) including spaces>=0.42.0
+ """
+
+ from __future__ import annotations
+
+ # --- Telemetry hard-off + ZeroGPU SDK (must be before chroma import) ---
+ import os, logging
+ os.environ["CHROMA_TELEMETRY_ENABLED"] = "false"
+ os.environ["ANONYMIZED_TELEMETRY"] = "false"
+
+ import spaces  # ZeroGPU SDK
+
+ # (optional) reduce log noise
+ logging.getLogger("chromadb").setLevel(logging.ERROR)
+ logging.getLogger("posthog").setLevel(logging.CRITICAL)
+ # -----------------------------------------------------------------------
+
+ import sys, re, json, time, pickle, zipfile, warnings
+ from dataclasses import dataclass, field
+ from pathlib import Path
+ from typing import List, Dict, Optional
+
+ import numpy as np
+ import torch
+ from torch.utils.data import Dataset
+ from sklearn.model_selection import train_test_split

  import gradio as gr
+ warnings.filterwarnings("ignore")

+ # ====== Transformers ======
+ import transformers as tf
+ from transformers import (
+     AutoTokenizer, AutoModelForCausalLM,
+     Trainer, TrainingArguments, EarlyStoppingCallback
+ )
+
+ # ====== RAG stack ======
  import chromadb
  from chromadb.config import Settings
+ from rank_bm25 import BM25Okapi
+ from sentence_transformers import CrossEncoder, SentenceTransformer, util as st_util
+
+ # ---- Monkeypatch Chroma telemetry (fallback) ----
+ try:
+     import chromadb.telemetry as _ctel
+     try: _ctel.client = None
+     except Exception: pass
+     for _n in ("capture", "capture_event"):
+         if hasattr(_ctel, _n):
+             try: setattr(_ctel, _n, lambda *a, **k: None)
+             except Exception: pass
+     if hasattr(_ctel, "Telemetry"):
+         try: _ctel.Telemetry().capture = lambda *a, **k: None
+         except Exception: pass
+ except Exception:
+     pass
+ # -------------------------------------------------
+
+ # ========= Persian normalization =========
+ ZWNJ = "\u200c"
+ AR_DIGITS = "٠١٢٣٤٥٦٧٨٩"
+ FA_DIGITS = "۰۱۲۳۴۵۶۷۸۹"
+ EN_DIGITS = "0123456789"
+
+ def normalize_fa(s: str) -> str:
+     if not s:
+         return s
+     s = s.replace("\u064A", "ی").replace("\u0643", "ک")
+     s = re.sub(r"[\u064B-\u065F\u0610-\u061A]", "", s)
+     trans = {ord(a): e for a, e in zip(AR_DIGITS + FA_DIGITS, EN_DIGITS * 2)}
+     s = s.translate(trans)
+     s = re.sub(r"\s*‌\s*", ZWNJ, s)
+     s = re.sub(r"\s+", " ", s).strip()
+     return s
+
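For reference, the digit-folding step builds a single translation table that maps both the Arabic-Indic and the Extended (Persian) digit blocks onto ASCII digits; a minimal standalone sketch (constants copied from the function above):

```python
# Digit folding as in normalize_fa: both digit blocks map onto "0123456789".
AR_DIGITS = "٠١٢٣٤٥٦٧٨٩"
FA_DIGITS = "۰۱۲۳۴۵۶۷۸۹"
EN_DIGITS = "0123456789"

trans = {ord(a): e for a, e in zip(AR_DIGITS + FA_DIGITS, EN_DIGITS * 2)}
print("ماده ۱۲ بند ٣".translate(trans))  # -> "ماده 12 بند 3"
```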
+ # ==========================
+ # Configs
+ # ==========================
+ @dataclass
+ class ModelConfig:
+     model_name: str = "Qwen/Qwen2.5-7B-Instruct"
+     max_input_length: int = 3072
+     max_new_tokens: int = 256
+     temperature: float = 0.7
+     top_p: float = 0.9
+     do_sample: bool = True
+     gradient_checkpointing: bool = True
+
+ @dataclass
+ class RAGConfig:
+     persist_dir: str = "./chroma_db"
+     collection: str = "legal_articles"
+     top_k: int = 6
+     similarity_threshold: float = 0.68
+     context_char_limit: int = 260
+     enable: bool = True
+     reranker_name: str = "Alibaba-NLP/gte-multilingual-reranker-base"
+
+ @dataclass
+ class TrainConfig:
+     base_model: str = "PartAI/Dorna-Llama3-8B-Instruct"
+     alt_model_1: str = "zpm/Llama-3.1-PersianQA"
+     hakim_model: str = "AI-Hoosh/HAKIM-7B"
+     hooshvareh_model: str = "HooshvareLab/llama-fa-7b-instruct"
+     output_dir: str = "./mahoon_causal_lora"
+     seed: int = 42
+     test_size: float = 0.1
+     epochs: int = 2
+     batch_size: int = 2
+     grad_accum: int = 4
+     lr: float = 2e-4
+     warmup_ratio: float = 0.03
+     weight_decay: float = 0.0
+     logging_steps: int = 50
+     eval_strategy: str = "epoch"
+     save_strategy: str = "epoch"
+     save_total_limit: int = 2
+     report_to: str = "wandb"
+     max_grad_norm: float = 1.0
+     use_4bit: bool = False
+     max_seq_len: int = 2048
+
+ @dataclass
+ class SystemConfig:
+     model: ModelConfig = field(default_factory=ModelConfig)
+     rag: RAGConfig = field(default_factory=RAGConfig)
+     train: TrainConfig = field(default_factory=TrainConfig)
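The `field(default_factory=...)` pattern in `SystemConfig` is required because dataclasses reject mutable instance defaults; a minimal illustration of the idiom (toy classes, not from the app):

```python
from dataclasses import dataclass, field

@dataclass
class Inner:
    x: int = 1

@dataclass
class Outer:
    # A bare `inner: Inner = Inner()` raises ValueError (mutable default);
    # default_factory builds a fresh Inner for each Outer instance instead.
    inner: Inner = field(default_factory=Inner)

print(Outer().inner.x)  # 1
```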
 
+ # ==========================
+ # Helpers
+ # ==========================
+ def set_seed_all(seed: int = 42):
+     import random
+     random.seed(seed); np.random.seed(seed)
+     torch.manual_seed(seed)
+     if torch.cuda.is_available(): torch.cuda.manual_seed_all(seed)
+
+ def bf16_supported():
+     return torch.cuda.is_available() and getattr(torch.cuda, "is_bf16_supported", lambda: False)()
+
+ def log_deps():
      try:
+         import accelerate, datasets
+         print("[deps]",
+               f"python={sys.version.split()[0]}",
+               f"transformers={tf.__version__}",
+               f"accelerate={accelerate.__version__}",
+               f"datasets={datasets.__version__}",
+               f"gradio={gr.__version__}",
+               flush=True)
+     except Exception as e:
+         print("[deps] warn:", e, flush=True)

+ # ==========================
+ # Role gating helpers
+ # ==========================
+ def _get_username(request: gr.Request) -> str | None:
      try:
+         return getattr(request, "username", None)
      except Exception:
+         return None
+
+ def is_admin(request: gr.Request) -> bool:
+     uname = _get_username(request)
+     if not uname:
+         return False
+     author = os.getenv("SPACE_AUTHOR_NAME", "").strip()
+     allow = {u.strip() for u in os.getenv("ADMIN_USERS", "").split(",") if u.strip()}
+     return (uname == author) or (uname in allow)
+
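A quick way to exercise `is_admin` outside a running Space (the fake request object and usernames are made up for the test; `SPACE_AUTHOR_NAME` is set automatically on HF Spaces):

```python
import os
from types import SimpleNamespace

os.environ["ADMIN_USERS"] = "haji-mammad, teammate1"   # comma-separated, as the docstring suggests
fake = SimpleNamespace(username="teammate1")           # stands in for gr.Request
print(is_admin(fake))                                  # True: whitelisted
print(is_admin(SimpleNamespace(username="visitor")))   # False
```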
+ # ==========================
+ # RAG: Chroma + BM25 + CrossEncoder reranker
+ # ==========================
+ class LegalRAG:
+     def __init__(self, cfg: RAGConfig):
+         self.cfg = cfg
+         self.client = None
+         self.collection = None
+         self.reranker: Optional[CrossEncoder] = None
+         self.bm25 = None
+         self.bm25_ids: List[str] = []
+         self.bm25_path = str(Path(self.cfg.persist_dir) / "bm25.pkl")
+
+     def init(self):
+         Path(self.cfg.persist_dir).mkdir(parents=True, exist_ok=True)
+         self.client = chromadb.PersistentClient(
+             path=self.cfg.persist_dir,
+             settings=Settings(anonymized_telemetry=False)
+         )
+         try:
+             self.collection = self.client.get_or_create_collection(self.cfg.collection)
+         except Exception:
+             try: self.collection = self.client.get_collection(self.cfg.collection)
+             except Exception: self.collection = self.client.create_collection(self.cfg.collection)
+
+         try:
+             self.reranker = CrossEncoder(self.cfg.reranker_name, device="cpu")
+         except Exception:
+             self.reranker = None
+
+         if Path(self.bm25_path).exists():
+             with open(self.bm25_path, "rb") as f:
+                 obj = pickle.load(f)
+             self.bm25 = obj["bm25"]; self.bm25_ids = obj["ids"]
+
+     def _rebuild_bm25(self, ids: List[str], docs: List[str]):
+         corpus = [normalize_fa(d).split() for d in docs]
+         self.bm25 = BM25Okapi(corpus)
+         self.bm25_ids = ids
+         with open(self.bm25_path, "wb") as f:
+             pickle.dump({"bm25": self.bm25, "ids": self.bm25_ids}, f)
+
+     def index_jsonl(self, jsonl_path: str, id_key="article_id", text_key="text"):
+         if not self.collection: self.init()
+
+         seen: Dict[str, int] = {}
+         ids, docs, metas = [], [], []
+
+         def _norm_id(x: str) -> str:
+             x = x or ""
+             x = x.replace("\u064A", "ی").replace("\u0643", "ک")
+             trans = {ord(a): e for a, e in zip("٠١٢٣٤٥٦٧٨٩۰۱۲۳۴۵۶۷۸۹", "01234567890123456789")}
+             x = x.translate(trans)
+             x = re.sub(r"\s+", "", x)
+             return x
+
+         with open(jsonl_path, "r", encoding="utf-8") as f:
+             for i, line in enumerate(f):
+                 s = line.strip()
+                 if not s: continue
+                 try: obj = json.loads(s)
+                 except: continue
+
+                 raw_id = str(obj.get(id_key, f"auto_{i}"))
+                 base_id = _norm_id(raw_id)
+                 txt = normalize_fa(str(obj.get(text_key, "")).strip())
+                 if not txt: continue
+
+                 if base_id in seen:
+                     seen[base_id] += 1
+                     uid = f"{base_id}__d{seen[base_id]}"
+                     dupe_idx = seen[base_id]
+                 else:
+                     seen[base_id] = 1
+                     uid = base_id
+                     dupe_idx = 1
+
+                 ids.append(uid); docs.append(txt); metas.append({"article_id": base_id, "dupe_idx": dupe_idx})
+
+         if not ids:
+             return "هیچ سندی برای ایندکس یافت نشد."
+
+         self.collection.upsert(ids=ids, documents=docs, metadatas=metas)
+         self._rebuild_bm25(ids, docs)
+
+         dup_count = sum(1 for _, c in seen.items() if c > 1)
+         return f"✅ {len(ids)} سند ایندکس شد (Dense+BM25). شناسه‌های تکراری: {dup_count} کلید (با پسوند __dN یکتا شدند)."
+
+     def retrieve(self, query: str) -> List[Dict]:
+         if not self.collection: return []
+         qn = normalize_fa(query)
+
+         # Dense
+         try:
+             res = self.collection.query(
+                 query_texts=[qn],
+                 n_results=max(self.cfg.top_k * 3, 20),
+                 include=["documents", "metadatas", "distances"],
+             )
+             out = []
+             docs = res.get("documents", [[]])[0]
+             metas = res.get("metadatas", [[]])[0]
+             dists = res.get("distances", [[1.0]])[0]
+             for i, (doc, meta, dist) in enumerate(zip(docs, metas, dists)):
+                 sim = 1.0 - float(dist)
+                 out.append({"article_id": (meta or {}).get("article_id", f"unk_{i}"),
+                             "text": doc, "similarity": sim})
+         except Exception:
+             out = []
+
+         # BM25
+         bm25_hits = []
+         if self.bm25 is not None and self.bm25_ids:
+             scores = self.bm25.get_scores(normalize_fa(qn).split())
+             idxs = np.argsort(scores)[::-1][:max(self.cfg.top_k * 3, 20)]
+             smax = float(scores.max() + 1e-8)
+             for j in idxs:
+                 aid = self.bm25_ids[int(j)]
+                 try:
+                     got = self.collection.get(ids=[aid])
+                     tdoc = got["documents"][0]
+                 except Exception:
+                     tdoc = ""
+                 bm25_hits.append({"article_id": aid, "text": tdoc, "similarity": float(scores[j]) / smax})
+
+         # merge
+         pool: Dict[str, Dict] = {}
+         for a in out + bm25_hits:
+             if a["article_id"] not in pool or a.get("similarity", 0) > pool[a["article_id"]].get("similarity", 0):
+                 pool[a["article_id"]] = a
+         merged = [a for a in pool.values() if a.get("text") and len(a["text"]) > 15]
+         merged = [a for a in merged if a.get("similarity", 0) >= self.cfg.similarity_threshold]
+
+         # rerank (GPU only during predict)
+         if merged and self.reranker:
+             pairs = [(qn, a["text"]) for a in merged]
+             try:
+                 with spaces.GPU(duration=30):
+                     scores = self.reranker.predict(pairs)
+             except Exception:
+                 scores = self.reranker.predict(pairs)
+             for a, s in zip(merged, scores): a["score"] = float(s)
+             merged = sorted(merged, key=lambda x: x.get("score", 0), reverse=True)[: self.cfg.top_k]
+         else:
+             merged = sorted(merged, key=lambda x: x.get("similarity", 0), reverse=True)[: self.cfg.top_k]
+         return merged
+
+     def build_context(self, arts: List[Dict]) -> str:
+         if not arts: return ""
+         bullets = [f"• ماده {a['article_id']}: {a['text'][:self.cfg.context_char_limit]}..." for a in arts]
+         return "مواد مرتبط:\n" + "\n".join(bullets)
+
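The fusion step above keeps, per `article_id`, the better of the dense score (`1.0 - distance`, which equals cosine similarity only if the collection was created with `{"hnsw:space": "cosine"}`; Chroma defaults to l2, where this conversion is a heuristic) and the max-normalized BM25 score. A toy sketch of just the merge, with hand-made scores:

```python
# Toy candidate lists; "similarity" fields mimic the two retrievers above.
dense = [{"article_id": "10", "similarity": 1.0 - 0.25},   # 1 - distance
         {"article_id": "11", "similarity": 1.0 - 0.40}]
bm25  = [{"article_id": "11", "similarity": 7.2 / 7.2},    # score / max score
         {"article_id": "12", "similarity": 3.6 / 7.2}]

pool = {}
for a in dense + bm25:  # keep the best score seen for each article_id
    if a["article_id"] not in pool or a["similarity"] > pool[a["article_id"]]["similarity"]:
        pool[a["article_id"]] = a
print({k: round(v["similarity"], 2) for k, v in pool.items()})
# {'10': 0.75, '11': 1.0, '12': 0.5}
```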
+ # ========= RAG bootstrap from repo =========
+ def parse_law_textfile_to_jsonl(txt_path: str, out_jsonl: str):
+     pat = re.compile(r"(?:ماده|مادّه)\s+(\d+)\s*[:\-–]\s*(.+)")
+     rows = []
+     with open(txt_path, "r", encoding="utf-8") as f:
+         for line in f:
+             s = line.strip()
+             if not s: continue
+             m = pat.match(s)
+             if not m: continue
+             aid = m.group(1); body = m.group(2).strip()
+             if len(body) < 12: continue
+             rows.append({"article_id": aid, "text": normalize_fa(body)})
+     if not rows: raise RuntimeError("هیچ ماده‌ای با الگوی تعریف‌شده پیدا نشد.")
+     with open(out_jsonl, "w", encoding="utf-8") as g:
+         for r in rows: g.write(json.dumps(r, ensure_ascii=False) + "\n")
+     return len(rows)
+
+ def ensure_chroma_ready(persist_dir="./chroma_db", collection="legal_articles") -> str:
+     Path(persist_dir).mkdir(parents=True, exist_ok=True)
+     if any(Path(persist_dir).glob("*")):
+         return "ChromaDB موجود است."
+     zip_path = Path("./chroma_legal_db.zip")
+     if zip_path.exists():
+         try:
+             with zipfile.ZipFile(zip_path, "r") as z: z.extractall(persist_dir)
+             return "ChromaDB از zip بازیابی شد."
+         except Exception: pass
+     txt_path = Path("./all_legal_sentences.txt")
+     if txt_path.exists():
+         n = parse_law_textfile_to_jsonl(str(txt_path), "./laws.jsonl")
+         rag_local = LegalRAG(RAGConfig(persist_dir=persist_dir, collection=collection))
+         rag_local.init()
+         msg = rag_local.index_jsonl("./laws.jsonl", id_key="article_id", text_key="text")
+         return f"از متن خام {n} رکورد استخراج شد. {msg}"
+     return "پایگاه RAG موجود نیست و منبع خامی هم برای ساخت پیدا نشد."
+
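The article regex accepts lines of the form «ماده N - text» (or «مادّه», with `:`, `-`, or `–` as the separator); a quick check with a made-up line:

```python
import re

pat = re.compile(r"(?:ماده|مادّه)\s+(\d+)\s*[:\-–]\s*(.+)")
m = pat.match("ماده 12 - این متن صرفاً برای آزمایش الگو است و ماده واقعی نیست.")
print(m.group(1))        # "12"
print(m.group(2)[:20])   # start of the article body
```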
+ # ==========================
+ # Loader + Generator (Causal-only, ZeroGPU)
+ # ==========================
+ class CausalLoader:
+     def __init__(self, mcfg: ModelConfig):
+         self.cfg = mcfg
+         self.tokenizer = None
+         self.model = None
+
+     def load(self, model_name: str):
+         self.tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
+         if self.tokenizer.pad_token is None and hasattr(self.tokenizer, "eos_token"):
+             self.tokenizer.pad_token = self.tokenizer.eos_token
+
+         try:
+             with spaces.GPU(duration=90):
+                 kwargs = {"low_cpu_mem_usage": True}
+                 if torch.cuda.is_available():
+                     kwargs["device_map"] = "auto"
+                     kwargs["torch_dtype"] = torch.bfloat16 if bf16_supported() else torch.float16
+                 self.model = AutoModelForCausalLM.from_pretrained(model_name, **kwargs)
+                 if self.cfg.gradient_checkpointing and hasattr(self.model, "gradient_checkpointing_enable"):
+                     try: self.model.gradient_checkpointing_enable()
+                     except Exception: pass
+         except Exception:
+             self.model = AutoModelForCausalLM.from_pretrained(model_name, low_cpu_mem_usage=True)
+
+         return self
+
+ class Generator:
+     def __init__(self, loader: CausalLoader, mcfg: ModelConfig):
+         self.tk = loader.tokenizer
+         self.model = loader.model
+         self.cfg = mcfg
+
+     def generate(self, question: str, context: str = "", system_prompt: str = "You are a helpful Persian legal assistant.") -> str:
+         parts = []
+         if system_prompt: parts.append(f"<|system|>\n{system_prompt}")
+         if context: parts.append(f"<|system|>\nاز منابع زیر استفاده کن و استنادی پاسخ بده:\n{context}")
+         parts.append(f"<|user|>\n{question}")
+         prompt = "\n".join(parts) + "\n<|assistant|>\n"
+
+         enc = self.tk(prompt, return_tensors="pt", truncation=True, max_length=self.cfg.max_input_length)
+
+         try:
+             with spaces.GPU(duration=60):
+                 dev_model = next(self.model.parameters()).device if hasattr(self.model, "parameters") else "cpu"
+                 inputs = {k: v.to(dev_model) for k, v in enc.items()}
+                 with torch.no_grad():
+                     out = self.model.generate(
+                         **inputs,
+                         max_new_tokens=self.cfg.max_new_tokens,
+                         do_sample=self.cfg.do_sample,
+                         temperature=self.cfg.temperature,
+                         top_p=self.cfg.top_p,
+                         pad_token_id=self.tk.pad_token_id or self.tk.eos_token_id,
+                     )
+         except Exception:
+             inputs = {k: v for k, v in enc.items()}
+             with torch.no_grad():
+                 out = self.model.generate(
+                     **inputs,
+                     max_new_tokens=min(self.cfg.max_new_tokens, 256),
+                     do_sample=self.cfg.do_sample,
+                     temperature=self.cfg.temperature,
+                     top_p=self.cfg.top_p,
+                     pad_token_id=self.tk.pad_token_id or self.tk.eos_token_id,
+                 )
+
+         return self.tk.decode(out[0], skip_special_tokens=True)
+
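For clarity, this is the prompt string `Generator.generate` assembles: a generic `<|system|>/<|user|>/<|assistant|>` scaffold rather than each model's native chat template (`tokenizer.apply_chat_template` would be the model-specific alternative). A standalone rendering with placeholder inputs:

```python
system_prompt = "You are a helpful Persian legal assistant."
context = "مواد مرتبط:\n• ماده 12: ..."
question = "سوال نمونه"

parts = [f"<|system|>\n{system_prompt}",
         f"<|system|>\nاز منابع زیر استفاده کن و استنادی پاسخ بده:\n{context}",
         f"<|user|>\n{question}"]
prompt = "\n".join(parts) + "\n<|assistant|>\n"
print(prompt)
```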
+ # ==========================
+ # Datasets & Trainer (Causal-only, W&B)
+ # ==========================
+ def read_jsonl_files(paths: List[str]) -> List[Dict]:
+     data: List[Dict] = []
+     for p in paths:
+         if not p: continue
+         with open(p, 'r', encoding='utf-8') as f:
+             for line in f:
+                 s = line.strip()
+                 if not s: continue
+                 try: data.append(json.loads(s))
+                 except json.JSONDecodeError: continue
+     return data
+
+ class CausalJSONLDataset(Dataset):
+     def __init__(self, data: List[Dict], tokenizer, max_len: int, rag: Optional[LegalRAG] = None, enhance_every: int = 8):
+         self.tk = tokenizer
+         self.max_len = max_len
+         self.items = []
+         for i, ex in enumerate(data):
+             src = normalize_fa(str(ex.get("input", "")).strip())
+             tgt = normalize_fa(str(ex.get("output", "")).strip())
+             if not src or not tgt: continue
+             ctx = ""
+             if rag and i % enhance_every == 0:
+                 arts = rag.retrieve(src)
+                 ctx = rag.build_context(arts)
+             text = ""
+             if ctx: text += f"<|system|>\nاز منابع زیر استفاده کن:\n{ctx}\n"
+             text += f"<|system|>\nYou are a helpful Persian legal assistant.\n"
+             text += f"<|user|>\n{src}\n<|assistant|>\n{tgt}"
+             self.items.append(text)
+
+     def __len__(self): return len(self.items)
+
+     def __getitem__(self, idx):
+         text = self.items[idx]
+         enc = self.tk(text, max_length=self.max_len, padding="max_length", truncation=True)
+         input_ids = torch.tensor(enc["input_ids"])
+         attn = torch.tensor(enc["attention_mask"])
+         labels = input_ids.clone(); labels[attn == 0] = -100
+         return {"input_ids": input_ids, "attention_mask": attn, "labels": labels}
+
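The `-100` assignment in `__getitem__` is what keeps padding out of the training loss: Hugging Face causal-LM heads compute `CrossEntropyLoss(ignore_index=-100)`, so masked positions contribute nothing. A toy illustration:

```python
import torch

input_ids = torch.tensor([5, 6, 7, 0, 0])  # 0 = pad id in this toy example
attn      = torch.tensor([1, 1, 1, 0, 0])

labels = input_ids.clone()
labels[attn == 0] = -100                   # ignored by the LM loss
print(labels)                              # tensor([   5,    6,    7, -100, -100])
```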
+ def safe_training_args(**kwargs):
+     return TrainingArguments(**kwargs)
+
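`safe_training_args` currently just forwards its kwargs; given the name, it was presumably meant to absorb `TrainingArguments` signature drift across transformers releases (for example, `evaluation_strategy` was renamed to `eval_strategy`). One way it could do that, sketched as an assumption rather than the author's intent:

```python
import inspect
from transformers import TrainingArguments

def safe_training_args(**kwargs):
    sig = inspect.signature(TrainingArguments.__init__)
    # Map the old kwarg name onto the new one when needed.
    if "evaluation_strategy" in kwargs and "evaluation_strategy" not in sig.parameters:
        kwargs["eval_strategy"] = kwargs.pop("evaluation_strategy")
    # Drop anything the installed version does not accept.
    return TrainingArguments(**{k: v for k, v in kwargs.items() if k in sig.parameters})
```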
+ class TrainerManager:
+     def __init__(self, syscfg: SystemConfig, loader: CausalLoader):
+         self.cfg = syscfg
+         self.loader = loader
+
+     def train_causal(self, train_paths: List[str], use_rag: bool = True, use_wandb: bool = True,
+                      wandb_project: str = "mahoon-legal-ai", wandb_entity: str = "", run_name: str = "mahoon_causal_lora"):
+         set_seed_all(self.cfg.train.seed)
+         data = read_jsonl_files(train_paths)
+         train, val = train_test_split(data, test_size=self.cfg.train.test_size, random_state=self.cfg.train.seed)
+
+         rag = LegalRAG(self.cfg.rag) if (use_rag and self.cfg.rag.enable) else None
+         if rag: rag.init()
+
+         ds_tr = CausalJSONLDataset(train, self.loader.tokenizer, self.cfg.train.max_seq_len, rag)
+         ds_va = CausalJSONLDataset(val, self.loader.tokenizer, self.cfg.train.max_seq_len, None)
+
+         fp16_ok = torch.cuda.is_available() and not bf16_supported()
+         bf16_ok = bf16_supported()
+
+         if use_wandb:
+             os.environ.setdefault("WANDB_PROJECT", wandb_project or "mahoon-legal-ai")
+             if wandb_entity: os.environ.setdefault("WANDB_ENTITY", wandb_entity)
+             os.environ.pop("WANDB_DISABLED", None)
+         else:
+             os.environ["WANDB_DISABLED"] = "true"

+         args = safe_training_args(
+             output_dir=self.cfg.train.output_dir,
+             num_train_epochs=self.cfg.train.epochs,
+             learning_rate=self.cfg.train.lr,
+             per_device_train_batch_size=self.cfg.train.batch_size,
+             per_device_eval_batch_size=self.cfg.train.batch_size,
+             gradient_accumulation_steps=self.cfg.train.grad_accum,
+             warmup_ratio=self.cfg.train.warmup_ratio,
+             weight_decay=self.cfg.train.weight_decay,
+             evaluation_strategy=self.cfg.train.eval_strategy,
+             save_strategy=self.cfg.train.save_strategy,
+             save_total_limit=self.cfg.train.save_total_limit,
+             load_best_model_at_end=True,
+             metric_for_best_model="eval_loss",
+             logging_steps=self.cfg.train.logging_steps,
+             report_to=(["wandb"] if use_wandb else ["none"]),
+             run_name=run_name,
+             fp16=fp16_ok, bf16=bf16_ok,
+             max_grad_norm=self.cfg.train.max_grad_norm,
+         )
+
+         callbacks = [EarlyStoppingCallback(early_stopping_patience=2)]
+         try:
+             if use_wandb:
+                 from transformers.integrations import WandbCallback
+                 callbacks.append(WandbCallback())
+         except Exception:
+             pass
+
+         trainer = Trainer(
+             model=self.loader.model,
+             args=args,
+             train_dataset=ds_tr,
+             eval_dataset=ds_va,
+             tokenizer=self.loader.tokenizer,
+             callbacks=callbacks,
+         )
+
+         if use_wandb:
+             try:
+                 import wandb
+                 wandb.init(project=os.getenv("WANDB_PROJECT", "mahoon-legal-ai"),
+                            entity=os.getenv("WANDB_ENTITY"),
+                            name=run_name,
+                            config={
+                                "base_model": self.loader.model.name_or_path,
+                                "epochs": self.cfg.train.epochs,
+                                "batch": self.cfg.train.batch_size,
+                                "grad_accum": self.cfg.train.grad_accum,
+                                "lr": self.cfg.train.lr,
+                                "max_seq_len": self.cfg.train.max_seq_len,
+                                "use_rag": use_rag,
+                            })
+             except Exception:
+                 pass
+
+         trainer.train()
+         trainer.save_model(self.cfg.train.output_dir)
+         self.loader.tokenizer.save_pretrained(self.cfg.train.output_dir)
+
+         if use_wandb:
+             try:
+                 import wandb
+                 art = wandb.Artifact("mahoon-model", type="model")
+                 art.add_dir(self.cfg.train.output_dir)
+                 wandb.log_artifact(art)
+                 wandb.finish()
+             except Exception:
+                 pass
+
+ # ==========================
+ # Dataset utilities (Cleaner/Deduper)
+ # ==========================
+ def deduplicate_jsonl(in_path: str, out_path: str, sim_threshold: float = 0.90, text_keys=("input", "output")) -> int:
+     rows = []
+     with open(in_path, "r", encoding="utf-8") as f:
+         for line in f:
+             s = line.strip()
+             if not s: continue
+             try: obj = json.loads(s)
+             except: continue
+             for k in text_keys:
+                 if k in obj: obj[k] = normalize_fa(str(obj[k]))
+             rows.append(obj)
+     if not rows: raise RuntimeError("هیچ رکورد معتبری در ورودی نبود.")
+     model = SentenceTransformer("sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2")
+     embs = model.encode([r.get("input", "") for r in rows], convert_to_tensor=True, show_progress_bar=False, normalize_embeddings=True)
+     kept, seen = [], torch.zeros(len(rows), dtype=torch.bool)
+     for i in range(len(rows)):
+         if seen[i]: continue
+         sims = st_util.cos_sim(embs[i], embs)[0]
+         dup_idx = (sims >= sim_threshold).nonzero(as_tuple=True)[0].tolist()
+         for j in dup_idx: seen[j] = True
+         kept.append(rows[i])
+     with open(out_path, "w", encoding="utf-8") as g:
+         for r in kept: g.write(json.dumps(r, ensure_ascii=False) + "\n")
+     return len(kept)
+
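The dedup loop is greedy: the first occurrence of a semantic cluster is kept and everything within `sim_threshold` cosine of it is marked seen. A self-contained toy with hand-made embeddings instead of SentenceTransformer output:

```python
import torch
from sentence_transformers import util as st_util

# Rows 0 and 1 nearly parallel (near-duplicates); row 2 orthogonal.
embs = torch.nn.functional.normalize(
    torch.tensor([[1.00, 0.00], [0.99, 0.14], [0.00, 1.00]]), dim=1)

kept, seen = [], torch.zeros(3, dtype=torch.bool)
for i in range(3):
    if seen[i]:
        continue
    sims = st_util.cos_sim(embs[i], embs)[0]
    for j in (sims >= 0.90).nonzero(as_tuple=True)[0].tolist():
        seen[j] = True
    kept.append(i)
print(kept)  # [0, 2] — row 1 folds into row 0
```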
+ # ==========================
+ # App (Gradio) + Role Gating
+ # ==========================
+ class LegalApp:
+     def __init__(self, scfg: Optional[SystemConfig] = None):
+         self.scfg = scfg or SystemConfig()
+         self.rag = LegalRAG(self.scfg.rag)
+         self.loader: Optional[CausalLoader] = None
+         self.gen: Optional[Generator] = None
+
+     def _file_paths(self, files: List[gr.File]) -> List[str]:
+         paths = []
+         for f in (files or []):
+             p = getattr(f, "name", None) or getattr(f, "path", None)
+             if p: paths.append(p)
+         return paths
+
+     # Core (consultation/model loading is open to everyone)
+     def load(self, model_name: str):
+         self.loader = CausalLoader(self.scfg.model).load(model_name)
+         self.gen = Generator(self.loader, self.scfg.model)
          # RAG
+         msg_rag = "RAG غیرفعال"
+         if self.scfg.rag.enable:
+             try:
+                 self.rag = LegalRAG(self.scfg.rag); self.rag.init()
+                 msg_rag = "RAG آماده است"
+             except Exception as e:
+                 msg_rag = f"RAG خطا: {e}"
+         return f"مدل بارگذاری شد: {model_name}\n{msg_rag}"
+
+     # --- Server-side gate: admins only ---
+     def build_index(self, laws_file: gr.File, id_key: str, text_key: str, request: gr.Request):
+         if not is_admin(request):
+             return "🔒 این عملیات فقط برای مدیران فعال است."
+         if not self.scfg.rag.enable: return "RAG غیرفعال است."
+         try:
+             self.rag.init()
+             p = getattr(laws_file, "name", None) or getattr(laws_file, "path", None)
+             if not p: return "فایل قوانین معتبر نیست."
+             return self.rag.index_jsonl(p, id_key=id_key, text_key=text_key)
+         except Exception as e:
+             return f"خطا در ایندکس: {e}"
+
+     def build_dataset(self, raw_file, text_key: str, model_ckpt: str, batch_size: int, max_samples: int | None, request: gr.Request):
+         if not is_admin(request):
+             return None, "🔒 این عملیات فقط برای مدیران فعال است."
+         try:
+             from golden_builder import load_json_or_jsonl, save_jsonl, GoldenBuilder
+         except Exception as e:
+             return None, f"❌ golden_builder.py یافت نشد/قابل import نیست: {e}"
+         path = getattr(raw_file, "name", None) or getattr(raw_file, "path", None)
+         if not path: return None, "⚠️ فایل ورودی معتبر نیست."
+         try:
+             data = load_json_or_jsonl(path)
+             if max_samples and int(max_samples) > 0: data = data[:int(max_samples)]
+             gb = GoldenBuilder(model_name=model_ckpt)
+             rows = gb.build(data, text_key=text_key, batch_size=int(batch_size))
+             out_dir = "/tmp/mahoon_datasets"; Path(out_dir).mkdir(parents=True, exist_ok=True)
+             out_path = f"{out_dir}/golden_{os.path.basename(path)}.jsonl"
+             save_jsonl(rows, out_path)
+             return out_path, f"✅ {len(rows)} رکورد تولید شد."
+         except Exception as e:
+             return None, f"❌ خطا در ساخت دیتاست: {e}"
+
+     def train(self, model_name: str, files: List[gr.File], use_rag: bool, epochs: int, batch: int, lr: float,
+               use_wandb: bool, wandb_project: str, wandb_entity: str, run_name: str,
+               progress=gr.Progress(track_tqdm=True), request: gr.Request = None):
+         if not is_admin(request):
+             return "🔒 این عملیات فقط برای مدیران فعال است."
+         progress(0.05, desc="راه‌اندازی")
+         self.scfg.train.epochs = int(epochs)
+         self.scfg.train.batch_size = int(batch)
+         self.scfg.train.lr = float(lr)
+
+         progress(0.10, desc="بارگذاری مدل/توکنایزر")
+         self.loader = CausalLoader(self.scfg.model).load(model_name)
+
+         paths = self._file_paths(files)
+         if not paths: return "⚠️ هیچ فایل JSONL برای آموزش انتخاب نشده."
+
+         tm = TrainerManager(self.scfg, self.loader)
+         set_seed_all(self.scfg.train.seed)
+
+         progress(0.30, desc="آماده‌سازی دیتاست‌ها و RAG (اختیاری)")
+         tm.train_causal(
+             paths, use_rag=use_rag, use_wandb=use_wandb,
+             wandb_project=wandb_project, wandb_entity=wandb_entity, run_name=run_name
          )
+
+         progress(0.95, desc="ذخیرهٔ آرتیفکت‌ها")
+         return f"✅ آموزش کامل شد و در {self.scfg.train.output_dir} ذخیره شد."
+
+     def run_weight_tune(self, f, tk, ms, runs, bs, proj, ent, request: gr.Request):
+         if not is_admin(request):
+             return "🔒 این عملیات فقط برای مدیران فعال است."
+         p = getattr(f, "name", None) or getattr(f, "path", None)
+         if not p:
+             return "⚠️ فایل داده نامعتبر است."
+         try:
+             from weights_sweep import run_sweep
+         except Exception as e:
+             return f"❌ weights_sweep.py یافت نشد/قابل import نیست: {e}"
+         os.environ.setdefault("WANDB_PROJECT", proj or "mahoon-legal-ai")
+         if ent: os.environ.setdefault("WANDB_ENTITY", ent)
+         try:
+             run_sweep(data_path=p, text_key=tk, max_samples=int(ms), batch_size=int(bs),
+                       project=proj, entity=ent, count=int(runs))
+             return "✅ Sweep اجرا شد. بهترین Run را در W&B بررسی و وزن‌ها را تثبیت کنید."
+         except Exception as e:
+             return f"❌ خطا در اجرای Sweep: {e}"
+
+     def apply_best_weights(self, wandb_project: str, wandb_entity: str, metric: str = "pass_rate", request: gr.Request = None):
+         if request is not None and not is_admin(request):
+             return "🔒 این عملیات فقط برای مدیران فعال است."
+         try:
+             import wandb, json as _json
+         except Exception as e:
+             return f"❌ W&B در محیط در دسترس نیست: {e}"
+
+         try:
+             api = wandb.Api()
+             proj_path = f"{wandb_entity}/{wandb_project}" if wandb_entity else wandb_project
+             runs = api.runs(proj_path, filters={"state": "finished"})
+         except Exception as e:
+             return f"❌ عدم دسترسی به پروژه W&B ({wandb_project}): {e}"
+
+         best_run = None; best_val = float("-inf")
+         for r in runs:
+             s = r.summary or {}
+             if "weights" in s and metric in s:
+                 try: val = float(s[metric])
+                 except Exception: continue
+                 if val > best_val: best_val, best_run = val, r
+
+         if not best_run:
+             return "⚠️ هیچ Run واجد شرایطی با summary['weights'] و متریک موردنظر پیدا نشد."
+
+         weights = best_run.summary.get("weights", {})
+         if not isinstance(weights, dict) or not weights:
+             return "⚠️ فرمت وزن‌های بهترین Run نامعتبر است."
+
+         try:
+             with open("legal_entity_weights.json", "w", encoding="utf-8") as f:
+                 _json.dump(weights, f, ensure_ascii=False, indent=2)
+         except Exception as e:
+             return f"❌ خطا در نوشتن legal_entity_weights.json: {e}"
+
+         rid = getattr(best_run, "id", "unknown")
+         return f"✅ وزن‌ها اعمال شد از Run `{rid}` با {metric}={best_val:.4f}. فایل: `legal_entity_weights.json`"
+
+     # Consultation (public)
+     def answer(self, question: str, system_prompt: str, use_rag: bool, max_new_tokens: int, temperature: float, top_p: float):
+         if not question.strip(): return "لطفاً سوال خود را وارد کنید.", ""
+         if not self.gen: return "ابتدا مدل را بارگذاری کنید.", ""
+         self.scfg.model.max_new_tokens = int(max_new_tokens)
+         self.scfg.model.temperature = float(temperature)
+         self.scfg.model.top_p = float(top_p)
+
+         arts = self.rag.retrieve(question) if (use_rag and self.scfg.rag.enable and self.rag.collection) else []
+         max_refs = 4
+         if arts: arts = arts[:max_refs]
+         ctx = self.rag.build_context(arts) if arts else ""
+         ans = self.gen.generate(question, ctx, system_prompt)

          refs = ""
          if arts:
+             refs = "\n\n" + "\n".join([f"**ماده {a['article_id']}** (شباهت: {a.get('similarity',0):.2f})\n{a['text'][:320]}..." for a in arts])
+         return ans, refs
+
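The admin handlers below rely on Gradio's request injection: a parameter annotated with `gr.Request` is filled in by Gradio itself and is never listed in `inputs=[...]`. A minimal standalone demo of the mechanism:

```python
import gradio as gr

def whoami(request: gr.Request):
    # Gradio injects `request`; with Space login/OAuth enabled,
    # request.username carries the logged-in user.
    return getattr(request, "username", None) or "Visitor"

with gr.Blocks() as demo:
    banner = gr.Markdown()
    demo.load(whoami, inputs=None, outputs=banner)
```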
+     # UI
+     def build_ui(self):
+         log_deps()
+         try:
+             print("[rag-bootstrap]", ensure_chroma_ready(self.scfg.rag.persist_dir, self.scfg.rag.collection), flush=True)
          except Exception as e:
+             print("[rag-bootstrap] error:", e, flush=True)
+
+         default_gen_models = {
+             "Qwen2.5-7B Instruct": "Qwen/Qwen2.5-7B-Instruct",
+             "Llama-3.1-8B Instruct": "meta-llama/Llama-3.1-8B-Instruct",
+             "Mistral-7B Instruct (v0.3)": "mistralai/Mistral-7B-Instruct-v0.3",
+         }
+
+         with gr.Blocks(title="ماحون — مشاور حقوقی (Causal-only, ZeroGPU)") as app:
+             # Role banner
+             role_banner = gr.Markdown()
+
+             gr.Markdown("""
+             <div style='text-align:center;padding:18px'>
+                 <h1 style='margin-bottom:4px'>ماحون — Persian Legal (Causal-only, ZeroGPU)</h1>
+                 <p style='color:#666'>Hybrid RAG • Qwen/Llama/Mistral • Dataset Ops • W&B Training • Weight Tuning</p>
+             </div>
+             """)
+
+             # --- Tab: Consultation (interactive for all) ---
+             with gr.Tab("مشاوره"):
+                 with gr.Row():
+                     gen_model_dd = gr.Dropdown(choices=list(default_gen_models.keys()), value="Qwen2.5-7B Instruct", label="مدل تولید")
+                     gen_model_id = gr.Textbox(value=default_gen_models["Qwen2.5-7B Instruct"], label="Model ID (قابل ویرایش)")
+                 with gr.Row():
+                     use_rag = gr.Checkbox(value=True, label="RAG فعال باشد؟")
+                     persist_dir = gr.Textbox(value=self.scfg.rag.persist_dir, label="مسیر ChromaDB")
+                     collection = gr.Textbox(value=self.scfg.rag.collection, label="نام کالکشن")
+                 with gr.Row():
+                     top_k = gr.Slider(1, 15, value=self.scfg.rag.top_k, step=1, label="Top-K")
+                     threshold = gr.Slider(0.3, 0.95, value=self.scfg.rag.similarity_threshold, step=0.01, label="آستانه شباهت")
+                 load_btn = gr.Button("بارگذاری مدل", variant="primary")
+                 status = gr.Textbox(label="وضعیت", interactive=False)
+
+                 with gr.Accordion("پارامترهای تولید", open=False):
+                     system_prompt = gr.Textbox(value="You are a helpful Persian legal assistant.", label="System prompt")
+                     max_new_tokens = gr.Slider(64, 2048, value=self.scfg.model.max_new_tokens, step=16, label="max_new_tokens")
+                     temperature = gr.Slider(0.0, 1.5, value=self.scfg.model.temperature, step=0.05, label="temperature")
+                     top_p = gr.Slider(0.1, 1.0, value=self.scfg.model.top_p, step=0.05, label="top_p")
+
+                 question = gr.Textbox(lines=3, label="سوال حقوقی")
+                 gr.Examples(
+                     examples=[
+                         ["در صورت نقض قرارداد EPC چه راهکارهای حقوقی دارم؟"],
+                         ["آیا درج شرط عدم رقابت در قرارداد کار قانونی است؟"],
+                         ["حق و حقوق کارگر در صورت اخراج فوری چیست؟"],
+                     ],
+                     inputs=question, label="نمونه پرسش‌ها"
+                 )
+                 ask_btn = gr.Button("پرسش", variant="primary")
+                 answer = gr.Markdown(label="پاسخ"); refs = gr.Markdown(label="مواد قانونی مرتبط")
+
+             # --- Tab: Indexing (view-only for visitors) ---
+             with gr.Tab("ایندکس قوانین"):
+                 gr.Markdown("فایل JSONL قوانین را بارگذاری و ایندکس کنید (کلیدها: `article_id`, `text`).")
+                 laws_file = gr.File(label="فایل JSONL قوانین", file_types=[".jsonl"])
+                 id_key = gr.Textbox(value="article_id", label="کلید شناسه ماده")
+                 text_key = gr.Textbox(value="text", label="کلید متن ماده")
+                 index_btn = gr.Button("ایندکس‌سازی قوانین"); index_status = gr.Textbox(label="وضعیت ایندکس", interactive=False)
+                 index_widgets = [laws_file, id_key, text_key, index_btn]
+
+             # --- Tab: Dataset Builder (view-only for visitors) ---
+             with gr.Tab("ساخت دیتاست"):
+                 gr.Markdown("فایل خام (JSON/JSONL) → خروجی JSONL سازگار با `{input, output}` (از golden_builder).")
+                 raw_file = gr.File(label="فایل خام", file_types=[".json", ".jsonl"])
+                 with gr.Row():
+                     ds_text_key = gr.Textbox(value="متن_کامل", label="کلید متن (text_key)")
+                     model_ckpt = gr.Dropdown(
+                         choices=["google/mt5-base", "google/flan-t5-base", "t5-base"],
+                         value="google/mt5-base",
+                         label="مدل خلاصه‌ساز برای ساخت دیتاست (فقط Builder)"
+                     )
+                 with gr.Row():
+                     ds_batch_size = gr.Slider(1, 16, value=4, step=1, label="Batch size")
+                     max_samples = gr.Number(value=0, label="حداکثر نمونه (۰=همه)")
+                 build_btn = gr.Button("ساخت دیتاست", variant="primary")
+                 out_file = gr.File(label="دانلود خروجی JSONL", interactive=False)
+                 build_status = gr.Textbox(label="وضعیت", interactive=False)
+                 builder_widgets = [raw_file, ds_text_key, model_ckpt, ds_batch_size, max_samples, build_btn]
+
+             # --- Tab: Dataset Cleaning (view-only for visitors) ---
+             with gr.Tab("پاکسازی دیتاست"):
+                 gr.Markdown("نرمال‌سازی فارسی + حذف تکراری‌های معنایی (cosine). ورودی: JSONL `{input, output}`.")
+                 raw_ds = gr.File(label="JSONL ورودی", file_types=[".jsonl"])
+                 sim_th = gr.Slider(0.80, 0.98, value=0.90, step=0.01, label="آستانه شباهت (cosine)")
+                 clean_btn = gr.Button("اجرای پاکسازی", variant="primary")
+                 cleaned_out = gr.File(label="دانلود JSONL پاک", interactive=False)
+                 clean_status = gr.Markdown()
+                 clean_widgets = [raw_ds, sim_th, clean_btn]
+
+             # --- Tab: Training (view-only for visitors) ---
+             with gr.Tab("آموزش"):
+                 gr.Markdown("SFT/LoRA روی مدل‌های causal (فقط `{input, output}`) + W&B logging.")
+                 with gr.Row():
+                     model_train_dd = gr.Dropdown(
+                         choices=[
+                             "HAKIM (Editable ID below)",
+                             "Hooshvareh (Editable ID below)",
+                             "Dorna-Llama3-8B",
+                             "PersianQA-8B",
+                             "Custom (Editable ID below)"
+                         ],
+                         value="HAKIM (Editable ID below)", label="پروفایل مدل"
+                     )
+                     model_train_id = gr.Textbox(value="AI-Hoosh/HAKIM-7B", label="HF Model ID (قابل ویرایش)")
+                     use_rag_train = gr.Checkbox(value=True, label="RAG-enhanced Training")
+
+                 use_wandb = gr.Checkbox(value=True, label="W&B logging فعال باشد؟")
+                 wandb_project = gr.Textbox(value="mahoon-legal-ai", label="WANDB_PROJECT")
+                 wandb_entity = gr.Textbox(value="", label="WANDB_ENTITY (اختیاری)")
+                 run_name = gr.Textbox(value="mahoon_causal_lora", label="Run name")
+                 gr.Markdown("راهنما: در Settings → Secrets مقدار `WANDB_API_KEY` را تنظیم کنید (مقدار واقعی).")
+
+                 train_files = gr.Files(label="JSONL Files", file_count="multiple", file_types=[".jsonl"])
+                 with gr.Row():
+                     epochs = gr.Slider(1, 6, value=2, step=1, label="epochs")
+                     batch = gr.Slider(1, 8, value=2, step=1, label="batch per device")
+                     lr = gr.Number(value=2e-4, label="learning rate")
+                 train_btn = gr.Button("شروع آموزش", variant="primary")
+                 train_status = gr.Textbox(label="وضعیت آموزش", interactive=False)
+                 train_widgets = [model_train_dd, model_train_id, use_rag_train, use_wandb, wandb_project, wandb_entity,
+                                  run_name, train_files, epochs, batch, lr, train_btn]
+
+             # --- Tab: Weight Tuning (view-only for visitors) ---
+             with gr.Tab("Weight Tuning"):
+                 gr.Markdown("تیون خودکار وزن‌های موجودیت با W&B Sweep. ابتدا در Settings→Secrets مقدار `WANDB_API_KEY` را ست کنید.")
+                 tune_file = gr.File(label="فایل داده (JSON/JSONL)", file_types=[".json", ".jsonl"])
+                 tune_text_key = gr.Textbox(value="متن_کامل", label="کلید متن")
+                 tune_max_samples = gr.Slider(50, 400, value=120, step=10, label="حداکثر نمونه")
+                 tune_runs = gr.Slider(4, 64, value=16, step=4, label="تعداد ران Sweep")
+                 tune_batch = gr.Slider(1, 4, value=2, step=1, label="batch size Builder")
+                 tune_proj = gr.Textbox(value="mahoon-legal-ai", label="WANDB_PROJECT")
+                 tune_entity = gr.Textbox(value="", label="WANDB_ENTITY (اختیاری)")
+                 run_tune = gr.Button("شروع Sweep", variant="primary")
+                 tune_status = gr.Markdown()
+
+                 gr.Markdown("---")
+                 gr.Markdown("اعمال خودکار بهترین وزن‌ها از داشبورد W&B (بر اساس بالاترین `pass_rate`).")
+                 metric_dd = gr.Dropdown(choices=["pass_rate"], value="pass_rate", label="متریک انتخاب بهترین Run")
+                 apply_btn = gr.Button("اعمال بهترین وزن‌ها از W&B", variant="secondary")
+                 tuning_widgets = [tune_file, tune_text_key, tune_max_samples, tune_runs, tune_batch,
+                                   tune_proj, tune_entity, run_tune, metric_dd, apply_btn]
+
+             # ---- Events (consultation open to all / admin operations gated) ----
+             def _resolve_gen(choice: str, override: str) -> str:
+                 return override.strip() if override.strip() else default_gen_models[choice]
+
+             def _on_load(choice, override, rag, pdir, coll, k, th):
+                 self.scfg.rag.enable = bool(rag)
+                 self.scfg.rag.persist_dir = pdir
+                 self.scfg.rag.collection = coll
+                 self.scfg.rag.top_k = int(k)
+                 self.scfg.rag.similarity_threshold = float(th)
+                 return self.load(_resolve_gen(choice, override))
+
+             def _whoami(request: gr.Request):
+                 u = _get_username(request) or "Visitor"
+                 return f"👤 کاربر: **{u}** — دسترسی: {'مدیریتی' if is_admin(request) else 'بازدیدکننده (فقط مشاهده)'}"
+
+             load_btn.click(_on_load,
+                            inputs=[gen_model_dd, gen_model_id, use_rag, persist_dir, collection, top_k, threshold],
+                            outputs=status)
+
+             ask_btn.click(self.answer,
+                           inputs=[question, system_prompt, use_rag, max_new_tokens, temperature, top_p],
+                           outputs=[answer, refs])
+
+             # Admin ops: rely on request injection (Gradio fills these in automatically)
+             def _index_handler(f, ik, tk, request: gr.Request):
+                 return self.build_index(f, ik, tk, request)
+             index_btn.click(_index_handler, inputs=[laws_file, id_key, text_key], outputs=index_status)
+
+             def _build_ds_handler(rf, tk, ckpt, bs, mx, request: gr.Request):
+                 return self.build_dataset(rf, tk, ckpt, bs, mx, request)
+             build_btn.click(_build_ds_handler,
+                             inputs=[raw_file, ds_text_key, model_ckpt, ds_batch_size, max_samples],
+                             outputs=[out_file, build_status])
+
+             def _train_handler(prof, mid, files, rg, e, b, l, uw, wp, we, rn, request: gr.Request):
+                 def _map_profile_to_id(profile: str, current_id: str) -> str:
+                     if current_id.strip(): return current_id.strip()
+                     if "Dorna" in profile: return "PartAI/Dorna-Llama3-8B-Instruct"
+                     if "PersianQA" in profile: return "zpm/Llama-3.1-PersianQA"
+                     if "HAKIM" in profile: return "AI-Hoosh/HAKIM-7B"
+                     if "Hooshvareh" in profile: return "HooshvareLab/llama-fa-7b-instruct"
+                     return "PartAI/Dorna-Llama3-8B-Instruct"
+                 model_id = _map_profile_to_id(prof, mid)
+                 return self.train(model_id, files, rg, e, b, l, uw, wp, we, rn, request=request)
+             train_btn.click(_train_handler,
+                             inputs=[model_train_dd, model_train_id, train_files, use_rag_train, epochs, batch, lr,
+                                     use_wandb, wandb_project, wandb_entity, run_name],
+                             outputs=train_status)
+
+             def _clean_handler(f, th):
+                 p = getattr(f, "name", None) or getattr(f, "path", None)
+                 if not p: return None, "⚠️ فایل نامعتبر."
+                 outp = f"/tmp/cleaned_{int(time.time())}.jsonl"
+                 n = deduplicate_jsonl(p, outp, sim_threshold=float(th))
+                 return outp, f"✅ دیتاست پاک شد. تعداد رکوردهای نهایی: **{n}**"
+             clean_btn.click(_clean_handler, inputs=[raw_ds, sim_th], outputs=[cleaned_out, clean_status])
+
+             def _tune_handler(f, tk, ms, runs, bs, proj, ent, request: gr.Request):
+                 return self.run_weight_tune(f, tk, ms, runs, bs, proj, ent, request)
+             run_tune.click(_tune_handler,
+                            inputs=[tune_file, tune_text_key, tune_max_samples, tune_runs, tune_batch, tune_proj, tune_entity],
+                            outputs=tune_status)
+
+             def _apply_best_handler(proj, ent, m, request: gr.Request):
+                 return self.apply_best_weights(proj, ent, m, request)
+             apply_btn.click(_apply_best_handler,
+                             inputs=[tune_proj, tune_entity, metric_dd],
+                             outputs=tune_status)
+
+             # --- Lock non-consultation tabs for visitors on load ---
+             def _gate_all(request: gr.Request):
+                 admin = is_admin(request)
+                 role_txt = f"👤 کاربر: **{_get_username(request) or 'Visitor'}** — دسترسی: {'مدیریتی' if admin else 'بازدیدکننده (فقط مشاهده)'}"
+                 if not admin:
+                     lock = gr.update(interactive=False)
+                     updates = [lock] * (len(index_widgets) + len(builder_widgets) + len(clean_widgets) + len(train_widgets) + len(tuning_widgets))
+                 else:
+                     unlock = gr.update(interactive=True)
+                     updates = [unlock] * (len(index_widgets) + len(builder_widgets) + len(clean_widgets) + len(train_widgets) + len(tuning_widgets))
+                 return [role_txt] + updates

+             app.load(_whoami, inputs=None, outputs=role_banner)
+             app.load(_gate_all, inputs=None,
+                      outputs=[role_banner] + index_widgets + builder_widgets + clean_widgets + train_widgets + tuning_widgets)

+         return app

+ # ==========================
+ # Entrypoint
+ # ==========================
  if __name__ == "__main__":
+     app = LegalApp()
+     ui = app.build_ui()
      try:
+         ui = ui.queue()  # more stable for ZeroGPU
      except TypeError:
          pass
+     ui.launch(server_name="0.0.0.0", server_port=7860, ssr_mode=False)
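ZeroGPU note: this revision requests the GPU via `with spaces.GPU(...)` wrapped in try/except with CPU fallbacks, while the usage the `spaces` SDK documents, and that the removed version's `_zgpu_marker` used, is the decorator form. A minimal decorator-style sketch for comparison (function name and `duration` are illustrative):

```python
import spaces

@spaces.GPU(duration=60)  # ZeroGPU attaches a GPU for the duration of each call
def generate_gpu(model, inputs, **gen_kwargs):
    return model.generate(**inputs, **gen_kwargs)
```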