Upload 3 files

- README.md +74 -8
- app.py +75 -0
- requirements.txt +5 -0
README.md
CHANGED
@@ -1,11 +1,77 @@
---
…
---

# TNM Endpoint (FastAPI on Hugging Face Spaces)

An endpoint that receives a **medical report as text** and returns **T/N/M predictions** using models published on Hugging Face.

## 🔧 What does it do?

- `POST /predict_tnm` ← send the report text → returns `{ "T": ..., "N": ..., "M": ... }` with confidence scores.
- Uses the **Hugging Face Inference API**, so **no GPU is needed** inside the Space.

---

## 🚀 Deploying to Hugging Face Spaces

1) Open your Hugging Face account, then:
   - New → **Create new Space**
   - Name: `tnm-endpoint` (or any name)
   - **SDK: FastAPI**
   - **Hardware: CPU Basic** (sufficient, since the actual computation runs in the Inference API)
   - Optional: **Private** if you want the endpoint to be private

2) After creating the Space:
   - From the **Files & versions** tab, upload the following files:
     - `app.py`
     - `requirements.txt`
     - `README.md` (optional)

3) Add a **Secret** for the token:
   - Settings → Secrets → Add new secret
   - Name: `HF_TOKEN`
   - Value: (your account token from https://huggingface.co/settings/tokens)
   - Save

> Public models may work without a token, but a token gives you more stable access and higher rate limits, and it is required for private models.

4) Wait until the Space reaches the **Running** state.

5) Test the health check (example below):
   - GET: `https://USERNAME-SPACE_NAME.hf.space/healthz`
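
For example, from a terminal (replace the placeholders with your actual username and Space name):

```bash
curl "https://USERNAME-SPACE_NAME.hf.space/healthz"
```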

6) Try a prediction:

```bash
curl -X POST "https://USERNAME-SPACE_NAME.hf.space/predict_tnm" \
  -H "Content-Type: application/json" \
  -d '{"text": "Tumor size is 3 cm with positive axillary lymph nodes, no distant metastasis detected."}'
```

**Expected response** (example):

```json
{
  "input_chars": 92,
  "tnm": {
    "T": {"label": "T2", "score": 0.91, "raw": [...]},
    "N": {"label": "N1", "score": 0.88, "raw": [...]},
    "M": {"label": "M0", "score": 0.95, "raw": [...]}
  },
  "tnm_string": "T2 N1 M0",
  "meta": {"models": {"T": "jkefeli/CancerStage_Classifier_T", "N": "jkefeli/CancerStage_Classifier_N", "M": "jkefeli/CancerStage_Classifier_M"}}
}
```
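
The same call from Python, as a minimal sketch using the `requests` library (not part of this Space's requirements; install it separately):

```python
import requests

# Replace the placeholders with your actual Space URL
URL = "https://USERNAME-SPACE_NAME.hf.space/predict_tnm"

report = ("Tumor size is 3 cm with positive axillary lymph nodes, "
          "no distant metastasis detected.")

# POST the report text as JSON, matching the ReportInput schema in app.py
resp = requests.post(URL, json={"text": report}, timeout=60)
resp.raise_for_status()

result = resp.json()
print(result["tnm_string"])  # e.g. "T2 N1 M0"
```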

---

## 🔁 Customizing the model names
You can swap the models without modifying the code via **Environment variables** in Settings → Variables (for local runs you can export the same variables, as shown below):

- `MODEL_T` (default: `jkefeli/CancerStage_Classifier_T`)
- `MODEL_N` (default: `jkefeli/CancerStage_Classifier_N`)
- `MODEL_M` (default: `jkefeli/CancerStage_Classifier_M`)
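
On Spaces these are set through the UI; for a local run you can export them in the shell instead. The model name below is purely illustrative:

```bash
# Hypothetical replacement model, for illustration only
export MODEL_T="your-org/your-T-classifier"
uvicorn app:app --port 7860
```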

---

## 🧠 Important notes
- If one of the models is not compatible with the `text-classification` task of the Inference API, try an alternative model or load the model locally (which requires a GPU Space).
- If you want a **Batch API**, you can add a new route that accepts a `List[str]` and returns a list of results (see the sketch after this list).
- For security, consider adding a simple **API key** check (a header you validate before handling the request); this is combined into the same sketch below.
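
A minimal sketch of the last two ideas combined, assuming it is appended to the `app.py` shown later in this commit (the `X-API-Key` header and the `API_KEY` secret are illustrative names, not part of the current code):

```python
from typing import List, Optional

from fastapi import Header

# Illustrative: store the expected key as another Space Secret
API_KEY = os.getenv("API_KEY")

class BatchInput(BaseModel):
    texts: List[str]

@app.post("/predict_tnm_batch")
def predict_tnm_batch(batch: BatchInput, x_api_key: Optional[str] = Header(default=None)):
    # Reject the request if a key is configured and the header does not match
    if API_KEY and x_api_key != API_KEY:
        raise HTTPException(status_code=401, detail="Invalid API key")

    results = []
    for text in batch.texts:
        text = (text or "").strip()
        if not text:
            results.append({"error": "empty text"})
            continue
        # Reuse the single-report helper for each item
        preds = {key: _classify(client, text) for key, client in clients.items()}
        results.append({
            "tnm": preds,
            "tnm_string": f'{preds["T"]["label"]} {preds["N"]["label"]} {preds["M"]["label"]}',
        })
    return {"results": results}
```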

Good luck! ✨
app.py
ADDED
@@ -0,0 +1,75 @@

import os
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from huggingface_hub import InferenceClient

# === Config ===
HF_TOKEN = os.getenv("HF_TOKEN")  # add this as a Secret in your Space
MODEL_T = os.getenv("MODEL_T", "jkefeli/CancerStage_Classifier_T")
MODEL_N = os.getenv("MODEL_N", "jkefeli/CancerStage_Classifier_N")
MODEL_M = os.getenv("MODEL_M", "jkefeli/CancerStage_Classifier_M")

# Initialize FastAPI
app = FastAPI(title="TNM Endpoint", version="1.0.0")

# CORS (optional): allow all origins by default; tighten for production
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Init Hugging Face Inference clients (remote inference, no heavy compute on your Space)
clients = {
    "T": InferenceClient(model=MODEL_T, token=HF_TOKEN),
    "N": InferenceClient(model=MODEL_N, token=HF_TOKEN),
    "M": InferenceClient(model=MODEL_M, token=HF_TOKEN),
}

class ReportInput(BaseModel):
    text: str

@app.get("/healthz")
def healthz():
    return {"status": "ok", "models": {"T": MODEL_T, "N": MODEL_N, "M": MODEL_M}}

def _classify(client: InferenceClient, text: str):
    # Uses the HF Inference API task: text-classification.
    # huggingface_hub >= 0.24 returns a list of TextClassificationOutputElement
    # dataclasses with .label / .score attributes (not dicts), and
    # text_classification() takes no wait_for_model argument.
    try:
        outputs = client.text_classification(text)
        if not outputs:
            raise ValueError("Empty response from model")
        best = max(outputs, key=lambda x: x.score)
        return {
            "label": best.label,
            "score": float(best.score),
            "raw": [{"label": o.label, "score": float(o.score)} for o in outputs],
        }
    except Exception as e:
        raise HTTPException(status_code=502, detail=f"Inference error: {e}")

@app.post("/predict_tnm")
def predict_tnm(input: ReportInput):
    text = (input.text or "").strip()
    if not text:
        raise HTTPException(status_code=400, detail="Empty 'text'")
    # Hard-truncate very long inputs to stay under API limits
    if len(text) > 20000:
        text = text[:20000]

    preds = {}
    for key, client in clients.items():
        preds[key] = _classify(client, text)

    t = preds["T"]["label"]
    n = preds["N"]["label"]
    m = preds["M"]["label"]
    tnm_string = f"{t} {n} {m}"

    return {
        "input_chars": len(text),
        "tnm": preds,
        "tnm_string": tnm_string,
        "meta": {"models": {"T": MODEL_T, "N": MODEL_N, "M": MODEL_M}}
    }
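
To try the app locally before uploading (assuming `app.py` and `requirements.txt` sit in the current directory):

```bash
pip install -r requirements.txt
# Spaces serves apps on port 7860 by default; any free port works locally
uvicorn app:app --host 0.0.0.0 --port 7860
```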
requirements.txt
ADDED
@@ -0,0 +1,5 @@

fastapi>=0.110.0
uvicorn>=0.23.0
pydantic>=2.0.0
huggingface_hub>=0.24.0