# Root FastAPI app for the MedAI Processor Space: dataset download, SFT/RAG processing, Google Drive upload
import os
import json
import time, logging
import threading
import datetime as dt
from typing import Optional, Dict
from fastapi import FastAPI, HTTPException, BackgroundTasks, Request
from fastapi.responses import HTMLResponse, JSONResponse
from pydantic import BaseModel
from dotenv import load_dotenv
from utils.datasets import resolve_dataset, hf_download_dataset
from utils.processor import process_file_into_sft
from utils.rag import process_file_into_rag
from utils.drive_saver import DriveSaver
from utils.llm import Paraphraser
from utils.schema import CentralisedWriter, RAGWriter
from utils.token import get_credentials, exchange_code, build_auth_url
from vi.translator import VietnameseTranslator
# ────────── Log ───────────
logger = logging.getLogger("app")
if not logger.handlers:
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
logger.addHandler(handler)
# ────────── Boot ──────────
load_dotenv(override=True)
SPACE_NAME = os.getenv("SPACE_NAME", "MedAI Processor")
OUTPUT_DIR = os.path.abspath(os.getenv("OUTPUT_DIR", "cache/outputs"))
LOG_DIR = os.path.abspath(os.getenv("LOG_DIR", "logs"))
os.makedirs(OUTPUT_DIR, exist_ok=True)
os.makedirs(LOG_DIR, exist_ok=True)
# --- Bootstrap Google OAuth ---
try:
creds = get_credentials()
if creds:
logger.info("✅ OAuth credentials loaded and valid")
except Exception as e:
logger.warning(f"⚠️ OAuth not initialized yet: {e}")
# --- Bootstrap Google Drive ---
drive = DriveSaver(default_folder_id=os.getenv("GDRIVE_FOLDER_ID"))
# LLM rotator with paraphraser nodes
paraphraser = Paraphraser(
nvidia_model=os.getenv("NVIDIA_MODEL", "meta/llama-3.1-8b-instruct"),
gemini_model_easy=os.getenv("GEMINI_MODEL_EASY", "gemini-2.5-flash-lite"),
gemini_model_hard=os.getenv("GEMINI_MODEL_HARD", "gemini-2.5-flash"),
)
# Vietnamese translator (currently using Helsinki-NLP/opus-mt-en-vi)
vietnamese_translator = VietnameseTranslator()
app = FastAPI(title="Medical Dataset Augmenter", version="1.1.0")
STATE_LOCK = threading.Lock()
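# Shared job state; guarded by STATE_LOCK and returned as a snapshot by /status.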
STATE: Dict[str, object] = {
"running": False,
"dataset": None,
"started_at": None,
"progress": 0.0,
"message": "idle",
"last_result": None
}
class AugmentOptions(BaseModel):
# ratios are 0..1
paraphrase_ratio: float = 0.2
paraphrase_outputs: bool = True
backtranslate_ratio: float = 0.1
style_standardize: bool = True
deidentify: bool = True
dedupe: bool = True
max_chars: int = 5000 # cap extremely long contexts
    consistency_check_ratio: float = 0.05 # keep this ratio small (e.g. 0.01-0.05)
# KD / distillation (optional, keeps default off)
distill_fraction: float = 0.0 # for unlabeled only
expand: bool = True # Enable back-translation and complex augmentation
    max_aug_per_sample: int = 2 # 1-3: number of LLM calls used to augment/paraphrase each sample
class ProcessParams(BaseModel):
augment: AugmentOptions = AugmentOptions()
    sample_limit: Optional[int] = None # Optional cap on the number of samples processed
seed: int = 42
rag_processing: bool = False # Enable RAG-specific processing
vietnamese_translation: bool = False # Enable Vietnamese translation
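# Illustrative request body for POST /process/{dataset_key} (all fields are optional;
# the defaults above apply). Example values only, not a required schema:
#   {"augment": {"paraphrase_ratio": 0.2, "expand": true}, "sample_limit": 1000, "seed": 42}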
def set_state(**kwargs):
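    """Thread-safe update of the shared job STATE."""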
with STATE_LOCK:
STATE.update(kwargs)
def now_iso():
return dt.datetime.utcnow().isoformat()
# Instructional UI
@app.get("/", response_class=HTMLResponse)
def root():
    return f"""
    <html>
      <head><title>{SPACE_NAME} – Medical Dataset Augmenter</title></head>
      <body>
        <h1>📊 {SPACE_NAME} – Medical Dataset Augmenter</h1>
        <p>This Hugging Face Space processes medical datasets into a centralised fine-tuning format
           (JSONL + CSV), with optional data augmentation.</p>
        <h2>⚡ Quick Actions</h2>
        <p>Click a button below to start processing a dataset with default augmentation parameters.</p>
        <p><b>RAG Processing:</b> Convert to QCA format for RAG systems</p>
        <h2>📝 Log</h2>
        <p>Click a button above to run a job...</p>
      </body>
    </html>
    """
@app.get("/status")
def status():
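    """Return a snapshot of the current job state."""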
with STATE_LOCK:
return JSONResponse(STATE)
# ──────── Google OAuth ────────
@app.get("/oauth2/start")
def oauth2_start(request: Request):
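    """Return the Google OAuth authorization URL for this Space's callback."""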
# Compute redirect URI dynamically from the actual host the Space is using
host = request.headers.get("x-forwarded-host") or request.headers.get("host")
scheme = "https" # Spaces are HTTPS at the edge
redirect_uri = f"{scheme}://{host}/oauth2/callback"
try:
url = build_auth_url(redirect_uri)
return JSONResponse({"authorize_url": url})
except Exception as e:
raise HTTPException(500, f"OAuth init failed: {e}")
# Handle the OAuth callback and display the refresh token
@app.get("/oauth2/callback")
def oauth2_callback(request: Request, code: str = "", state: str = ""):
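    """Exchange the OAuth authorization code for credentials and display the resulting refresh token."""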
if not code:
raise HTTPException(400, "Missing 'code'")
    # Rebuild the same redirect URI that was used in the authorization request
host = request.headers.get("x-forwarded-host") or request.headers.get("host")
scheme = "https"
redirect_uri = f"{scheme}://{host}/oauth2/callback"
    # Exchange the authorization code for credentials and show the refresh token
try:
creds = exchange_code(code, redirect_uri)
refresh = creds.refresh_token or os.getenv("GDRIVE_REFRESH_TOKEN", "")
# UI
        html = f"""
        <html>
          <body>
            <h1>✅ Google Drive Authorized</h1>
            <p>Your refresh token is:</p>
            <pre>{refresh}</pre>
            <p>👉 Copy this token and save it into your Hugging Face Space Secrets
               as <code>GDRIVE_REFRESH_TOKEN</code>.</p>
            <p>This ensures persistence across rebuilds.</p>
          </body>
        </html>
        """
return HTMLResponse(html)
except Exception as e:
raise HTTPException(500, f"OAuth exchange failed: {e}")
@app.get("/files")
def files():
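    """List generated artifact files relative to OUTPUT_DIR."""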
out = []
for root, _, fns in os.walk(OUTPUT_DIR):
for fn in fns:
out.append(os.path.relpath(os.path.join(root, fn), OUTPUT_DIR))
return {"output_dir": OUTPUT_DIR, "files": sorted(out)}
@app.post("/process/{dataset_key}")
def process_dataset(dataset_key: str, params: ProcessParams, background: BackgroundTasks):
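    """Queue a background processing job for `dataset_key`; rejects the request if another job is running."""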
with STATE_LOCK:
if STATE["running"]:
logger.warning(
f"[JOB] Rejecting new job dataset={dataset_key} "
f"current={STATE['dataset']} started_at={STATE['started_at']}"
)
raise HTTPException(409, detail="Another job is running.")
STATE["running"] = True
STATE["dataset"] = dataset_key
STATE["started_at"] = now_iso()
STATE["progress"] = 0.0
STATE["message"] = "starting"
STATE["last_result"] = None
logger.info(
f"[JOB] Queued dataset={dataset_key} "
f"params={{'sample_limit': {params.sample_limit}, 'seed': {params.seed}, "
f"'rag_processing': {params.rag_processing}, 'augment': {params.augment.dict()} }}"
)
    # Hand the job off to a background runner thread
logger.info(f"[JOB] Started dataset={dataset_key}")
background.add_task(_run_job, dataset_key, params)
return {"ok": True, "message": f"Job for '{dataset_key}' started."}
@app.post("/rag/{dataset_key}")
def process_rag_dataset(dataset_key: str, params: ProcessParams, background: BackgroundTasks):
"""Dedicated RAG processing endpoint"""
# Force RAG processing mode
params.rag_processing = True
with STATE_LOCK:
if STATE["running"]:
logger.warning(
f"[RAG] Rejecting new RAG job dataset={dataset_key} "
f"current={STATE['dataset']} started_at={STATE['started_at']}"
)
raise HTTPException(409, detail="Another job is running.")
STATE["running"] = True
STATE["dataset"] = dataset_key
STATE["started_at"] = now_iso()
STATE["progress"] = 0.0
STATE["message"] = "starting RAG processing"
STATE["last_result"] = None
logger.info(
f"[RAG] Queued RAG dataset={dataset_key} "
f"params={{'sample_limit': {params.sample_limit}, 'seed': {params.seed} }}"
)
    # Hand the RAG job off to a background runner thread
logger.info(f"[RAG] Started RAG dataset={dataset_key}")
background.add_task(_run_job, dataset_key, params)
return {"ok": True, "message": f"RAG processing job for '{dataset_key}' started."}
def _run_job(dataset_key: str, params: ProcessParams):
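    """Background worker: resolve and download the dataset, run SFT or RAG processing,
    upload the artifacts to Google Drive, and record the outcome in STATE."""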
t0 = time.time()
try:
ds = resolve_dataset(dataset_key)
if not ds:
set_state(running=False, message="unknown dataset")
return
        # Download the HF dataset and begin processing
set_state(message="downloading")
local_path = hf_download_dataset(ds["repo_id"], ds["filename"], ds["repo_type"])
logger.info(f"[JOB] Downloaded {ds['repo_id']}/{ds['filename']} → {local_path}")
        # Prepare a timestamp for file naming
ts = dt.datetime.utcnow().strftime("%Y%m%d-%H%M%S")
mode_suffix = "rag" if params.rag_processing else "sft"
stem = f"{dataset_key}-{mode_suffix}-{ts}"
jsonl_path = os.path.join(OUTPUT_DIR, f"{stem}.jsonl")
csv_path = os.path.join(OUTPUT_DIR, f"{stem}.csv")
        # Mark the job as processing
set_state(message="processing", progress=0.05)
        # Select the output writer for the chosen processing mode (RAG vs SFT)
writer = RAGWriter(jsonl_path=jsonl_path, csv_path=csv_path) if params.rag_processing else CentralisedWriter(jsonl_path=jsonl_path, csv_path=csv_path)
# Load translator if Vietnamese translation is requested
translator = None
if params.vietnamese_translation:
set_state(message="Loading Vietnamese translator", progress=0.05)
try:
vietnamese_translator.load_model()
translator = vietnamese_translator
logger.info("✅ Vietnamese translator loaded successfully")
except Exception as e:
logger.error(f"❌ Failed to load Vietnamese translator: {e}")
set_state(message=f"Warning: Vietnamese translation failed - {e}", progress=0.1)
if params.rag_processing:
# RAG processing mode
set_state(message="RAG processing", progress=0.1)
count, stats = process_file_into_rag(
dataset_key=dataset_key,
input_path=local_path,
writer=writer,
nvidia_model=os.getenv("NVIDIA_MODEL", "meta/llama-3.1-8b-instruct"),
sample_limit=params.sample_limit,
seed=params.seed,
progress_cb=lambda p, msg=None: set_state(progress=p, message=msg or STATE["message"]),
translator=translator,
paraphraser=paraphraser
)
else:
# Standard SFT processing mode
set_state(message="SFT processing", progress=0.1)
# Add Vietnamese translation flag to augment options
augment_opts = params.augment.dict()
augment_opts["vietnamese_translation"] = params.vietnamese_translation
count, stats = process_file_into_sft(
dataset_key=dataset_key,
input_path=local_path,
writer=writer,
paraphraser=paraphraser,
augment_opts=augment_opts,
sample_limit=params.sample_limit,
seed=params.seed,
progress_cb=lambda p, msg=None: set_state(progress=p, message=msg or STATE["message"]),
translator=translator
)
logger.info(f"[JOB] Processed dataset={dataset_key} rows={count} stats={stats}")
writer.close()
# Upload to GDrive
set_state(message="uploading to Google Drive", progress=0.95)
up1 = drive.upload_file_to_drive(jsonl_path, mimetype="application/json")
up2 = drive.upload_file_to_drive(csv_path, mimetype="text/csv")
logger.info(
f"[JOB] Uploads complete uploaded={bool(up1 and up2)} "
f"jsonl={jsonl_path} csv={csv_path}"
)
        # Finalise the job and record the result
result = {
"dataset": dataset_key,
"processing_mode": "RAG" if params.rag_processing else "SFT",
"processed_rows": count,
"stats": stats,
"artifacts": {"jsonl": jsonl_path, "csv": csv_path},
"uploaded": bool(up1 and up2),
"duration_sec": round(time.time() - t0, 2)
}
set_state(message="done", progress=1.0, last_result=result, running=False)
logger.info(
f"[JOB] Finished dataset={dataset_key} "
f"duration_sec={round(time.time()-t0, 2)}"
)
except Exception as e:
logger.exception(f"[JOB] Error for dataset={dataset_key}: {e}")
set_state(message=f"error: {e}", running=False)