"""Small FastAPI service that asks a seq2seq model to repair malformed LaTeX.

POST /fix with JSON {"latex": "..."} returns {"fixed_latex": "..."} on
success, or {"error": "..."} with HTTP 500 on failure.
"""

import os

# The cache location MUST be configured before importing transformers:
# transformers resolves its cache-path constants at import time, so setting
# the variable afterwards has no effect.
os.environ["TRANSFORMERS_CACHE"] = "/data/cache"
# TRANSFORMERS_CACHE is deprecated in newer transformers releases; HF_HOME
# is the current knob. Setting both keeps old and new versions working.
os.environ.setdefault("HF_HOME", "/data/cache")
os.makedirs("/data/cache", exist_ok=True)

from fastapi import FastAPI
from fastapi.responses import JSONResponse
from pydantic import BaseModel
from transformers import pipeline

# Loaded once at module import so every request reuses the same model.
# max_new_tokens bounds the generation length per request.
pipe = pipeline(
    "text2text-generation",
    model="google/flan-t5-small",
    max_new_tokens=100,
)

app = FastAPI()


class LaTeXRequest(BaseModel):
    # Raw (possibly malformed) LaTeX expression submitted by the client.
    latex: str


@app.post("/fix")
def fix_latex(data: LaTeXRequest) -> JSONResponse:
    """Repair a malformed LaTeX expression via the text2text pipeline.

    Declared as a plain ``def`` (not ``async def``) on purpose: ``pipe(...)``
    is a blocking, compute-heavy call, and a sync endpoint lets FastAPI run
    it in its threadpool instead of stalling the event loop.

    Returns:
        200 with {"fixed_latex": <model output>} on success,
        500 with {"error": <message>} if generation fails.
    """
    prompt = f"Fix this malformed LaTeX expression so it's valid:\n{data.latex}"
    try:
        # pipeline returns a list of dicts; take the first generation.
        result = pipe(prompt)[0]["generated_text"]
    except Exception as e:
        # Boundary handler: surface the failure as a 500 instead of crashing.
        return JSONResponse(content={"error": str(e)}, status_code=500)
    return JSONResponse(content={"fixed_latex": result.strip()})