MasteredUltraInstinct committed on
Commit
58505c4
·
verified ·
1 Parent(s): 104b764

Create llm_service.py

Browse files
Files changed (1) hide show
  1. llm_service.py +34 -0
llm_service.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI, Request
2
+ from pydantic import BaseModel
3
+ from transformers import pipeline
4
+ import uvicorn
5
+
6
+ app = FastAPI()
7
+
8
+ # Load TinyLlama model (only once)
9
+ llm_pipe = pipeline(
10
+ "text-generation",
11
+ model="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
12
+ device="cpu",
13
+ max_new_tokens=100
14
+ )
15
+
16
+ class LaTeXRequest(BaseModel):
17
+ latex: str
18
+
19
+ @app.post("/fix-latex")
20
+ async def fix_latex(request: LaTeXRequest):
21
+ prompt = (
22
+ f"The following LaTeX was extracted from a distorted image of a polynomial and may contain errors:\n\n"
23
+ f"{request.latex}\n\n"
24
+ "Please rewrite it as a clean, correct LaTeX polynomial equation (e.g., x^2 - 5x + 6 = 0). "
25
+ "Only return the fixed LaTeX expression. No explanation."
26
+ )
27
+ output = llm_pipe(prompt)[0]["generated_text"]
28
+ # Strip the original prompt from the output
29
+ cleaned_output = output.replace(prompt, "").strip()
30
+ return {"fixed_latex": cleaned_output}
31
+
32
+ # For local testing (ignored on HF Spaces)
33
+ if __name__ == "__main__":
34
+ uvicorn.run(app, host="0.0.0.0", port=7860)