Update app.py
app.py CHANGED
@@ -23,19 +23,27 @@ llm = None
 @asynccontextmanager
 async def lifespan(app: FastAPI):
     global llm
-… 13 deleted lines (old 26–38) not rendered on the page …
+    try:
+        print("⏳ Downloading model...")
+        model_path = hf_hub_download(
+            repo_id=MODEL_REPO,
+            filename=MODEL_FILE,
+        )
+
+        print("✅ Model downloaded. Loading...")
+        llm = Llama(
+            model_path=model_path,
+            n_ctx=N_CTX,
+            n_threads=N_THREADS,
+            n_batch=N_BATCH,
+            verbose=False,
+        )
+
+        print("🚀 Model loaded successfully")
+
+    except Exception as e:
+        print("❌ Model load failed:", e)
+        llm = None

     yield

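For context: FastAPI runs this `lifespan` hook around the whole application, so everything before `yield` executes at startup and anything after it at shutdown. A minimal sketch of the same load-or-fall-back pattern (names here are illustrative, not from this repo):

from contextlib import asynccontextmanager

from fastapi import FastAPI

model = None  # module-level handle, like the global `llm` above

def load_model() -> object:
    # Stand-in for the hf_hub_download(...) + Llama(...) calls in the diff.
    return object()

@asynccontextmanager
async def lifespan(app: FastAPI):
    global model
    try:
        model = load_model()
    except Exception:
        model = None  # endpoints check for None and answer with a 503
    yield  # the application serves requests while suspended here
    model = None  # anything after the yield runs at shutdown

app = FastAPI(lifespan=lifespan)

Because the ASGI server typically finishes startup before accepting connections, and the download and load calls above block until done, a `None` handle at request time usually signals a failed load rather than one still in progress.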
@@ -127,19 +135,31 @@ Return ONLY the humanized text.
 # ==================================================
 @app.post("/humanize")
 def humanize(req: HumanizeRequest):
+    if llm is None:
+        raise HTTPException(
+            status_code=503,
+            detail="Model is still loading. Please try again in a few seconds."
+        )
+
     if not req.text.strip():
         raise HTTPException(status_code=400, detail="Input text is empty")

     prompt = build_prompt(req.text, req.section, req.author_notes)

-… 7 deleted lines (old 135–141) not rendered on the page …
+    try:
+        response = llm(
+            prompt,
+            max_tokens=400,
+            temperature=0.4,
+            top_p=0.9,
+            repeat_penalty=1.1,
+        )
+    except Exception as e:
+        raise HTTPException(
+            status_code=500,
+            detail=f"Inference error: {str(e)}"
+        )

     return {
         "humanized_text": response["choices"][0]["text"].strip()
-    }
+    }
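With these changes the endpoint distinguishes three failure modes: 503 while `llm` is `None`, 400 for empty input, and 500 for inference errors. A hypothetical client sketch against a local server (the base URL and payload values are illustrative assumptions; the field names follow `build_prompt(req.text, req.section, req.author_notes)`):

import time

import requests  # assumed available; any HTTP client works

def humanize_text(text: str, base_url: str = "http://localhost:8000") -> str:
    payload = {"text": text, "section": "introduction", "author_notes": ""}
    for _ in range(10):
        resp = requests.post(f"{base_url}/humanize", json=payload, timeout=120)
        if resp.status_code == 503:  # model handle missing; wait and retry
            time.sleep(5)
            continue
        resp.raise_for_status()  # surfaces the 400 and 500 paths
        return resp.json()["humanized_text"]
    raise RuntimeError("gave up waiting for the model to load")

print(humanize_text("The results demonstrate a significant improvement."))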