Update app.py
Browse files
app.py
CHANGED
|
@@ -25,9 +25,14 @@ SUMMARY_PRESETS = {
|
|
| 25 |
# -----------------------------
|
| 26 |
# Load tokenizer & pipeline
|
| 27 |
# -----------------------------
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 31 |
|
| 32 |
# -----------------------------
|
| 33 |
# Helpers: file reading
|
|
@@ -241,10 +246,8 @@ demo = gr.Interface(
|
|
| 241 |
"Paste text or upload a PDF/DOCX. The system splits long input by tokens, summarizes each chunk,"
|
| 242 |
" then optionally performs a 2nd-pass summarization to produce a concise final summary."
|
| 243 |
),
|
| 244 |
-
allow_flagging="never",
|
| 245 |
examples=[],
|
| 246 |
)
|
| 247 |
|
| 248 |
if __name__ == "__main__":
|
| 249 |
-
# on Spaces this will be ignored and Gradio will serve automatically
|
| 250 |
demo.launch()
|
|
|
|
# -----------------------------
# Load tokenizer & pipeline
# -----------------------------
# Wrap the load in try/except so startup failures produce a clear,
# actionable error in the Space logs instead of an opaque crash.
try:
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    summarizer = pipeline(
        "summarization",
        model=MODEL_NAME,
        tokenizer=tokenizer,
        device=DEVICE,
    )
    # DEVICE == -1 is the transformers convention for CPU; anything else is a GPU index.
    print(f"Loaded model {MODEL_NAME} on {'CPU' if DEVICE == -1 else 'GPU'}")
except Exception as e:
    # Fail loudly with a helpful message. `from e` chains the original
    # exception explicitly so the logs show the real root cause
    # (missing weights, network error, OOM, ...) as the direct cause.
    raise RuntimeError(f"Model load failed: {e}") from e
|
|
|
|
| 246 |
"Paste text or upload a PDF/DOCX. The system splits long input by tokens, summarizes each chunk,"
|
| 247 |
" then optionally performs a 2nd-pass summarization to produce a concise final summary."
|
| 248 |
),
|
|
|
|
| 249 |
examples=[],
|
| 250 |
)
|
| 251 |
|
def _main() -> None:
    """Script entry point: start the Gradio server for local runs."""
    # On Hugging Face Spaces this guard is bypassed — the platform imports
    # the module and serves `demo` automatically.
    demo.launch()


if __name__ == "__main__":
    _main()
|