StevenMSAI committed
Commit 0e21d39 · verified · 1 Parent(s): 91fd18e

Update app.py

Files changed (1)
  1. app.py +8 -9
app.py CHANGED
@@ -1,24 +1,23 @@
 import os
 import gradio as gr
 
-# Optional: only if torch is installed (it is in your requirements)
+# Optional CPU hygiene
+os.environ["TOKENIZERS_PARALLELISM"] = "false"
 try:
     import torch
-    os.environ["TOKENIZERS_PARALLELISM"] = "false"
     try:
-        torch.set_num_threads(2)  # nicer on 2 vCPUs
+        torch.set_num_threads(2)  # nice on 2 vCPUs
     except Exception:
         pass
 except Exception:
-    # If torch import ever fails, don't crash the app
     pass
 
 from transformers import pipeline
 
-# ---------------- Model config ----------------
+# -------- Model config --------
 GEN_MODEL_NAME = "MBZUAI/LaMini-Flan-T5-248M"  # CPU-friendly text2text model
 
-# Lazy-load the generator once
+# Lazy-loaded generator
 _t2t = None
 def get_t2t():
     global _t2t
@@ -26,9 +25,9 @@ def get_t2t():
         _t2t = pipeline(
             "text2text-generation",
             model=GEN_MODEL_NAME,
-            tokenizer=GEN_MODEL_NAME,  # explicit to avoid mismatch
-            device_map="cpu"           # force CPU on free tier
+            tokenizer=GEN_MODEL_NAME
         )
+        print(f"[startup] Loaded model: {GEN_MODEL_NAME}")
     return _t2t
 
 def ai_fallback(prompt: str) -> str:
@@ -41,7 +40,7 @@ def ai_fallback(prompt: str) -> str:
         )[0]["generated_text"]
         return (out or "").strip()
     except Exception as e:
-        # Log real error to console/Space logs to help debug if anything else fails
+        # Print the real error to logs so you can see it in the console
         print("AI fallback error:", repr(e))
         return "AI fallback had an issue. Please try a simpler question or use the topics in 'help'."
 
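
For reference, a minimal sketch of how the lazily loaded generator and ai_fallback defined above could be exposed through a Gradio interface. The UI portion of app.py is outside this diff, so the respond handler and the gr.Interface wiring below are assumptions for illustration only, not the app's actual layout:

import gradio as gr

def respond(message: str) -> str:
    # Hypothetical handler: ai_fallback() calls get_t2t() internally, so the
    # LaMini-Flan-T5 pipeline is downloaded and loaded on the first request
    # rather than at import time, keeping Space startup fast.
    return ai_fallback(message)

demo = gr.Interface(fn=respond, inputs="text", outputs="text")

if __name__ == "__main__":
    demo.launch()

Because the model only loads on first use, the [startup] print added in this commit fires on the first request that reaches get_t2t(), which is a quick way to confirm in the Space logs that the intended checkpoint was loaded.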