JDhruv14 commited on
Commit
8abefbc
·
verified ·
1 Parent(s): 8db29b9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -6
app.py CHANGED
@@ -1,12 +1,10 @@
1
  import os, torch, gradio as gr, spaces
2
  from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
3
 
4
- MODEL_ID = os.getenv("MODEL_ID", "JDhruv14/fullnfinal")
5
 
6
- # --- System prompt (Gita persona) ---
7
  GITA_SYSTEM_PROMPT = """You are Lord Krishna—the serene, compassionate teacher of the Bhagavad Gita."""
8
 
9
- # Load once (CPU until first call; device_map will move to GPU on first run)
10
  tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
11
  model = AutoModelForCausalLM.from_pretrained(
12
  MODEL_ID,
@@ -26,7 +24,6 @@ def _msgs_from_history(history, system_text):
26
  if not history:
27
  return msgs
28
 
29
- # Support both new "messages" format and legacy (user, assistant) tuples
30
  if isinstance(history[0], dict) and "role" in history[0] and "content" in history[0]:
31
  for m in history:
32
  role, content = m.get("role"), m.get("content")
@@ -41,7 +38,6 @@ def _msgs_from_history(history, system_text):
41
  return msgs
42
 
43
  def _eos_ids(tok):
44
- # Support ints/lists and optional <|im_end|>
45
  ids = set()
46
  if tok.eos_token_id is not None:
47
  if isinstance(tok.eos_token_id, (list, tuple)):
@@ -86,7 +82,6 @@ def chat_fn(message, history, system_text, temperature, top_p, max_new, min_new)
86
 
87
  @spaces.GPU()
88
  def gradio_fn(message, history):
89
- # Inject the Gita system prompt here
90
  return chat_fn(
91
  message=message,
92
  history=history,
 
1
  import os, torch, gradio as gr, spaces
2
  from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
3
 
4
+ MODEL_ID = os.getenv("MODEL_ID", "JDhruv14/Qwen2.5-3B-Gita-FT")
5
 
 
6
  GITA_SYSTEM_PROMPT = """You are Lord Krishna—the serene, compassionate teacher of the Bhagavad Gita."""
7
 
 
8
  tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
9
  model = AutoModelForCausalLM.from_pretrained(
10
  MODEL_ID,
 
24
  if not history:
25
  return msgs
26
 
 
27
  if isinstance(history[0], dict) and "role" in history[0] and "content" in history[0]:
28
  for m in history:
29
  role, content = m.get("role"), m.get("content")
 
38
  return msgs
39
 
40
  def _eos_ids(tok):
 
41
  ids = set()
42
  if tok.eos_token_id is not None:
43
  if isinstance(tok.eos_token_id, (list, tuple)):
 
82
 
83
  @spaces.GPU()
84
  def gradio_fn(message, history):
 
85
  return chat_fn(
86
  message=message,
87
  history=history,