Shining-Data committed on
Commit
f17b0c4
·
verified ·
1 Parent(s): d46278b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -12
app.py CHANGED
@@ -80,7 +80,7 @@ def load_pipeline(model_name):
80
  Tries bfloat16, falls back to float16 or float32 if unsupported.
81
  """
82
  global PIPELINES
83
- if model_name in PIPELINES:
84
  return PIPELINES[model_name]
85
  repo = MODELS[model_name]["repo_id"]
86
  tokenizer = AutoTokenizer.from_pretrained(repo)
@@ -148,13 +148,6 @@ def chat_response(user_msg, chat_history, system_prompt,
148
  debug = 'Web search disabled.'
149
 
150
  try:
151
-
152
- # merge any fetched search results into the system prompt
153
- if search_results:
154
- enriched = system_prompt.strip() + "\n\nRelevant context:\n" + "\n".join(search_results)
155
- else:
156
- enriched = system_prompt
157
-
158
  # wait up to 1s for snippets, then replace debug with them
159
  if enable_search:
160
  thread_search.join(timeout=float(search_timeout))
@@ -165,16 +158,18 @@ def chat_response(user_msg, chat_history, system_prompt,
165
  else:
166
  debug = "*No web search results found.*"
167
 
168
- # merge fetched snippets into the system prompt
169
- if search_results:
170
- enriched = system_prompt.strip() + "\n\nRelevant context:\n" + "\n".join(search_results)
 
 
171
  else:
172
  enriched = system_prompt
173
 
174
  pipe = load_pipeline(model_name)
175
  prompt = format_conversation(history, enriched, pipe["tokenizer"])
176
  prompt_debug = f"\n\n--- Prompt Preview ---\n```\n{prompt}\n```"
177
- streamer = TextIteratorStreamer(pipe["tokenizer"],
178
  skip_prompt=True,
179
  skip_special_tokens=True)
180
  generation_config = dict(
 
80
  Tries bfloat16, falls back to float16 or float32 if unsupported.
81
  """
82
  global PIPELINES
83
+ if model_name in PIPELINES.keys():
84
  return PIPELINES[model_name]
85
  repo = MODELS[model_name]["repo_id"]
86
  tokenizer = AutoTokenizer.from_pretrained(repo)
 
148
  debug = 'Web search disabled.'
149
 
150
  try:
 
 
 
 
 
 
 
151
  # wait up to 1s for snippets, then replace debug with them
152
  if enable_search:
153
  thread_search.join(timeout=float(search_timeout))
 
158
  else:
159
  debug = "*No web search results found.*"
160
 
161
+ # merge fetched snippets into the system prompt
162
+ if search_results:
163
+ enriched = system_prompt.strip() + "\n\nRelevant context:\n" + "\n".join(search_results)
164
+ else:
165
+ enriched = system_prompt
166
  else:
167
  enriched = system_prompt
168
 
169
  pipe = load_pipeline(model_name)
170
  prompt = format_conversation(history, enriched, pipe["tokenizer"])
171
  prompt_debug = f"\n\n--- Prompt Preview ---\n```\n{prompt}\n```"
172
+ streamer = TextIterStreamer(pipe["tokenizer"],
173
  skip_prompt=True,
174
  skip_special_tokens=True)
175
  generation_config = dict(