BaoKhuong committed on
Commit
76d55bc
·
verified ·
1 Parent(s): 4cc5fe9

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -12
app.py CHANGED
@@ -9,7 +9,7 @@ os.makedirs(os.environ["HF_HOME"], exist_ok=True)
9
  os.makedirs(os.environ["XDG_CACHE_HOME"], exist_ok=True)
10
 
11
  import json
12
- from typing import Dict, List
13
 
14
  import gradio as gr
15
  import requests
@@ -100,7 +100,20 @@ def load_model():
100
  return _llm
101
 
102
 
 
 
 
 
 
 
 
 
 
 
 
 
103
  def generate_response(prompt: str, temperature: float = 0.2, max_new_tokens: int = 384) -> str:
 
104
  llm = load_model()
105
  res = llm(
106
  prompt=prompt,
@@ -244,8 +257,8 @@ def build_equity_research_prompt(symbol: str, overview: Dict) -> str:
244
  # -------- Gradio UI --------
245
 
246
  def ui_app():
247
- with gr.Blocks(title="Fin-o1-14B Tools") as demo:
248
- gr.Markdown("""# Fin-o1-14B Tools
249
  Two tabs: Price Prediction (Finnhub with Alpha Vantage fallback) and Equity Research (Alpha Vantage via RapidAPI).""")
250
 
251
  with gr.Tab("Price Prediction"):
@@ -266,13 +279,9 @@ Two tabs: Price Prediction (Finnhub with Alpha Vantage fallback) and Equity Rese
266
  except Exception as e2:
267
  return f"Error fetching candles: {e2}"
268
  prompt = build_price_prediction_prompt(sym, candles)
269
- try:
270
- resp = generate_response(prompt, temperature=temperature, max_new_tokens=int(max_tokens))
271
- return resp
272
- except Exception as e:
273
- return f"Error generating response: {e}"
274
 
275
- btn.click(on_predict, inputs=[symbol, resolution, count, temp, max_new], outputs=out)
276
 
277
  with gr.Tab("Equity Research Report"):
278
  symbol2 = gr.Textbox(label="Ticker (e.g., MSFT)", value="MSFT")
@@ -285,13 +294,14 @@ Two tabs: Price Prediction (Finnhub with Alpha Vantage fallback) and Equity Rese
285
  try:
286
  overview = fetch_alpha_vantage_overview(sym)
287
  prompt = build_equity_research_prompt(sym, overview)
288
- resp = generate_response(prompt, temperature=temperature, max_new_tokens=int(max_tokens))
289
- return resp
290
  except Exception as e:
291
  return f"Error generating report: {e}"
292
 
293
- btn2.click(on_report, inputs=[symbol2, temp2, max_new2], outputs=out2)
294
 
 
 
295
  return demo
296
 
297
 
 
9
  os.makedirs(os.environ["XDG_CACHE_HOME"], exist_ok=True)
10
 
11
  import json
12
+ from typing import Dict, List, Generator
13
 
14
  import gradio as gr
15
  import requests
 
100
  return _llm
101
 
102
 
103
+ def generate_response_stream(prompt: str, temperature: float = 0.2, max_new_tokens: int = 384) -> Generator[str, None, None]:
104
+ yield "Initializing model..."
105
+ llm = load_model()
106
+ yield "Model loaded. Generating..."
107
+ accum = ""
108
+ for chunk in llm(prompt=prompt, max_tokens=max_new_tokens, temperature=temperature, stream=True):
109
+ text = chunk.get("choices", [{}])[0].get("text", "")
110
+ if text:
111
+ accum += text
112
+ yield accum
113
+
114
+
115
  def generate_response(prompt: str, temperature: float = 0.2, max_new_tokens: int = 384) -> str:
116
+ # non-streaming fallback
117
  llm = load_model()
118
  res = llm(
119
  prompt=prompt,
 
257
  # -------- Gradio UI --------
258
 
259
  def ui_app():
260
+ with gr.Blocks(title="Fin-o1-8B Tools") as demo:
261
+ gr.Markdown("""# Fin-o1-8B Tools
262
  Two tabs: Price Prediction (Finnhub with Alpha Vantage fallback) and Equity Research (Alpha Vantage via RapidAPI).""")
263
 
264
  with gr.Tab("Price Prediction"):
 
279
  except Exception as e2:
280
  return f"Error fetching candles: {e2}"
281
  prompt = build_price_prediction_prompt(sym, candles)
282
+ return generate_response_stream(prompt, temperature=temperature, max_new_tokens=int(max_tokens))
 
 
 
 
283
 
284
+ btn.click(on_predict, inputs=[symbol, resolution, count, temp, max_new], outputs=out, show_progress=True)
285
 
286
  with gr.Tab("Equity Research Report"):
287
  symbol2 = gr.Textbox(label="Ticker (e.g., MSFT)", value="MSFT")
 
294
  try:
295
  overview = fetch_alpha_vantage_overview(sym)
296
  prompt = build_equity_research_prompt(sym, overview)
297
+ return generate_response_stream(prompt, temperature=temperature, max_new_tokens=int(max_tokens))
 
298
  except Exception as e:
299
  return f"Error generating report: {e}"
300
 
301
+ btn2.click(on_report, inputs=[symbol2, temp2, max_new2], outputs=out2, show_progress=True)
302
 
303
+ # Enable queue so users see pending/running state
304
+ demo.queue(concurrency_count=1, max_size=8)
305
  return demo
306
 
307