Nullpointer-KK committed on
Commit
1b56e81
·
verified ·
1 Parent(s): eb8c1af

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -1,7 +1,7 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
4
- # Available open-source base models (completion style)
5
  MODEL_CHOICES = {
6
  "Mistral 7B Instruct (mistralai/Mistral-7B-Instruct-v0.2)": "mistralai/Mistral-7B-Instruct-v0.2",
7
  "Falcon 7B Instruct (tiiuae/falcon-7b-instruct)": "tiiuae/falcon-7b-instruct",
@@ -15,7 +15,7 @@ def complete_text(prompt, max_tokens, temperature, top_p, model_choice, hf_token
15
  Streams output token-by-token.
16
  """
17
  if not hf_token or not hf_token.token:
18
- yield "⚠️ Please log in with your Hugging Face account (for gated models like LLaMA-2)."
19
  return
20
 
21
  model_id = MODEL_CHOICES[model_choice]
@@ -31,9 +31,8 @@ def complete_text(prompt, max_tokens, temperature, top_p, model_choice, hf_token
31
  repetition_penalty=1.0,
32
  )
33
 
34
- for event in stream:
35
- # Each event is a string chunk
36
- response_text += event
37
  yield response_text
38
 
39
 
@@ -72,9 +71,10 @@ with gr.Blocks() as demo:
72
  lines=15,
73
  )
74
 
 
75
  submit.click(
76
  fn=complete_text,
77
- inputs=[prompt, max_tokens, temperature, top_p, model_choice, gr.OAuthToken()],
78
  outputs=output,
79
  )
80
 
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
4
+ # Available open-source base models
5
  MODEL_CHOICES = {
6
  "Mistral 7B Instruct (mistralai/Mistral-7B-Instruct-v0.2)": "mistralai/Mistral-7B-Instruct-v0.2",
7
  "Falcon 7B Instruct (tiiuae/falcon-7b-instruct)": "tiiuae/falcon-7b-instruct",
 
15
  Streams output token-by-token.
16
  """
17
  if not hf_token or not hf_token.token:
18
+ yield "⚠️ Please log in with your Hugging Face account (needed for gated models like LLaMA-2)."
19
  return
20
 
21
  model_id = MODEL_CHOICES[model_choice]
 
31
  repetition_penalty=1.0,
32
  )
33
 
34
+ for chunk in stream:
35
+ response_text += chunk
 
36
  yield response_text
37
 
38
 
 
71
  lines=15,
72
  )
73
 
74
+ # ✅ Pass gr.OAuthToken as an input type (no manual instantiation)
75
  submit.click(
76
  fn=complete_text,
77
+ inputs=[prompt, max_tokens, temperature, top_p, model_choice, gr.OAuthToken],
78
  outputs=output,
79
  )
80