Floncer committed on
Commit
639fcbc
·
verified ·
1 Parent(s): 484b4f5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -7
app.py CHANGED
@@ -1,6 +1,6 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
- import random
4
 
5
  # 50 лучших моделей для выбора
6
  MODELS = {
@@ -56,20 +56,41 @@ MODELS = {
56
  "⚙️ FLAN-UL2": "google/flan-ul2"
57
  }
58
 
59
- def format_model_name(name):
60
- return f"{name[:30]}..." if len(name) > 30 else name
61
-
62
  def respond(message, history, model, system_message, max_tokens, temperature, top_p, hf_token):
63
  try:
64
- client = InferenceClient(token=hf_token.token if hf_token else None, model=MODELS[model])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
65
 
66
- messages = [{"role": "system", "content": system_message}] + history + [{"role": "user", "content": message}]
 
 
 
67
 
68
  response = ""
69
- for chunk in client.chat_completion(messages, max_tokens=max_tokens, stream=True, temperature=temperature, top_p=top_p):
 
 
 
 
 
 
70
  if chunk.choices and chunk.choices[0].delta.content:
71
  response += chunk.choices[0].delta.content
72
  yield response
 
73
  except Exception as e:
74
  yield f"❌ Error: {str(e)}"
75
 
@@ -125,6 +146,7 @@ button.primary:hover {
125
  }
126
  """
127
 
 
128
  with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
129
  with gr.Sidebar(open=True):
130
  gr.Markdown("""
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
+ import os
4
 
5
  # 50 лучших моделей для выбора
6
  MODELS = {
 
56
  "⚙️ FLAN-UL2": "google/flan-ul2"
57
  }
58
 
 
 
 
59
  def respond(message, history, model, system_message, max_tokens, temperature, top_p, hf_token):
60
  try:
61
+ # Исправление: проверяем тип токена
62
+ token = None
63
+ if hf_token:
64
+ if hasattr(hf_token, 'token'):
65
+ token = hf_token.token
66
+ elif isinstance(hf_token, str):
67
+ token = hf_token
68
+ else:
69
+ token = os.getenv("HF_TOKEN") # Берем из переменных окружения
70
+
71
+ if not token:
72
+ yield "❌ Please login with your Hugging Face account or set HF_TOKEN"
73
+ return
74
+
75
+ client = InferenceClient(token=token, model=MODELS[model])
76
 
77
+ messages = [{"role": "system", "content": system_message}]
78
+ for msg in history:
79
+ messages.append({"role": msg["role"], "content": msg["content"]})
80
+ messages.append({"role": "user", "content": message})
81
 
82
  response = ""
83
+ for chunk in client.chat_completion(
84
+ messages,
85
+ max_tokens=max_tokens,
86
+ stream=True,
87
+ temperature=temperature,
88
+ top_p=top_p
89
+ ):
90
  if chunk.choices and chunk.choices[0].delta.content:
91
  response += chunk.choices[0].delta.content
92
  yield response
93
+
94
  except Exception as e:
95
  yield f"❌ Error: {str(e)}"
96
 
 
146
  }
147
  """
148
 
149
+ # Создаем интерфейс
150
  with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
151
  with gr.Sidebar(open=True):
152
  gr.Markdown("""