cody82 committed on
Commit 101f40d · verified · 1 Parent(s): 577e2ae

Update app.py

Files changed (1)
  1. app.py +19 -33
app.py CHANGED
@@ -1,12 +1,8 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
 
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
 client = InferenceClient("cody82/innopolis_bot_model")
 
-
 def respond(
     message,
     history: list[tuple[str, str]],
@@ -15,50 +11,40 @@ def respond(
     temperature,
     top_p,
 ):
-    messages = [{"role": "system", "content": system_message}]
+    # Build the conversation history into a single text prompt
+    full_prompt = system_message.strip() + "\n"
 
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
+    for user_msg, bot_msg in history:
+        if user_msg:
+            full_prompt += f"User: {user_msg}\n"
+        if bot_msg:
+            full_prompt += f"Bot: {bot_msg}\n"
 
-    messages.append({"role": "user", "content": message})
+    full_prompt += f"User: {message}\nBot:"
 
+    # Generate the model's reply
     response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
+    for token in client.text_generation(
+        prompt=full_prompt,
+        max_new_tokens=max_tokens,
         temperature=temperature,
         top_p=top_p,
+        stream=True,
     ):
-        token = message.choices[0].delta.content
-
         response += token
         yield response
 
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
+        gr.Textbox(value="You are a helpful assistant.", label="System message"),
+        gr.Slider(minimum=1, maximum=1024, value=256, step=1, label="Max new tokens"),
+        gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
+        gr.Slider(minimum=0.1, maximum=1.0, value=0.9, step=0.05, label="Top-p"),
     ],
+    title="Innopolis Bot",
+    description="Chat with the custom model cody82/innopolis_bot_model",
 )
 
-
 if __name__ == "__main__":
     demo.launch()
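
For reference, a minimal standalone sketch of the streaming `text_generation` call the updated app.py relies on. The prompt text, token limit, and sampling values below are illustrative assumptions, not part of this commit; with stream=True (and the default details=False), huggingface_hub's InferenceClient.text_generation yields the generated text incrementally as plain strings.

from huggingface_hub import InferenceClient

# Illustrative check of the streaming call used in the updated app.py.
client = InferenceClient("cody82/innopolis_bot_model")

# Hypothetical prompt in the same "User:/Bot:" format app.py builds.
prompt = "You are a helpful assistant.\nUser: Hello!\nBot:"

response = ""
# Each iteration yields the next chunk of generated text as a str.
for token in client.text_generation(
    prompt=prompt,
    max_new_tokens=64,  # illustrative limit, not the app's slider value
    temperature=0.7,
    top_p=0.9,
    stream=True,
):
    response += token
    print(token, end="", flush=True)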