Facemaker committed on
Commit
66ae2f2
·
1 Parent(s): ef917ef

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -11
app.py CHANGED
@@ -1,18 +1,19 @@
1
- from huggingface_hub import InferenceClient
2
  import gradio as gr
 
3
 
4
  client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
5
 
6
def format_prompt(message, history):
    """Assemble a Mixtral-instruct prompt string from prior chat turns.

    Each (user, assistant) pair from *history* becomes an
    ``[INST] … [/INST] reply</s>`` segment; the new *message* is appended
    as a final open instruction for the model to answer.
    """
    segments = ["<s>"]
    for user_turn, assistant_turn in history:
        segments.append(f"[INST] {user_turn} [/INST]")
        segments.append(f" {assistant_turn}</s> ")
    segments.append(f"[INST] {message} [/INST]")
    return "".join(segments)
13
 
14
  def generate(
15
- prompt, history, temperature=0.2, max_new_tokens=4096, top_p=0.95, repetition_penalty=1.0,
16
  ):
17
  temperature = float(temperature)
18
  if temperature < 1e-2:
@@ -28,7 +29,7 @@ def generate(
28
  seed=42,
29
  )
30
 
31
- formatted_prompt = format_prompt(prompt, history)
32
 
33
  stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
34
  output = ""
@@ -42,7 +43,7 @@ def generate(
42
  mychatbot = gr.Chatbot(
43
  avatar_images=["./user.png", "./botm.png"], bubble_full_width=False, show_label=False, show_copy_button=True, likeable=True,)
44
 
45
- demo = gr.ChatInterface(fn=generate,
46
  chatbot=mychatbot,
47
  title="Mixtral 8x7b Chat",
48
  retry_btn=None,
 
1
+ import os
2
  import gradio as gr
3
+ from huggingface_hub import InferenceClient
4
 
5
  client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
6
 
7
def format_prompt(message, history, context):
    """Assemble a Mixtral-instruct prompt, injecting extra *context* text.

    Reconstructed from a truncated diff: the original added lines were cut
    mid-f-string (`[/{{`, `f" {context}` with no closing quote). The closing
    tokens are restored from the pre-change version of this function.

    NOTE(review): the diff truncates the line that interpolates {context};
    it is assumed here to be prepended to each bot response — confirm the
    exact placement against the upstream app.py.
    """
    prompt = " "
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {context} {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
14
 
15
  def generate(
16
+ prompt, history, temperature=0.2, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0, context="",
17
  ):
18
  temperature = float(temperature)
19
  if temperature < 1e-2:
 
29
  seed=42,
30
  )
31
 
32
+ formatted_prompt = format_prompt(prompt, history, context)
33
 
34
  stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
35
  output = ""
 
43
  mychatbot = gr.Chatbot(
44
  avatar_images=["./user.png", "./botm.png"], bubble_full_width=False, show_label=False, show_copy_button=True, likeable=True,)
45
 
46
+ demo = gr.ChatInterface(fn=generate,
47
  chatbot=mychatbot,
48
  title="Mixtral 8x7b Chat",
49
  retry_btn=None,