Facemaker committed on
Commit
e8b1310
·
1 Parent(s): e427869

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -18
app.py CHANGED
@@ -3,18 +3,17 @@ import gradio as gr
3
 
4
  client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
5
 
6
def format_prompt(message, system_prompt, additional_info, history):
    """Assemble a Mixtral-style prompt string.

    Args:
        message: The new user message for this turn.
        system_prompt: System instruction appended after the history.
        additional_info: Extra context, wrapped in [INFO] tags.
        history: Iterable of (user_prompt, bot_response) pairs.

    Returns:
        The concatenated prompt string, starting with "<s>".
    """
    pieces = ["<s>"]
    for user_prompt, bot_response in history:
        pieces.append(f"[INST] {user_prompt} [/INST]")
        pieces.append(f" {bot_response}</s> ")
    pieces.append(f"[INST] {system_prompt} [/INST]")
    pieces.append(f"[INFO] {additional_info} [/INFO]")
    pieces.append(f"[USER] {message} [/USER]</s>")
    return "".join(pieces)
15
 
16
  def generate(
17
- prompt, system_prompt, additional_info, history, temperature=0.1, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
18
  ):
19
  temperature = float(temperature)
20
  if temperature < 1e-2:
@@ -30,7 +29,7 @@ def generate(
30
  seed=42,
31
  )
32
 
33
- formatted_prompt = format_prompt(prompt, system_prompt, additional_info, history)
34
 
35
  stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
36
  output = ""
@@ -40,12 +39,7 @@ def generate(
40
  yield output
41
  return output
42
 
43
- # Ejemplo de uso
44
- system_prompt = "te llamas Facemaker"
45
- additional_info = "eres un ser humano"
46
- message = "Cuéntame algo divertido"
47
- history = [("Hola", "¡Hola! ¿En qué puedo ayudarte?")]
48
-
49
  mychatbot = gr.Chatbot(
50
  avatar_images=["./user.png", "./botm.png"], bubble_full_width=False, show_label=False, show_copy_button=True, likeable=True,)
51
 
@@ -56,4 +50,4 @@ demo = gr.ChatInterface(fn=generate,
56
  undo_btn=None
57
  )
58
 
59
- demo.queue().launch(show_api=True)
 
3
 
4
  client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
5
 
6
def format_prompt(message, sys, history):
    """Build a Mixtral-instruct prompt string from the chat history.

    Args:
        message: The new user message for this turn.
        sys: Optional system/persona instruction; when falsy, the
            built-in Facemaker persona is used instead. (NOTE(review):
            the parameter name shadows the stdlib ``sys`` module — kept
            unchanged for caller compatibility.)
        history: Iterable of (user_prompt, bot_response) pairs.

    Returns:
        The formatted prompt, e.g. "<s>[INST] persona [/INST]...".
    """
    # Bug fix: the previous version assigned the hard-coded persona to
    # ``sys`` and then never referenced it again, so no system
    # instruction ever made it into the prompt. Use the caller-supplied
    # value when provided, falling back to the hard-coded persona.
    persona = sys or (
        "eres Facemaker, un joven chieno que vive en la ciudad del vino, "
        "puedes ayudar en lo que se te solicita"
    )
    prompt = f"<s>[INST] {persona} [/INST]"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
 
14
 
15
  def generate(
16
+ prompt, sys, history, temperature=0.1, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
17
  ):
18
  temperature = float(temperature)
19
  if temperature < 1e-2:
 
29
  seed=42,
30
  )
31
 
32
+ formatted_prompt = format_prompt(prompt, sys, history)
33
 
34
  stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
35
  output = ""
 
39
  yield output
40
  return output
41
 
42
+
 
 
 
 
 
43
  mychatbot = gr.Chatbot(
44
  avatar_images=["./user.png", "./botm.png"], bubble_full_width=False, show_label=False, show_copy_button=True, likeable=True,)
45
 
 
50
  undo_btn=None
51
  )
52
 
53
+ demo.queue().launch(show_api=True)