Facemaker committed on
Commit
a124b02
·
1 Parent(s): 95449b4

Rename app.py to app2.py

Browse files
Files changed (1) hide show
  1. app.py → app2.py +19 -12
app.py → app2.py RENAMED
@@ -3,16 +3,18 @@ import gradio as gr
3
 
4
  client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
5
 
6
def format_prompt(message, history):
    """Assemble a Mixtral-instruct prompt from prior chat turns plus the new message.

    Each (user, bot) pair from *history* is rendered as an [INST] block followed
    by the bot reply and a closing </s>; the new *message* is appended as a final
    open [INST] block for the model to complete.
    """
    pieces = ["<s>"]
    for user_turn, bot_turn in history:
        pieces.append(f"[INST] {user_turn} [/INST]")
        pieces.append(f" {bot_turn}</s> ")
    pieces.append(f"[INST] {message} [/INST]")
    return "".join(pieces)
 
 
13
 
14
  def generate(
15
- prompt, history, temperature=0.2, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
16
  ):
17
  temperature = float(temperature)
18
  if temperature < 1e-2:
@@ -28,7 +30,7 @@ def generate(
28
  seed=42,
29
  )
30
 
31
- formatted_prompt = format_prompt(prompt, history)
32
 
33
  stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
34
  output = ""
@@ -38,15 +40,20 @@ def generate(
38
  yield output
39
  return output
40
 
41
-
 
 
 
 
 
42
  mychatbot = gr.Chatbot(
43
  avatar_images=["./user.png", "./botm.png"], bubble_full_width=False, show_label=False, show_copy_button=True, likeable=True,)
44
 
45
  demo = gr.ChatInterface(fn=generate,
46
  chatbot=mychatbot,
47
- title="Tomoniai's Mixtral 8x7b Chat",
48
  retry_btn=None,
49
  undo_btn=None
50
  )
51
 
52
- demo.queue().launch(show_api=True)
 
3
 
4
  client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
5
 
6
def format_prompt(message, system_prompt, additional_info, history):
    """Build the prompt string sent to the model.

    Renders each (user, bot) pair from *history* as an [INST] block plus the
    bot reply terminated by </s>, then appends the system prompt as [INST],
    the extra context as [INFO], and the new user *message* as a [USER] block
    closed with </s>.

    NOTE(review): the [INFO]/[USER] tags are non-standard for Mixtral-instruct
    templates — presumably intentional in this revision; confirm with the model.
    """
    parts = ["<s>"]
    for user_turn, bot_turn in history:
        parts.append(f"[INST] {user_turn} [/INST]")
        parts.append(f" {bot_turn}</s> ")
    parts.append(f"[INST] {system_prompt} [/INST]")
    parts.append(f"[INFO] {additional_info} [/INFO]")
    parts.append(f"[USER] {message} [/USER]</s>")
    return "".join(parts)
15
 
16
  def generate(
17
+ prompt, system_prompt, additional_info, history, temperature=0.1, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
18
  ):
19
  temperature = float(temperature)
20
  if temperature < 1e-2:
 
30
  seed=42,
31
  )
32
 
33
+ formatted_prompt = format_prompt(prompt, system_prompt, additional_info, history)
34
 
35
  stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
36
  output = ""
 
40
  yield output
41
  return output
42
 
43
+ # Ejemplo de uso
44
+ system_prompt = "Crear una historia sobre"
45
+ additional_info = "un viaje a la playa"
46
+ message = "Cuéntame algo divertido"
47
+ history = [("Hola", "¡Hola! ¿En qué puedo ayudarte?")]
48
+
49
  mychatbot = gr.Chatbot(
50
  avatar_images=["./user.png", "./botm.png"], bubble_full_width=False, show_label=False, show_copy_button=True, likeable=True,)
51
 
52
  demo = gr.ChatInterface(fn=generate,
53
  chatbot=mychatbot,
54
+ title="Facemaker Mixtral 8x7b Chat",
55
  retry_btn=None,
56
  undo_btn=None
57
  )
58
 
59
+ demo.queue().launch(show_api=True)