Akash473 committed on
Commit
bf62845
·
verified ·
1 Parent(s): 161b32e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -26
app.py CHANGED
@@ -3,8 +3,8 @@ import gradio as gr
3
 
4
  client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.1")
5
 
6
- # Define the initial prompt
7
- initial_prompt = """
8
  Simulate a GenAI-based interview scenario for the position of Solution Architect with a candidate named "John Doe." Follow these steps:
9
 
10
  1. Start with a friendly greeting and welcome message. Ask John to introduce himself briefly.
@@ -31,11 +31,9 @@ Remember to maintain a natural and polite conversational tone throughout the int
31
  """
32
 
33
  def format_prompt(message, history):
34
- # Start with the initial prompt for the first interaction
35
- if not history:
36
- return initial_prompt
37
-
38
  prompt = "<s>"
 
 
39
  for user_prompt, bot_response in history:
40
  prompt += f"[INST] {user_prompt} [/INST]"
41
  prompt += f" {bot_response}</s> "
@@ -43,7 +41,7 @@ def format_prompt(message, history):
43
  return prompt
44
 
45
  def generate(
46
- message, history, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
47
  ):
48
  temperature = float(temperature)
49
  if temperature < 1e-2:
@@ -59,15 +57,9 @@ def generate(
59
  seed=42,
60
  )
61
 
62
- formatted_prompt = format_prompt(message, history)
63
 
64
- stream = client.text_generation(
65
- formatted_prompt,
66
- **generate_kwargs,
67
- stream=True,
68
- details=True,
69
- return_full_text=False,
70
- )
71
  output = ""
72
 
73
  for response in stream:
@@ -75,7 +67,7 @@ def generate(
75
  yield output
76
  return output
77
 
78
- additional_inputs = [
79
  gr.Slider(
80
  label="Temperature",
81
  value=0.9,
@@ -111,19 +103,12 @@ additional_inputs = [
111
  step=0.05,
112
  interactive=True,
113
  info="Penalize repeated tokens",
114
- ),
115
  ]
116
 
117
  gr.ChatInterface(
118
  fn=generate,
119
- inputs=[gr.Textbox(label="Interviewer", type="text", default=initial_prompt)],
120
- outputs=gr.Chatbot(
121
- show_label=False,
122
- show_share_button=False,
123
- show_copy_button=True,
124
- likeable=True,
125
- layout="panel",
126
- ),
127
  additional_inputs=additional_inputs,
128
- title="""Mistral 7B""",
129
  ).launch(show_api=False)
 
3
 
4
  client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.1")
5
 
6
+ # Your static prompt
7
+ static_prompt = """
8
  Simulate a GenAI-based interview scenario for the position of Solution Architect with a candidate named "John Doe." Follow these steps:
9
 
10
  1. Start with a friendly greeting and welcome message. Ask John to introduce himself briefly.
 
31
  """
32
 
33
  def format_prompt(message, history):
 
 
 
 
34
  prompt = "<s>"
35
+ # Add the static prompt as the first message
36
+ prompt += f"[INST] {static_prompt} [/INST]"
37
  for user_prompt, bot_response in history:
38
  prompt += f"[INST] {user_prompt} [/INST]"
39
  prompt += f" {bot_response}</s> "
 
41
  return prompt
42
 
43
  def generate(
44
+ prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
45
  ):
46
  temperature = float(temperature)
47
  if temperature < 1e-2:
 
57
  seed=42,
58
  )
59
 
60
+ formatted_prompt = format_prompt(prompt, history)
61
 
62
+ stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
 
 
 
 
 
 
63
  output = ""
64
 
65
  for response in stream:
 
67
  yield output
68
  return output
69
 
70
+ additional_inputs=[
71
  gr.Slider(
72
  label="Temperature",
73
  value=0.9,
 
103
  step=0.05,
104
  interactive=True,
105
  info="Penalize repeated tokens",
106
+ )
107
  ]
108
 
109
  gr.ChatInterface(
110
  fn=generate,
111
+ chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
 
 
 
 
 
 
 
112
  additional_inputs=additional_inputs,
113
+ title="""Mistral 7B"""
114
  ).launch(show_api=False)