Akash473 committed on
Commit
5460aa1
·
verified ·
1 Parent(s): 3a1b242

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -17
app.py CHANGED
@@ -1,18 +1,15 @@
1
  from huggingface_hub import InferenceClient
2
  import gradio as gr
3
 
4
- client = InferenceClient(
5
- "mistralai/Mistral-7B-Instruct-v0.1"
6
- )
7
-
8
 
9
  def format_prompt(message, history):
10
- prompt = "<s>"
11
- for user_prompt, bot_response in history:
12
- prompt += f"[INST] {user_prompt} [/INST]"
13
- prompt += f" {bot_response}</s> "
14
- prompt += f"[INST] {message} [/INST]"
15
- return prompt
16
 
17
  def generate(
18
  prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
@@ -33,7 +30,9 @@ def generate(
33
 
34
  formatted_prompt = format_prompt(prompt, history)
35
 
36
- stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
 
 
37
  output = ""
38
 
39
  for response in stream:
@@ -41,8 +40,7 @@ def generate(
41
  yield output
42
  return output
43
 
44
-
45
- additional_inputs=[
46
  gr.Slider(
47
  label="Temperature",
48
  value=0.9,
@@ -78,13 +76,21 @@ additional_inputs=[
78
  step=0.05,
79
  interactive=True,
80
  info="Penalize repeated tokens",
81
- )
82
  ]
83
 
 
84
 
85
  gr.ChatInterface(
86
  fn=generate,
87
- chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
 
 
 
 
 
 
 
88
  additional_inputs=additional_inputs,
89
- title="""Gen-AIInterviewUseCase featuring Mistral 7B"""
90
- ).launch(show_api=False)
 
1
  from huggingface_hub import InferenceClient
2
  import gradio as gr
3
 
4
+ client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.1")
 
 
 
5
 
6
def format_prompt(message, history):
    """Build a Mistral-instruct prompt from prior chat turns plus the new message.

    Each past (user, bot) pair becomes an "[INST] ... [/INST] answer</s>" segment,
    and the incoming message is appended as the final open instruction.
    """
    pieces = ["<s>"]
    for past_user, past_bot in history:
        pieces.append(f"[INST] {past_user} [/INST]")
        pieces.append(f" {past_bot}</s> ")
    pieces.append(f"[INST] {message} [/INST]")
    return "".join(pieces)
13
 
14
  def generate(
15
  prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
 
30
 
31
  formatted_prompt = format_prompt(prompt, history)
32
 
33
+ stream = client.text_generation(
34
+ formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
35
+ )
36
  output = ""
37
 
38
  for response in stream:
 
40
  yield output
41
  return output
42
 
43
+ additional_inputs = [
 
44
  gr.Slider(
45
  label="Temperature",
46
  value=0.9,
 
76
  step=0.05,
77
  interactive=True,
78
  info="Penalize repeated tokens",
79
+ ),
80
  ]
81
 
82
# Opening message shown to the candidate when the interview simulation starts.
initial_prompt = "Hello John! Welcome to our simulation interview for the Solution Architect position. I understand you may have some initial thoughts or questions about the job requirements. Feel free to share those with me."

gr.ChatInterface(
    fn=generate,
    # BUGFIX: gr.ChatInterface has no `inputs` parameter (passing it raises a
    # TypeError at startup), and gr.Textbox takes `value=`, not `default=`.
    # Seed the greeting directly into the chatbot history instead, which is
    # what the extra Textbox was trying to achieve.
    chatbot=gr.Chatbot(
        value=[(None, initial_prompt)],  # bot opens the conversation
        show_label=False,
        show_share_button=False,
        show_copy_button=True,
        likeable=True,
        layout="panel",
    ),
    additional_inputs=additional_inputs,  # sliders defined above (temperature, etc.)
    title="""Gen-AIInterviewUseCase featuring Mistral 7B""",
).launch(show_api=False)  # hide the auto-generated API docs page