Nexchan committed on
Commit
fdb8106
·
verified ·
1 Parent(s): bf73f38

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +36 -53
app.py CHANGED
@@ -1,64 +1,47 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
4
- client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
 
 
5
 
6
- def chat_llama(chat_history):
7
- chat_completion = client.chat_completion(
8
- messages=chat_history,
9
- max_tokens=500,
10
- # stream=True
11
- )
12
- # Append the assistant's response to chat history
13
- chat_history.append({"role": "assistant", "content": chat_completion.choices[0].message.content})
14
-
15
- # Format chat_history as a list of tuples
16
- formatted_history = [(chat_history[i*2]["content"], chat_history[i*2+1]["content"])
17
- for i in range(len(chat_history)//2)]
18
-
19
- return formatted_history
 
 
 
 
 
 
 
 
 
 
20
 
21
- def chat_mem(message, chat_history):
22
- chat_history_role = [{"role": "system", "content": "You are a helpful assistant."}]
23
-
24
- if chat_history:
25
- for user_message, assistant_message in chat_history:
26
- chat_history_role.append({"role": "user", "content": user_message})
27
- chat_history_role.append({"role": "assistant", "content": assistant_message})
28
-
29
- chat_history_role.append({"role": "user", "content": message})
30
-
31
- chat_completion = client.chat_completion(
32
- messages=chat_history_role,
33
- max_tokens=500,
34
- # stream=True
35
- )
36
- assistant_message = chat_completion.choices[0].message.content
37
- chat_history_role.append({"role": "assistant", "content": assistant_message})
38
-
39
- # Format chat_history as a list of tuples
40
- formatted_history = [(chat_history_role[i*2]["content"], chat_history_role[i*2+1]["content"])
41
- for i in range(len(chat_history_role)//2)]
42
-
43
- return "", formatted_history
44
 
45
  with gr.Blocks() as demo:
46
- with gr.Row():
47
- with gr.Column():
48
- chatbot = gr.Chatbot()
49
- msg = gr.Textbox(placeholder="Type a message...", interactive=True)
50
- with gr.Row():
51
- clear = gr.ClearButton([msg, chatbot], icon="https://img.icons8.com/?size=100&id=Xnx8cxDef16O&format=png&color=000000")
52
- send_btn = gr.Button("Send", variant='primary', icon="https://img.icons8.com/?size=100&id=g8ltXTwIfJ1n&format=png&color=000000")
53
- msg.submit(fn=chat_mem, inputs=[msg, chatbot], outputs=[msg, chatbot])
54
- send_btn.click(fn=chat_mem, inputs=[msg, chatbot], outputs=[msg, chatbot])
55
- with gr.Column():
56
- gr.Markdown("### API Testing")
57
- json_input = gr.Textbox(label="Input JSON", placeholder='Enter JSON here', lines=10)
58
- json_output = gr.Textbox(label="Output JSON", lines=10, interactive=False)
59
- test_btn = gr.Button("Test API")
60
 
61
- test_btn.click(fn=chat_llama, inputs=json_input, outputs=json_output)
62
 
63
  if __name__ == "__main__":
64
  demo.launch()
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
4
+ client = InferenceClient(
5
+ "meta-llama/Meta-Llama-3-8B-Instruct",
6
+ )
7
 
8
+ def chat_mem(message,chat_history):
9
+
10
+ print(len(chat_history))
11
+ chat_history_role = [{"role": "system", "content": "You are a helpful assistant." },]
12
+ if chat_history != []:
13
+ for i in range(len(chat_history)):
14
+ chat_history_role.append({"role": "user", "content": chat_history[i][0]})
15
+ chat_history_role.append({"role": "assistant", "content": chat_history[i][1]})
16
+ chat_history_role.append({"role": "user", "content": message})
17
+
18
+
19
+ chat_completion = client.chat_completion(
20
+ messages=chat_history_role,
21
+ max_tokens=500,
22
+ # stream=True
23
+ )
24
+ chat_history_role.append({"role": "assistant", "content": chat_completion.choices[0].message.content})
25
+ print(chat_history_role)
26
+
27
+ modified = map(lambda x: x["content"], chat_history_role)
28
+ a = list(modified)
29
+ chat_history=[(a[i*2+1], a[i*2+2]) for i in range(len(a)//2)]
30
+
31
+ return "", chat_history
32
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
 
34
  with gr.Blocks() as demo:
35
+ with gr.Row():
36
+ with gr.Column():
37
+ chatbot = gr.Chatbot()
38
+ msg = gr.Textbox(interactive=True, )
39
+ with gr.Row():
40
+ clear = gr.ClearButton([msg, chatbot], icon="https://img.icons8.com/?size=100&id=Xnx8cxDef16O&format=png&color=000000")
41
+ send_btn = gr.Button("Send", variant='primary', icon="https://img.icons8.com/?size=100&id=g8ltXTwIfJ1n&format=png&color=000000")
42
+ msg.submit(fn=chat_mem, inputs=[msg, chatbot], outputs=[msg, chatbot])
43
+ send_btn.click(fn=chat_mem, inputs=[msg, chatbot], outputs=[msg, chatbot])
 
 
 
 
 
44
 
 
45
 
46
  if __name__ == "__main__":
47
  demo.launch()