Nexchan committed on
Commit
bf73f38
·
verified ·
1 Parent(s): cc6442f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -33
app.py CHANGED
@@ -1,68 +1,57 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
- from pydantic import BaseModel
4
- from typing import List, Dict
5
 
6
- # Inisialisasi client model
7
  client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
8
 
9
- def chat_llama(chat_history: List[Dict[str, str]]):
10
- # Mengirim chat_history ke model dan mendapatkan respons
11
  chat_completion = client.chat_completion(
12
  messages=chat_history,
13
- max_tokens=500
 
14
  )
15
- # Menambahkan respons model ke chat_history
16
  chat_history.append({"role": "assistant", "content": chat_completion.choices[0].message.content})
17
-
18
- return chat_history
 
 
 
 
19
 
20
  def chat_mem(message, chat_history):
21
- # Membuat chat_history_role untuk pengolahan model
22
  chat_history_role = [{"role": "system", "content": "You are a helpful assistant."}]
23
 
24
- # Menambahkan pesan dari chat_history ke chat_history_role
25
  if chat_history:
26
- for user_message, assistant_response in chat_history:
27
  chat_history_role.append({"role": "user", "content": user_message})
28
- chat_history_role.append({"role": "assistant", "content": assistant_response})
29
 
30
- # Menambahkan pesan pengguna terbaru
31
  chat_history_role.append({"role": "user", "content": message})
32
 
33
- # Mendapatkan respons dari model
34
  chat_completion = client.chat_completion(
35
  messages=chat_history_role,
36
- max_tokens=500
 
37
  )
 
 
38
 
39
- # Menambahkan respons model ke chat_history_role
40
- chat_history_role.append({"role": "assistant", "content": chat_completion.choices[0].message.content})
 
41
 
42
- # Format ulang chat_history
43
- modified = [entry["content"] for entry in chat_history_role]
44
- chat_history = [(modified[i*2], modified[i*2+1]) for i in range(len(modified)//2)]
45
-
46
- return "", chat_history # Kembalikan pesan kosong dan chat_history yang diperbarui
47
-
48
- def api_chat(chat_history: List[Dict[str, str]]):
49
- # Memanggil chat_llama untuk mendapatkan respons
50
- updated_history = chat_llama(chat_history)
51
- # Mengambil respons terakhir sebagai output
52
- return updated_history[-1] if updated_history else {}
53
 
54
- # Mengatur antarmuka Gradio
55
  with gr.Blocks() as demo:
56
  with gr.Row():
57
  with gr.Column():
58
  chatbot = gr.Chatbot()
59
- msg = gr.Textbox(interactive=True)
60
  with gr.Row():
61
  clear = gr.ClearButton([msg, chatbot], icon="https://img.icons8.com/?size=100&id=Xnx8cxDef16O&format=png&color=000000")
62
  send_btn = gr.Button("Send", variant='primary', icon="https://img.icons8.com/?size=100&id=g8ltXTwIfJ1n&format=png&color=000000")
63
  msg.submit(fn=chat_mem, inputs=[msg, chatbot], outputs=[msg, chatbot])
64
  send_btn.click(fn=chat_mem, inputs=[msg, chatbot], outputs=[msg, chatbot])
65
-
66
  with gr.Column():
67
  gr.Markdown("### API Testing")
68
  json_input = gr.Textbox(label="Input JSON", placeholder='Enter JSON here', lines=10)
@@ -72,4 +61,4 @@ with gr.Blocks() as demo:
72
  test_btn.click(fn=chat_llama, inputs=json_input, outputs=json_output)
73
 
74
  if __name__ == "__main__":
75
- demo.launch(server_name="0.0.0.0", server_port=7860)
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
 
 
3
 
 
4
  client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
5
 
6
def chat_llama(chat_history):
    """Send a chat history to the model and return it as (user, assistant) tuples.

    Args:
        chat_history: A list of ``{"role": ..., "content": ...}`` message
            dicts, or a JSON string encoding such a list. The string form is
            what this function actually receives from the "Input JSON"
            textbox wired to it in the API-testing panel.

    Returns:
        A list of ``(user_content, assistant_content)`` tuples covering the
        conversation, including the newly generated assistant reply.
    """
    import json  # local import: only needed for the textbox (string) path

    # BUG FIX: the Gradio textbox delivers a raw JSON string; previously it
    # was forwarded as-is to ``messages=``, which expects a message list.
    if isinstance(chat_history, str):
        chat_history = json.loads(chat_history)

    chat_completion = client.chat_completion(
        messages=chat_history,
        max_tokens=500,
        # stream=True
    )
    # Append the assistant's response to the running history.
    chat_history.append(
        {"role": "assistant", "content": chat_completion.choices[0].message.content}
    )

    # Pair consecutive messages into (user, assistant) tuples.
    # NOTE(review): assumes the incoming history strictly alternates
    # user/assistant with no system message — confirm against callers.
    formatted_history = [
        (chat_history[i * 2]["content"], chat_history[i * 2 + 1]["content"])
        for i in range(len(chat_history) // 2)
    ]

    return formatted_history
20
 
21
def chat_mem(message, chat_history):
    """Handle one chat turn, replaying prior exchanges so the model has memory.

    Args:
        message: The user's newest message from the input textbox.
        chat_history: Gradio Chatbot history — a list of
            ``(user_message, assistant_message)`` tuples; may be empty or None.

    Returns:
        A ``("", updated_history)`` tuple: the empty string clears the input
        textbox, and ``updated_history`` is the tuple-formatted conversation
        including the new assistant reply.
    """
    # Rebuild the role-tagged message list the chat-completion API expects.
    chat_history_role = [{"role": "system", "content": "You are a helpful assistant."}]

    if chat_history:
        for user_message, assistant_message in chat_history:
            chat_history_role.append({"role": "user", "content": user_message})
            chat_history_role.append({"role": "assistant", "content": assistant_message})

    chat_history_role.append({"role": "user", "content": message})

    chat_completion = client.chat_completion(
        messages=chat_history_role,
        max_tokens=500,
        # stream=True
    )
    assistant_message = chat_completion.choices[0].message.content
    chat_history_role.append({"role": "assistant", "content": assistant_message})

    # Pair user/assistant turns back into tuples for the Chatbot component.
    # BUG FIX: the previous pairing started at index 0 and therefore paired
    # the system prompt with the first user message, shifting every displayed
    # turn by one. Drop the system message before pairing.
    turns = chat_history_role[1:]
    formatted_history = [
        (turns[i]["content"], turns[i + 1]["content"])
        for i in range(0, len(turns) - 1, 2)
    ]

    return "", formatted_history
 
 
 
 
 
 
 
 
 
 
44
 
 
45
  with gr.Blocks() as demo:
46
  with gr.Row():
47
  with gr.Column():
48
  chatbot = gr.Chatbot()
49
+ msg = gr.Textbox(placeholder="Type a message...", interactive=True)
50
  with gr.Row():
51
  clear = gr.ClearButton([msg, chatbot], icon="https://img.icons8.com/?size=100&id=Xnx8cxDef16O&format=png&color=000000")
52
  send_btn = gr.Button("Send", variant='primary', icon="https://img.icons8.com/?size=100&id=g8ltXTwIfJ1n&format=png&color=000000")
53
  msg.submit(fn=chat_mem, inputs=[msg, chatbot], outputs=[msg, chatbot])
54
  send_btn.click(fn=chat_mem, inputs=[msg, chatbot], outputs=[msg, chatbot])
 
55
  with gr.Column():
56
  gr.Markdown("### API Testing")
57
  json_input = gr.Textbox(label="Input JSON", placeholder='Enter JSON here', lines=10)
 
61
  test_btn.click(fn=chat_llama, inputs=json_input, outputs=json_output)
62
 
63
  if __name__ == "__main__":
64
+ demo.launch()