Nexchan committed on
Commit
5c29e6d
·
verified ·
1 Parent(s): 8bfb8b9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +65 -32
app.py CHANGED
@@ -1,47 +1,80 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
 
 
3
 
4
- client = InferenceClient(
5
- "meta-llama/Meta-Llama-3-8B-Instruct",
6
- )
7
 
8
- def chat_mem(message,chat_history):
 
 
 
9
 
10
- print(len(chat_history))
11
- chat_history_role = [{"role": "system", "content": "You are a helpful assistant." },]
12
- if chat_history != []:
13
- for i in range(len(chat_history)):
14
- chat_history_role.append({"role": "user", "content": chat_history[i][0]})
15
- chat_history_role.append({"role": "assistant", "content": chat_history[i][1]})
16
- chat_history_role.append({"role": "user", "content": message})
17
-
18
 
19
- chat_completion = client.chat_completion(
20
- messages=chat_history_role,
21
- max_tokens=500,
22
- # stream=True
23
- )
24
- chat_history_role.append({"role": "assistant", "content": chat_completion.choices[0].message.content})
25
- print(chat_history_role)
26
 
27
- modified = map(lambda x: x["content"], chat_history_role)
28
- a = list(modified)
29
- chat_history=[(a[i*2+1], a[i*2+2]) for i in range(len(a)//2)]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
30
 
31
- return "", chat_history
32
 
 
 
 
 
 
33
 
 
34
  with gr.Blocks() as demo:
35
- with gr.Row():
36
- with gr.Column():
37
- chatbot = gr.Chatbot()
38
- msg = gr.Textbox(interactive=True, )
39
- with gr.Row():
40
- clear = gr.ClearButton([msg, chatbot], icon="https://img.icons8.com/?size=100&id=Xnx8cxDef16O&format=png&color=000000")
41
- send_btn = gr.Button("Send", variant='primary', icon="https://img.icons8.com/?size=100&id=g8ltXTwIfJ1n&format=png&color=000000")
42
- msg.submit(fn=chat_mem, inputs=[msg, chatbot], outputs=[msg, chatbot])
43
- send_btn.click(fn=chat_mem, inputs=[msg, chatbot], outputs=[msg, chatbot])
 
 
44
 
 
 
 
 
 
45
 
46
  if __name__ == "__main__":
47
  demo.launch()
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
+ from pydantic import BaseModel
4
+ from typing import List, Dict
5
 
6
# Initialize the Hugging Face inference client for the hosted Llama 3 model.
client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
 
8
 
9
# Schema for a single chat message in an API request body.
class ChatMessage(BaseModel):
    """One chat turn: a role tag plus its text content."""
    # NOTE(review): this model is not referenced anywhere in this file
    # (the handlers pass plain dicts) — confirm it is still needed.
    role: str
    content: str
13
 
14
def chat_llama(chat_history: List[Dict[str, str]]):
    """Run the model on *chat_history* and append its reply.

    The caller's list is mutated in place (the assistant turn is
    appended) and that same list is returned.
    """
    # Ask the model for a completion over the full transcript.
    completion = client.chat_completion(
        messages=chat_history,
        max_tokens=500,
    )
    reply = completion.choices[0].message.content
    # Record the assistant's answer on the shared history.
    chat_history.append({"role": "assistant", "content": reply})
    return chat_history
 
 
 
 
 
 
24
 
25
def chat_mem(message, chat_history):
    """Gradio handler: answer *message* with full conversation memory.

    Parameters
    ----------
    message : str
        The user's newest message from the textbox.
    chat_history : list[tuple[str, str]]
        Prior (user, assistant) pairs as kept by gr.Chatbot.

    Returns
    -------
    tuple[str, list[tuple[str, str]]]
        An empty string (clears the textbox) and the updated pair list.
    """
    # Rebuild the role-tagged transcript the chat API expects.
    chat_history_role = [{"role": "system", "content": "You are a helpful assistant."}]
    for user_message, assistant_response in (chat_history or []):
        chat_history_role.append({"role": "user", "content": user_message})
        chat_history_role.append({"role": "assistant", "content": assistant_response})

    # Add the newest user turn.
    chat_history_role.append({"role": "user", "content": message})

    # Get the model's response.
    chat_completion = client.chat_completion(
        messages=chat_history_role,
        max_tokens=500
    )
    chat_history_role.append(
        {"role": "assistant", "content": chat_completion.choices[0].message.content}
    )

    # Rebuild the (user, assistant) pair list for gr.Chatbot.
    # BUG FIX: the previous code paired contents[0] (the system prompt)
    # with the first user message, shifting every pair by one. Pairs
    # start at index 1, after the system message.
    contents = [entry["content"] for entry in chat_history_role]
    chat_history = [(contents[i], contents[i + 1]) for i in range(1, len(contents) - 1, 2)]

    return "", chat_history  # Empty string clears the input box.
52
 
53
def api_chat(chat_history: List[Dict[str, str]]):
    """API entry point: run the model and return only the final turn.

    Delegates to chat_llama, then hands back the last message dict
    (the assistant's reply), or an empty dict if nothing came back.
    """
    updated = chat_llama(chat_history)
    if not updated:
        return {}
    return updated[-1]
58
 
59
# Build the Gradio user interface.
with gr.Blocks() as demo:
    gr.Markdown("## Chat Demo")
    with gr.Row():
        with gr.Column():
            # Chat panel: transcript display plus message input.
            chatbot = gr.Chatbot()
            msg = gr.Textbox(placeholder="Type your message here...", interactive=True)
            with gr.Row():
                clear = gr.ClearButton([msg, chatbot], icon="https://img.icons8.com/?size=100&id=Xnx8cxDef16O&format=png&color=000000")
                send_btn = gr.Button("Send", variant='primary', icon="https://img.icons8.com/?size=100&id=g8ltXTwIfJ1n&format=png&color=000000")
            # Enter and the Send button both go through the same handler.
            msg.submit(fn=chat_mem, inputs=[msg, chatbot], outputs=[msg, chatbot])
            send_btn.click(fn=chat_mem, inputs=[msg, chatbot], outputs=[msg, chatbot])

    gr.Markdown("## API Endpoint for Testing")
    gr.Markdown("### Send a POST request to `/api/chat` with the following JSON body:")
    gr.Markdown("```json\n[ { \"role\": \"user\", \"content\": \"Hello, how are you?\" }, { \"role\": \"assistant\", \"content\": \"I'm fine, thank you! How can I assist you today?\" }, { \"role\": \"user\", \"content\": \"Can you tell me a joke?\" } ]\n```")
    gr.Markdown("### API Response:")
    # BUG FIX: the previous code called .launch(share=True) here, which
    # started a second server while `demo` was still being defined.
    # Render the API tester inside this Blocks app instead; the single
    # demo.launch() at the bottom of the file serves everything.
    gr.Interface(fn=api_chat, inputs="json", outputs="json").render()
78
 
79
# Start the app only when executed as a script (not when imported).
if __name__ == "__main__":
    demo.launch()