Nexchan committed on
Commit
75bef96
·
verified ·
1 Parent(s): cd36780

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -10
app.py CHANGED
@@ -1,45 +1,59 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
 
3
  from typing import List, Dict
4
 
5
  # Inisialisasi client model
6
  client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
7
 
 
 
 
 
 
8
  def chat_llama(chat_history: List[Dict[str, str]]):
9
- print("Calling chat_llama with history:", chat_history)
10
  chat_completion = client.chat_completion(
11
  messages=chat_history,
12
  max_tokens=500
13
  )
 
14
  chat_history.append({"role": "assistant", "content": chat_completion.choices[0].message.content})
15
- print("Updated history:", chat_history)
16
  return chat_history
17
 
18
  def chat_mem(message, chat_history):
19
- print("Received message:", message)
20
- print("Current chat history:", chat_history)
21
-
22
  chat_history_role = [{"role": "system", "content": "You are a helpful assistant."}]
 
 
23
  if chat_history:
24
  for user_message, assistant_response in chat_history:
25
  chat_history_role.append({"role": "user", "content": user_message})
26
  chat_history_role.append({"role": "assistant", "content": assistant_response})
27
 
 
28
  chat_history_role.append({"role": "user", "content": message})
 
 
29
  chat_completion = client.chat_completion(
30
  messages=chat_history_role,
31
  max_tokens=500
32
  )
 
 
33
  chat_history_role.append({"role": "assistant", "content": chat_completion.choices[0].message.content})
34
- print("Updated chat history role:", chat_history_role)
35
 
 
36
  modified = [entry["content"] for entry in chat_history_role]
37
  chat_history = [(modified[i*2], modified[i*2+1]) for i in range(len(modified)//2)]
38
- return "", chat_history
 
39
 
40
  def api_chat(chat_history: List[Dict[str, str]]):
41
- print("Received API request with history:", chat_history)
42
  updated_history = chat_llama(chat_history)
 
43
  return updated_history[-1] if updated_history else {}
44
 
45
  # Mengatur antarmuka Gradio
@@ -47,6 +61,7 @@ with gr.Blocks() as demo:
47
  gr.Markdown("## Chat Demo")
48
  with gr.Row():
49
  with gr.Column():
 
50
  chatbot = gr.Chatbot()
51
  msg = gr.Textbox(placeholder="Type your message here...", interactive=True)
52
  with gr.Row():
@@ -59,7 +74,7 @@ with gr.Blocks() as demo:
59
  gr.Markdown("### Send a POST request to `/api/chat` with the following JSON body:")
60
  gr.Markdown("```json\n[ { \"role\": \"user\", \"content\": \"Hello, how are you?\" }, { \"role\": \"assistant\", \"content\": \"I'm fine, thank you! How can I assist you today?\" }, { \"role\": \"user\", \"content\": \"Can you tell me a joke?\" } ]\n```")
61
  gr.Markdown("### API Response:")
62
- gr.Interface(fn=api_chat, inputs="json", outputs="json").launch(share=True)
63
 
64
  if __name__ == "__main__":
65
- demo.launch(port=7861)
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
+ from pydantic import BaseModel
4
  from typing import List, Dict
5
 
6
  # Inisialisasi client model
7
  client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
8
 
9
# Pydantic model for one message in the API request payload.
class ChatMessage(BaseModel):
    """A single chat message exchanged with the model API."""
    # NOTE(review): presumably one of "system" / "user" / "assistant" — confirm against callers.
    role: str
    # The message text.
    content: str
14
def chat_llama(chat_history: List[Dict[str, str]]):
    """Ask the model for a reply and append it to *chat_history*.

    The list is mutated in place (the assistant's reply is appended as a
    ``{"role": "assistant", "content": ...}`` entry) and also returned,
    so callers may use either the return value or the original list.
    """
    # Send the full role-tagged transcript to the inference endpoint.
    completion = client.chat_completion(
        messages=chat_history,
        max_tokens=500,
    )
    reply = completion.choices[0].message.content
    chat_history.append({"role": "assistant", "content": reply})
    return chat_history
24
 
25
def chat_mem(message, chat_history):
    """Run one chat turn with conversation memory for the Gradio UI.

    Parameters
    ----------
    message : str
        The newest user message from the textbox.
    chat_history : list[tuple[str, str]] or None
        Prior (user_message, assistant_response) pairs as kept by gr.Chatbot.

    Returns
    -------
    tuple[str, list[tuple[str, str]]]
        An empty string (clears the textbox) and the updated pair list.
    """
    # Rebuild the role-tagged transcript the model expects, starting from
    # a fixed system prompt.
    chat_history_role = [{"role": "system", "content": "You are a helpful assistant."}]
    for user_message, assistant_response in chat_history or []:
        chat_history_role.append({"role": "user", "content": user_message})
        chat_history_role.append({"role": "assistant", "content": assistant_response})

    # Append the newest user message, then ask the model for a reply.
    chat_history_role.append({"role": "user", "content": message})
    chat_completion = client.chat_completion(
        messages=chat_history_role,
        max_tokens=500,
    )
    chat_history_role.append(
        {"role": "assistant", "content": chat_completion.choices[0].message.content}
    )

    # Re-pair messages for gr.Chatbot, skipping the system prompt at index 0.
    # BUG FIX: the previous code paired contents starting at index 0, which
    # shifted every pair by one — the system prompt surfaced as a fake user
    # message and each assistant reply was paired with the NEXT user message.
    chat_history = [
        (chat_history_role[i]["content"], chat_history_role[i + 1]["content"])
        for i in range(1, len(chat_history_role) - 1, 2)
    ]
    return "", chat_history
52
 
53
def api_chat(chat_history: List[Dict[str, str]]):
    """API entry point: run the model on *chat_history* and return the last message.

    Returns the final ``{"role": ..., "content": ...}`` entry of the updated
    history, or an empty dict when the history comes back empty.
    """
    # Delegate to chat_llama, which appends the assistant reply in place.
    history = chat_llama(chat_history)
    if not history:
        return {}
    return history[-1]
58
 
59
  # Mengatur antarmuka Gradio
 
61
  gr.Markdown("## Chat Demo")
62
  with gr.Row():
63
  with gr.Column():
64
+ # Bagian Antarmuka Pengguna
65
  chatbot = gr.Chatbot()
66
  msg = gr.Textbox(placeholder="Type your message here...", interactive=True)
67
  with gr.Row():
 
74
  gr.Markdown("### Send a POST request to `/api/chat` with the following JSON body:")
75
  gr.Markdown("```json\n[ { \"role\": \"user\", \"content\": \"Hello, how are you?\" }, { \"role\": \"assistant\", \"content\": \"I'm fine, thank you! How can I assist you today?\" }, { \"role\": \"user\", \"content\": \"Can you tell me a joke?\" } ]\n```")
76
  gr.Markdown("### API Response:")
77
+ gr.Interface(fn=api_chat, inputs="json", outputs="json").launch()
78
 
79
  if __name__ == "__main__":
80
+ demo.launch()