Nexchan committed on
Commit
1635dbd
·
verified ·
1 Parent(s): fdb8106

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +53 -31
app.py CHANGED
@@ -1,47 +1,69 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
 
3
 
4
  client = InferenceClient(
5
  "meta-llama/Meta-Llama-3-8B-Instruct",
6
  )
7
 
8
def chat_mem(message, chat_history):
    """Send *message* plus the accumulated chat to the model and update history.

    Parameters
    ----------
    message : str
        The user's newest message from the textbox.
    chat_history : list[tuple[str, str]]
        Gradio Chatbot history as (user, assistant) pairs.

    Returns
    -------
    tuple[str, list[tuple[str, str]]]
        An empty string (clears the textbox) and the updated history.
    """
    # Rebuild the OpenAI-style message list from the Chatbot tuple pairs.
    messages = [{"role": "system", "content": "You are a helpful assistant."}]
    for user_turn, assistant_turn in chat_history:
        messages.append({"role": "user", "content": user_turn})
        messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": message})

    chat_completion = client.chat_completion(
        messages=messages,
        max_tokens=500,
        # stream=True
    )
    reply = chat_completion.choices[0].message.content

    # Append the new exchange directly instead of flattening every message
    # content and re-pairing by index (the old map/lambda + a[i*2+1] dance),
    # which was fragile and O(n) extra work per turn.
    return "", chat_history + [(message, reply)]
32
 
 
 
 
 
 
 
 
 
 
 
33
 
34
with gr.Blocks() as demo:
    # Single-column chat layout: history on top, input and buttons below.
    with gr.Row():
        with gr.Column():
            chatbot = gr.Chatbot()
            msg = gr.Textbox(interactive=True, )
            with gr.Row():
                clear = gr.ClearButton([msg, chatbot], icon="https://img.icons8.com/?size=100&id=Xnx8cxDef16O&format=png&color=000000")
                send_btn = gr.Button("Send", variant='primary', icon="https://img.icons8.com/?size=100&id=g8ltXTwIfJ1n&format=png&color=000000")
            # Pressing Enter in the textbox and clicking Send both route
            # through chat_mem with the same inputs/outputs wiring.
            msg.submit(fn=chat_mem, inputs=[msg, chatbot], outputs=[msg, chatbot])
            send_btn.click(fn=chat_mem, inputs=[msg, chatbot], outputs=[msg, chatbot])

if __name__ == "__main__":
    demo.launch()
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
+ import json
4
 
5
  client = InferenceClient(
6
  "meta-llama/Meta-Llama-3-8B-Instruct",
7
  )
8
 
9
def chat_llama(chat_history):
    """Run one chat completion over an OpenAI-style message list.

    Parameters
    ----------
    chat_history : list[dict]
        Messages as ``{"role": ..., "content": ...}`` dicts.

    Returns
    -------
    list[dict]
        A NEW list: the input messages plus the assistant's reply.
    """
    chat_completion = client.chat_completion(
        messages=chat_history,
        max_tokens=500,
    )
    # Copy before appending so the caller's list is not mutated as a side
    # effect (the original appended to the argument in place AND returned it).
    messages = list(chat_history)
    messages.append(
        {"role": "assistant", "content": chat_completion.choices[0].message.content}
    )
    return messages
17
 
18
def chat_mem(message, chat_history):
    """Send *message* plus the accumulated chat to the model and update history.

    Parameters
    ----------
    message : str
        The user's newest message from the textbox.
    chat_history : list[tuple[str, str]]
        Gradio Chatbot history as (user, assistant) pairs.

    Returns
    -------
    tuple[str, list[tuple[str, str]]]
        An empty string (clears the textbox) and the updated history.
    """
    # Rebuild the OpenAI-style message list from the Chatbot tuple pairs.
    chat_history_role = [{"role": "system", "content": "You are a helpful assistant."}]
    for user_turn, assistant_turn in chat_history:
        chat_history_role.append({"role": "user", "content": user_turn})
        chat_history_role.append({"role": "assistant", "content": assistant_turn})
    chat_history_role.append({"role": "user", "content": message})

    chat_completion = client.chat_completion(
        messages=chat_history_role,
        max_tokens=500,
    )
    reply = chat_completion.choices[0].message.content

    # Append the new exchange directly instead of flattening every message
    # content and re-pairing by index (the old map/lambda + a[i*2+1] dance),
    # which was fragile and O(n) extra work per turn.
    return "", chat_history + [(message, reply)]
 
 
 
 
39
 
40
def process_json(json_input):
    """Parse a JSON chat history, run one model turn, and return the result.

    Parameters
    ----------
    json_input : str
        JSON text that must decode to a list of message dictionaries.

    Returns
    -------
    str
        The updated history as pretty-printed JSON, or an error message.
        A single value is returned because the button is wired to exactly
        one output component (``outputs=[json_output]``); the original
        returned a 2-tuple, which Gradio would render as a stringified
        tuple in the textbox instead of the JSON text.
    """
    try:
        chat_history = json.loads(json_input)
        if not isinstance(chat_history, list):
            raise ValueError("Input should be a list of message dictionaries.")
    except (json.JSONDecodeError, ValueError) as e:
        return f"Error parsing JSON: {str(e)}"

    chat_history = chat_llama(chat_history)
    return json.dumps(chat_history, indent=2)
50
 
51
with gr.Blocks() as demo:
    with gr.Row():
        # Left column: interactive chat (textbox + Chatbot widget).
        with gr.Column():
            chatbot = gr.Chatbot()
            msg = gr.Textbox(interactive=True, )
            with gr.Row():
                clear = gr.ClearButton([msg, chatbot], icon="https://img.icons8.com/?size=100&id=Xnx8cxDef16O&format=png&color=000000")
                send_btn = gr.Button("Send", variant='primary', icon="https://img.icons8.com/?size=100&id=g8ltXTwIfJ1n&format=png&color=000000")
            # Enter key and Send button are interchangeable triggers.
            msg.submit(fn=chat_mem, inputs=[msg, chatbot], outputs=[msg, chatbot])
            send_btn.click(fn=chat_mem, inputs=[msg, chatbot], outputs=[msg, chatbot])

        # Right column: raw JSON in / JSON out for scripted or batch use.
        with gr.Column():
            json_input = gr.Textbox(placeholder='Input JSON here...', interactive=True, lines=10)
            json_output = gr.Textbox(label='Output JSON', interactive=False, lines=10)
            process_btn = gr.Button("Process JSON", variant='primary', icon="https://img.icons8.com/?size=100&id=g8ltXTwIfJ1n&format=png&color=000000")
            process_btn.click(fn=process_json, inputs=json_input, outputs=[json_output])

if __name__ == "__main__":
    demo.launch()