| | import aiohttp |
| | import asyncio |
| | import json |
| | import gradio as gr |
| | import os |
| |
|
| | |
# Server endpoints are derived from the required URL environment variable.
BASE_URL = os.getenv("URL")
if not BASE_URL:
    raise ValueError("Environment variable 'URL' not set")
TOKEN_URL = f"{BASE_URL}/get-token"
CHAT_URL = f"{BASE_URL}/conversation"

# Session token (fetched lazily on first request) and the shared
# conversation history mutated by the chat functions below.
token = ""
messHistory: list = []
| |
|
async def chat(messList):
    """Send the conversation to the server and stream back the assistant reply.

    Parameters
    ----------
    messList : list
        Conversation history as ``[{"role": ..., "content": ...}, ...]``.

    Returns
    -------
    str
        The assistant's accumulated reply, or a fixed error string when the
        server answers with a non-200 status.

    Side effects: lazily fetches / refreshes the global session ``token`` and
    appends the assistant reply to the global ``messHistory``.
    """
    global token
    async with aiohttp.ClientSession() as session:
        # Lazily obtain a session token on first use.
        if token == "":
            async with session.get(TOKEN_URL) as resp:
                data = await resp.json()
                token = data["token"]

        body = {
            "token": token,
            "message": messList,
            "stream": True,
        }

        fullmessage = ""

        async with session.post(CHAT_URL, json=body) as resp:
            if resp.status != 200:
                return "Error occurred during the chat process."

            buffer = ""
            done = False  # set when the server sends the [DONE] sentinel
            async for chunk in resp.content.iter_any():
                buffer += chunk.decode("utf-8")

                # Consume every complete newline-terminated line currently
                # buffered; a trailing partial line waits for the next chunk.
                while "\n" in buffer:
                    line, buffer = buffer.split("\n", 1)
                    json_str = line.strip()

                    if json_str == "[DONE]":
                        # End-of-stream: stop reading further chunks too.
                        # (Previously this only broke the inner loop, so the
                        # outer `async for` kept consuming after the sentinel.)
                        done = True
                        break

                    if not json_str:
                        continue  # skip blank keep-alive lines

                    try:
                        data_dict = json.loads(json_str)
                    except json.JSONDecodeError:
                        # One malformed line should not abort the whole
                        # stream (the original broke out and dropped the
                        # remainder of the response).
                        continue
                    fullmessage += data_dict.get("message", "")
                    token = data_dict.get("resp_token", token)
                if done:
                    break

        messHistory.append({"role": "assistant", "content": fullmessage})
        return fullmessage
| |
|
def gradio_chat(user_input, mode):
    """Synchronous wrapper for the async chat function, integrated with Gradio.

    Records the user message (tagged with the chat *mode*) in the shared
    ``messHistory``, runs the async ``chat`` coroutine to completion, and
    returns the assistant's reply.
    """
    messHistory.append({"role": "user", "content": f"[{mode}] {user_input}"})
    # asyncio.run creates, uses, and *closes* a fresh event loop.  The
    # previous new_event_loop/run_until_complete pair never closed its loop,
    # leaking one event loop per chat turn.
    return asyncio.run(chat(messHistory))
| |
|
| | |
def chat_interface(user_input, mode):
    """Thin pass-through used as the Gradio callback."""
    response = gradio_chat(user_input, mode)
    return response
| |
|
with gr.Blocks() as demo:
    gr.Markdown("# Chat with AI")

    with gr.Row():
        radio_mode = gr.Radio(["Friendly", "Formal", "Humorous"], label="Chat Mode", value="Friendly")

    with gr.Row():
        # Pass the Radio component itself as an input so the user's *current*
        # selection is delivered on every call.  The previous
        # `lambda ...: chat_interface(user_input, radio_mode.value)` read
        # only the component's initial default ("Friendly") and never saw
        # later changes.
        chatbot = gr.Interface(
            fn=chat_interface,
            inputs=[gr.Textbox(label="Your message"), radio_mode],
            outputs=[gr.Markdown(label="Assistant response")],
        )


demo.launch()
| |
|