| from __future__ import annotations | |
| import gradio as gr | |
| from agent import ExamAgent | |
# Compatibility shim: gradio's Error class has moved between releases.
# Fall back to the base Exception so any `except GradioComponentError`
# stays valid regardless of the installed gradio version.
# NOTE(review): GradioComponentError is not referenced anywhere in this
# chunk — presumably used by code outside this view; confirm before removing.
try:
    from gradio.exceptions import Error as GradioComponentError
except Exception:
    GradioComponentError = Exception
def new_agent() -> ExamAgent:
    """Create a fresh examiner agent for a brand-new exam session."""
    agent = ExamAgent()
    return agent
# History format shared by all handlers ("tuples" or "messages").
# This default is overwritten at build time by detect_chat_mode() once the
# Chatbot component has been constructed.
CHAT_MODE = "tuples"
def detect_chat_mode(chatbot: gr.Chatbot) -> str:
    """Figure out which history format this gr.Chatbot build expects.

    Returns:
        "messages" for list[{"role": ..., "content": ...}] histories, or
        "tuples" for list[(user, bot)] pairs. Defaults to "tuples" when
        nothing conclusive can be detected.
    """
    validator = getattr(chatbot, "_check_format", None)
    if callable(validator):
        # Probe the private validator with one sample of each format;
        # the first format it accepts wins.
        probes = (
            ("messages", [{"role": "assistant", "content": "hi"}]),
            ("tuples", [(None, "hi")]),
        )
        for mode, sample in probes:
            try:
                validator(sample)
            except Exception:
                continue
            return mode
    # Probing unavailable or inconclusive: trust the public attribute.
    declared = getattr(chatbot, "type", None)
    return declared if declared in ("messages", "tuples") else "tuples"
def init_chat(first_assistant_text: str, mode: str | None = None):
    """Build the opening chat history holding a single assistant message.

    Args:
        first_assistant_text: The agent's greeting / first question.
        mode: Explicit history format ("messages" or "tuples"). Defaults to
            the module-level CHAT_MODE detected at build time; passing it
            explicitly removes the hidden global dependency (and makes the
            function testable in isolation).

    Returns:
        A history list in the requested Chatbot format.
    """
    if mode is None:
        mode = CHAT_MODE
    if mode == "messages":
        return [{"role": "assistant", "content": first_assistant_text}]
    # Tuple format is (user, bot); None on the user side marks a
    # bot-initiated message.
    return [(None, first_assistant_text)]
def on_load():
    """Start a brand-new exam session.

    Returns:
        (agent, chat): a fresh ExamAgent plus a history seeded with the
        agent's opening message.
    """
    agent = new_agent()
    return agent, init_chat(agent.initial_message())
def on_reset():
    """Reset handler — behaves exactly like a fresh page load."""
    return on_load()
def on_user_message(
    agent: ExamAgent,
    chat,
    user_text: str,
    api_key: str,
    model: str,
    base_url: str,
):
    """Process one user turn: record it, ask the agent, record the reply.

    Returns:
        (agent, chat, ""): updated session state, updated history, and an
        empty string so the input textbox is cleared.
    """
    # Lazily repair missing state (e.g. after a server restart).
    agent = agent if agent is not None else new_agent()
    chat = chat if chat is not None else []

    text = (user_text or "").strip()
    if not text:
        # Blank submission: nothing to send, but still clear the textbox.
        return agent, chat, ""

    messages_mode = CHAT_MODE == "messages"
    if messages_mode:
        chat.append({"role": "user", "content": text})

    try:
        reply = agent.step(text, api_key=api_key, model=model, base_url=base_url)
    except Exception as e:
        # Surface failures inside the chat instead of crashing the UI.
        reply = f"Сталася помилка: {e}"

    if messages_mode:
        chat.append({"role": "assistant", "content": reply})
    else:
        chat.append((text, reply))
    return agent, chat, ""
# ---- UI construction (runs at import time) ----
with gr.Blocks(title="AI Examiner Agent") as demo:
    gr.Markdown(
        "# AI Examiner Agent\n"
        "Сервіс проводить міні-іспит: питає ім’я та email, обирає 2–3 теми, "
        "ставить питання, оцінює відповіді та зберігає результат у файли."
    )
    # LLM connection settings; forwarded verbatim to agent.step() on each turn.
    with gr.Row():
        api_key = gr.Textbox(label="LLM API Key", type="password", placeholder="Встав ключ тут")
        model = gr.Textbox(label="Model", value="gpt-4o-mini")
        # NOTE(review): OpenAI-compatible SDKs usually expect a ".../v1" base
        # URL — confirm ExamAgent normalizes this default.
        base_url = gr.Textbox(label="Base URL", value="https://api.openai.com")
    chatbot = gr.Chatbot(label="Exam Chat", height=420)
    # Pin the history format for all handlers now that the component exists
    # (rebinds the module-level CHAT_MODE).
    CHAT_MODE = detect_chat_mode(chatbot)
    print(f"[AI Examiner Agent] Chatbot mode detected: {CHAT_MODE}")
    with gr.Row():
        user_in = gr.Textbox(label="Твоє повідомлення", placeholder="Напиши відповідь…", scale=4)
        send = gr.Button("Send", scale=1)
        reset = gr.Button("Reset", scale=1)
    # Per-session ExamAgent instance, kept server-side via gr.State.
    agent_state = gr.State()
    # Seed a fresh session on page load and on explicit reset.
    demo.load(on_load, outputs=[agent_state, chatbot])
    reset.click(on_reset, outputs=[agent_state, chatbot])
    # Both the Send button and Enter in the textbox submit one turn;
    # api_name=False keeps the handlers off the public API surface.
    send.click(
        on_user_message,
        inputs=[agent_state, chatbot, user_in, api_key, model, base_url],
        outputs=[agent_state, chatbot, user_in],
        api_name=False,
    )
    user_in.submit(
        on_user_message,
        inputs=[agent_state, chatbot, user_in, api_key, model, base_url],
        outputs=[agent_state, chatbot, user_in],
        api_name=False,
    )

if __name__ == "__main__":
    demo.launch()