"""Gradio chat UI for a Groq-hosted LLM acting as a friendly study buddy."""

import os

import gradio as gr
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_groq import ChatGroq

# --------- SYSTEM PROMPT ----------
system_prompt_ai_teacher = (
    "You are a helpful, polite assistant who can be a potential study buddy."
)

# --------- LOAD API KEY ----------
groq_api_key = os.getenv("GROQ_API_KEY")
if not groq_api_key:
    # Fail fast with a clear message instead of an opaque auth error
    # from the Groq client at request time.
    raise RuntimeError("GROQ_API_KEY environment variable is not set.")

llm = ChatGroq(
    model_name="openai/gpt-oss-120b",
    temperature=0.7,
    groq_api_key=groq_api_key,
)

# Include prior turns via MessagesPlaceholder so the assistant has
# conversational memory; previously the prompt only ever saw the
# latest user message and the bot forgot everything between turns.
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system_prompt_ai_teacher),
        MessagesPlaceholder(variable_name="history"),
        ("human", "{user_input}"),
    ]
)

chain = prompt | llm


# --------- CHAT FUNCTION ----------
def predict(message, history):
    """Run one chat turn through the LLM chain.

    Args:
        message: The latest user message (str).
        history: Prior turns as a list of ``{"role", "content"}`` dicts
            (the openai-style "messages" format used by ``gr.Chatbot``).
            May be ``None`` or empty on the first turn.

    Returns:
        The assistant's reply text (str).
    """
    # Convert the messages-format history into (role, content) tuples,
    # which ChatPromptTemplate accepts for the MessagesPlaceholder slot.
    past_turns = [(m["role"], m["content"]) for m in (history or [])]
    response = chain.invoke({"user_input": message, "history": past_turns})
    return response.content


# --------- GRADIO UI ----------
with gr.Blocks(
    theme=gr.themes.Soft(),
    css="""
    body, .gradio-container {
        font-family: 'Inter', 'Segoe UI', 'Roboto', 'Helvetica Neue', Arial, sans-serif;
    }
    .chatbot { height: 500px; font-size: 16px; }
    footer { visibility: hidden; }
    """,
) as demo:
    gr.Markdown(
        """
        # HERE AND NOW AI — Study Buddy
        Your friendly AI assistant for learning and exploration
        """
    )

    # type="messages" matches the {"role", "content"} dicts appended in
    # respond(); the legacy tuple-format default would reject them.
    chatbot = gr.Chatbot(
        label="AI Chat",
        elem_classes="chatbot",
        type="messages",
    )
    msg = gr.Textbox(
        placeholder="Ask me anything...",
        show_label=False,
    )
    clear = gr.Button("🗑️ Clear Chat")

    def respond(message, history):
        """Append the user turn, fetch the bot reply, return updated state.

        Returns a ("", history) pair: the empty string clears the input
        textbox and the extended history refreshes the chat display.
        """
        history = history or []
        history.append({"role": "user", "content": message})
        # Pass only the turns BEFORE the current message as context, so the
        # latest message is not duplicated inside the prompt.
        bot_response = predict(message, history[:-1])
        history.append({"role": "assistant", "content": bot_response})
        return "", history

    msg.submit(respond, [msg, chatbot], [msg, chatbot])
    clear.click(lambda: [], None, chatbot)

if __name__ == "__main__":
    # Guard the launch so importing this module does not start a server.
    demo.launch()