import gradio as gr
from transformers import pipeline

# Load AI model at import time so the UI is ready as soon as the app starts.
# Prefer the larger instruct-tuned model; fall back to gpt2 when it cannot be
# loaded (e.g. no weights cached, not enough RAM). device=-1 forces CPU.
print("Loading model...")
try:
    generator = pipeline(
        "text-generation",
        model="mistralai/Mistral-7B-Instruct-v0.1",
        device=-1,
    )
except Exception as load_err:  # narrow except: don't swallow KeyboardInterrupt/SystemExit
    print(f"Primary model unavailable ({load_err!r}); falling back to gpt2.")
    generator = pipeline("text-generation", model="gpt2")


def chat(user_message):
    """Generate an assistant reply for *user_message* using the loaded pipeline.

    Args:
        user_message: Raw text typed by the user.

    Returns:
        The model's reply as a string, or a human-readable error message.
        Never raises — this is the Gradio callback boundary, so all
        exceptions are converted to a displayable string.
    """
    # Guard: don't spend a model call on empty/whitespace-only input.
    if not user_message or not user_message.strip():
        return "I couldn't generate a response."
    try:
        # Simple User/Assistant framing nudges base models toward answering
        # rather than merely continuing the user's text.
        prompt = f"User: {user_message}\nAssistant:"
        response = generator(
            prompt,
            # max_new_tokens caps only the generated continuation; the old
            # max_length=300 counted prompt tokens too, so long prompts could
            # leave no room to generate.
            max_new_tokens=250,
            num_return_sequences=1,
            temperature=0.7,
            top_p=0.9,
            do_sample=True,
        )
        # The pipeline echoes the prompt; keep only text after "Assistant:".
        generated_text = response[0]["generated_text"]
        assistant_response = generated_text.split("Assistant:")[-1].strip()
        return assistant_response if assistant_response else "I couldn't generate a response."
    except Exception as e:
        # Surface the failure in the UI instead of crashing the callback.
        return f"Error: {str(e)}"


# Build the UI: question box + send button on the left, response box on the right.
with gr.Blocks(title="Mini ChatBot", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# Mini ChatBot")
    gr.Markdown("Powered by AI models - Works without local LLM downloads")
    with gr.Row():
        with gr.Column():
            user_input = gr.Textbox(
                label="Your Question",
                placeholder="Ask me anything...",
                lines=3,
                scale=1,
            )
            submit_btn = gr.Button("Send", scale=1, variant="primary")
        with gr.Column():
            output = gr.Textbox(
                label="Response",
                lines=5,
                scale=1,
                interactive=False,
            )
    # Wire the button to the model callback.
    submit_btn.click(chat, inputs=user_input, outputs=output)
    # Clickable example questions that populate the input box.
    gr.Examples(
        examples=[
            "What is machine learning?",
            "Explain quantum computing",
            "How does AI work?",
            "What is deep learning?",
        ],
        inputs=user_input,
    )

if __name__ == "__main__":
    demo.launch()