import gradio as gr
from transformers import GPT2LMHeadModel, GPT2Tokenizer, pipeline
import torch

# =========================
# Load Fine-Tuned GPT-2 Model
# =========================
def load_model():
    model_path = "zaid002/finetunedmodel"  # Hugging Face repo
    tokenizer = GPT2Tokenizer.from_pretrained(model_path)
    model = GPT2LMHeadModel.from_pretrained(model_path)
    device = 0 if torch.cuda.is_available() else -1  # GPU if available, else CPU
    generator = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        device=device
    )
    return tokenizer, generator

tokenizer, chatbot = load_model()

# =========================
# ChatGPT System Prompt
# =========================
SYSTEM_PROMPT = """
You are a powerful AI assistant like ChatGPT.
You can answer:
- GUVI course-related questions
- Programming (Python, Java, SQL, ML, AI, Data Science)
- Interview preparation
- Career guidance
- General knowledge

Rules:
- Respond in the SAME language as the user
- Be clear, helpful, and human-like
- Give structured answers when needed
- Avoid repetition
"""

# =========================
# Chat Function (ChatGPT Style)
# =========================
def chatgpt_multilingual(user_input, history):
    prompt = SYSTEM_PROMPT + "\n\n"

    # Replay conversation memory so the model sees prior turns
    for user, bot in history:
        prompt += f"User: {user}\nAssistant: {bot}\n"

    prompt += f"User: {user_input}\nAssistant:"

    output = chatbot(
        prompt,
        max_new_tokens=200,  # cap only the reply; max_length=400 counted the
                             # (growing) prompt too and breaks multi-turn chats
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
        top_k=50,
        repetition_penalty=1.2,
        pad_token_id=tokenizer.eos_token_id
    )

    # Keep only the text generated after the final "Assistant:" marker
    response = output[0]["generated_text"].split("Assistant:")[-1].strip()

    # Safety trim so a runaway generation cannot flood the UI
    response = response[:600]

    history.append((user_input, response))
    return history, history
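# =========================
# Context-Window Guard (sketch)
# =========================
# Not wired into the app: a minimal sketch of one way to keep long chats
# inside GPT-2's 1024-token context, since the prompt above replays the whole
# history every turn. The 700-token budget is an assumption, not a tuned value.
def truncate_history(history, max_prompt_tokens=700):
    """Drop the oldest (user, bot) turns until the replayed prompt fits."""
    kept = list(history)
    while kept:
        replay = "".join(f"User: {u}\nAssistant: {b}\n" for u, b in kept)
        if len(tokenizer.encode(SYSTEM_PROMPT + "\n\n" + replay)) <= max_prompt_tokens:
            break
        kept.pop(0)  # discard the oldest exchange first
    return kept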
# =========================
# Gradio UI
# =========================
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.HTML("""
        <p style="text-align: center;">
            Ask anything about GUVI, programming, careers, or general topics — in ANY language.
        </p>
    """)

    # Visible transcript plus hidden state carrying the (user, bot) memory.
    # The wiring below is reconstructed to match the chat function's
    # (history, history) return shape; the source truncates inside the HTML.
    chat_window = gr.Chatbot(label="Chat")
    state = gr.State([])

    with gr.Row():
        msg = gr.Textbox(placeholder="Type your message...", show_label=False, scale=4)
        send = gr.Button("Send", scale=1)

    # Both Enter and the Send button trigger a chat turn
    msg.submit(chatgpt_multilingual, inputs=[msg, state], outputs=[chat_window, state])
    send.click(chatgpt_multilingual, inputs=[msg, state], outputs=[chat_window, state])

demo.launch()
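# =========================
# CLI Smoke Test (sketch)
# =========================
# Usage example for chatgpt_multilingual, not part of the original app. Since
# demo.launch() above blocks while serving, comment out the launch call and
# invoke _smoke_test() manually to try one turn without the UI; the question
# below is only an illustration.
def _smoke_test():
    history, _ = chatgpt_multilingual("What courses does GUVI offer?", [])
    print(history[-1][1])  # print the assistant's reply for the single turn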