from transformers import pipeline

# Initialize the GPT-2 model once at import time using Hugging Face's
# text-generation pipeline (loads/downloads model weights on first use).
generator = pipeline("text-generation", model="gpt2")


def generate_answer(question):
    """Generate an answer to *question* with GPT-2.

    Parameters
    ----------
    question : str
        The user's prompt.

    Returns
    -------
    list[dict]
        A two-message conversation in Gradio's chatbot "messages" format:
        ``[{"role": "user", ...}, {"role": "assistant", ...}]``.
    """
    # Single completion, capped at 100 tokens of total output.
    response = generator(question, max_length=100, num_return_sequences=1)
    answer = response[0]['generated_text']

    # GPT-2 echoes the prompt at the start of `generated_text`.
    # BUGFIX: the original used answer.replace(question, ''), which removes
    # *every* occurrence of the question text — mangling answers that happen
    # to repeat the prompt. Strip only the leading prompt echo instead.
    if answer.startswith(question):
        answer = answer[len(question):]
    answer = answer.strip()

    # Return in Gradio's 'messages' format for its chatbot component.
    return [
        {"role": "user", "content": question},
        {"role": "assistant", "content": answer},
    ]