from transformers import AutoTokenizer, AutoModelForSeq2SeqLM


class QuestionGenerator:
    """
    Generates answers to queries based on retrieved context using a seq2seq model.
    """

    def __init__(self, model_name: str = "google/flan-t5-small"):
        self.model_name = model_name
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        self.model = AutoModelForSeq2SeqLM.from_pretrained(self.model_name)

    def generate(self, context: str, prompt: str, max_length: int = 256) -> str:
        """
        Generates a text answer given a context and a question prompt.
        """
        # Pack the retrieved context and the user question into a single prompt.
        full_prompt = f"Context: {context}\n\nQuestion: {prompt}\nAnswer:"
        # Truncate overly long contexts to the tokenizer's model maximum to avoid overflow.
        inputs = self.tokenizer(full_prompt, return_tensors="pt", truncation=True)
        outputs = self.model.generate(**inputs, max_length=max_length)
        return self.tokenizer.decode(outputs[0], skip_special_tokens=True)
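

# --- Usage sketch (illustrative only) ---
# A minimal example of wiring the generator to retrieved passages. The
# retrieved_chunks list and the question below are hypothetical placeholders
# standing in for whatever an upstream retriever would return; they are not
# part of the class above.
if __name__ == "__main__":
    retrieved_chunks = [
        "The Eiffel Tower was completed in 1889.",
        "It stands about 330 metres tall including its antennas.",
    ]
    generator = QuestionGenerator()  # downloads google/flan-t5-small on first use
    answer = generator.generate(
        context="\n".join(retrieved_chunks),
        prompt="When was the Eiffel Tower completed?",
    )
    print(answer)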