from transformers import T5Tokenizer, AutoModelForSeq2SeqLM, pipeline
class QGenerator:
    """Generate questions from free text with a T5 question-generation model."""

    def __init__(self):
        # use_fast=False keeps the slow SentencePiece tokenizer that matches
        # this checkpoint's original tokenization.
        tokenizer = T5Tokenizer.from_pretrained("valhalla/t5-small-qg-hl", use_fast=False)
        model = AutoModelForSeq2SeqLM.from_pretrained("valhalla/t5-small-qg-hl")
        self.qg = pipeline("text2text-generation", model=model, tokenizer=tokenizer)

    def split_sentences(self, text):
        """Split *text* into sentences on '.', '!' or '?' terminators.

        Simple heuristic splitter (for better results, use nltk or spacy);
        abbreviations such as "e.g." are split incorrectly.
        """
        # Normalize all sentence terminators to '.' so one split handles
        # questions and exclamations too (the original missed '?' and '!').
        normalized = text.replace('!', '.').replace('?', '.')
        return [s.strip() for s in normalized.split('.') if s.strip()]

    def chunk_text(self, text, chunk_size=512):
        """Return *text* cut into consecutive chunks of at most *chunk_size* chars."""
        return [text[i:i + chunk_size] for i in range(0, len(text), chunk_size)]

    def generate(self, text, max_questions=5):
        """Generate up to *max_questions* unique questions, one per sentence.

        Sentences whose generation fails are skipped (best-effort), so the
        result may contain fewer than *max_questions* entries.
        """
        questions = []
        for sentence in self.split_sentences(text):
            if len(questions) >= max_questions:
                break
            # "</s>" is T5's end-of-sequence marker, appended explicitly as in
            # the checkpoint's original usage examples.
            input_text = f"generate question: {sentence} </s>"
            try:
                result = self.qg(input_text, max_length=64, num_return_sequences=1)[0]
                question = result["generated_text"]
                # Keep only non-empty, not-yet-seen questions (order preserved).
                if question and question not in questions:
                    questions.append(question)
            except Exception as e:
                # Best-effort: report the failure and continue with the rest.
                print("Error generating question:", e)
                continue
        return questions