Veltrix / model.py
MGZON's picture
Create model.py
61253aa verified
raw
history blame contribute delete
310 Bytes
from transformers import pipeline
from config import MODEL_NAME
def load_model():
    """Build and return a text2text-generation pipeline for MODEL_NAME.

    The model name comes from the project-level ``config`` module; the
    heavy model download/load happens here, so call this once and reuse
    the returned pipeline.
    """
    pipe = pipeline("text2text-generation", model=MODEL_NAME)
    return pipe
def generate_answer(pipe, question):
    """Run *question* through *pipe* and return the generated text.

    Sampling is enabled (``do_sample=True``) with nucleus sampling
    ``top_p=0.9`` and ``temperature=0.7``; output is capped at
    ``max_length=300`` tokens. Only the first candidate's
    ``"generated_text"`` field is returned.
    """
    outputs = pipe(
        question,
        max_length=300,
        do_sample=True,
        top_p=0.9,
        temperature=0.7,
    )
    first = outputs[0]
    return first["generated_text"]