# Load model directly.
# Downloads the FAVA checkpoint and its tokenizer from the Hugging Face Hub.
# NOTE(review): stray page text ("Quick Links") was fused onto the model line,
# which made it a syntax error; it has been removed.
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("fava-uw/fava-model")
model = AutoModelForCausalLM.from_pretrained("fava-uw/fava-model")
FAVA is a verification model.
import vllm

# Load FAVA into vLLM for fast batched generation.
# NOTE(review): the original snippet also imported `torch` and
# `AutoTokenizer`/`AutoModelForSequenceClassification` from transformers,
# none of which were used (and FAVA is a causal LM, not a sequence
# classifier) — those dead imports have been dropped.
model = vllm.LLM(model="fava-uw/fava-model")

# Greedy decoding: temperature=0 with full top_p makes the output
# deterministic; cap generation at 1024 new tokens.
sampling_params = vllm.SamplingParams(
    temperature=0,
    top_p=1.0,
    max_tokens=1024,
)
# Prompt template FAVA was trained with: the references go in {evidence},
# the passage to be checked goes in {output}.
INPUT = "Read the following references:\n{evidence}\nPlease identify all the errors in the following text using the information in the references provided and suggest edits if necessary:\n[Text] {output}\n[Edited] "

# Fill these in before running.
output = ""    # add your passage to verify
evidence = ""  # add a piece of evidence

# vLLM expects a list of prompts, so wrap the single formatted prompt.
prompts = [INPUT.format(evidence=evidence, output=output)]
# Run generation, then keep only the text of the top completion per prompt.
generations = model.generate(prompts, sampling_params)
outputs = [gen.outputs[0].text for gen in generations]
print(outputs[0])
- Downloads last month: 1,769
# Use a pipeline as a high-level helper from transformers import pipeline pipe = pipeline("text-generation", model="fava-uw/fava-model")