import os
import torch
from transformers import AutoTokenizer
from modeling_reward import load_finetuned_model
def main():
    """Score one hard-coded (SQL, reasoning, NL) triple with the fine-tuned reward model.

    Loads the tokenizer and reward model from this repository's root directory,
    builds the prompt in the "SQL / Reasoning / NL" format the model was trained
    on, and prints the scalar reward score.
    """
    # The tokenizer and model artifacts live alongside this script.
    repo_root = os.path.dirname(os.path.abspath(__file__))
    tokenizer = AutoTokenizer.from_pretrained(repo_root)
    model = load_finetuned_model(repo_root)
    # NOTE(review): ensure inference mode — presumably load_finetuned_model does
    # not call eval(); calling it again is harmless if it does.
    model.eval()

    sql = "SELECT COUNT(*) FROM orders WHERE status = 'complete';"
    reasoning = "think: Count rows in orders filtered by status 'complete'."
    nl = "How many completed orders exist?"
    # Prompt format must match the one used at training time — TODO confirm.
    text = f"SQL: {sql}\nReasoning: {reasoning}\nNL: {nl}"

    # Truncate to the model's context budget; no gradients needed for scoring.
    inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=2048)
    with torch.no_grad():
        score = model(**inputs)["scores"].item()
    print(f"Reward score: {score:.3f}")
# Run the demo only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
|