DarianNLP commited on
Commit
8bf3b8b
·
verified ·
1 Parent(s): 544e77f

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +4 -4
README.md CHANGED
@@ -10,9 +10,9 @@ import torch
10
  from transformers import AutoTokenizer
11
  from modeling_reward import BERTRewardModel
12
 
13
- model_name = DarianNLP/modernbert-nl-sql
14
- tokenizer = AutoTokenizer.from_pretrained(DarianNLP/modernbert-nl-sql, trust_remote_code=True)
15
- model = BERTRewardModel(model_name="answerdotai/ModernBERT-base")
16
  state_dict = safetensors.torch.load_file("model.safetensors") # torch.load cannot read the safetensors format
17
  model.load_state_dict(state_dict)
18
  model.eval()
@@ -34,5 +34,5 @@ For convenience, `modeling_reward.py` exposes `load_finetuned_model(model_dir)`
34
  - The reward target is bounded `[0, 1]` and already penalizes copied NL or incorrect reasoning.
35
  - The model uses mean pooling instead of CLS to better leverage long ModernBERT contexts.
36
  - Tokenizer files are saved from the finetuned run; no extra special tokens were introduced.
37
- - If you upload to the Hub and want `AutoModel.from_pretrained(..., trust_remote_code=True)` support, keep `modeling_reward.py` in the repo root.
38
 
 
10
  from transformers import AutoTokenizer
11
  from modeling_reward import BERTRewardModel
12
 
13
+ model_name = "DarianNLP/modernbert-nl-sql"
14
+ tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
15
+ model = BERTRewardModel(model_name=model_name)
16
  state_dict = safetensors.torch.load_file("model.safetensors") # torch.load cannot read the safetensors format
17
  model.load_state_dict(state_dict)
18
  model.eval()
 
34
  - The reward target is bounded `[0, 1]` and already penalizes copied NL or incorrect reasoning.
35
  - The model uses mean pooling instead of CLS to better leverage long ModernBERT contexts.
36
  - Tokenizer files are saved from the finetuned run; no extra special tokens were introduced.
37
+
38