Commit: Upload folder using huggingface_hub
Changed files: modeling_custom.py (+16, −0)
modeling_custom.py
CHANGED
|
@@ -149,6 +149,22 @@ class Qwen2ForCausalLMWithReward(Qwen2ForCausalLM):
|
|
| 149 |
|
| 150 |
return lm_output, success_scores
|
| 151 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 152 |
|
| 153 |
# For AutoModel registration
|
| 154 |
AutoModelForCausalLM = Qwen2ForCausalLMWithReward
|
|
|
|
| 149 |
|
| 150 |
return lm_output, success_scores
|
| 151 |
|
def generate(self, *args, **kwargs):
    """Generate text while forcing ``return_score=False`` on the forward pass.

    The Hugging Face ``generate()`` loop expects ``forward`` to return a
    plain language-model output object, not the ``(lm_output, success_scores)``
    tuple this class can produce, so the score flag is disabled here
    unconditionally before delegating to the parent implementation.

    Usage:
        >>> inputs = tokenizer("Hello", return_tensors="pt")
        >>> outputs = model.generate(**inputs, max_new_tokens=50)
        >>> # No need to pass return_score=False manually!
    """
    # Deliberate overwrite (not setdefault): scores must never be
    # returned during generation, even if the caller passed True.
    kwargs["return_score"] = False
    return super().generate(*args, **kwargs)
|
# For AutoModel registration
# NOTE(review): this rebinds the name `AutoModelForCausalLM` to the custom
# class instead of calling `transformers.AutoModelForCausalLM.register(...)`.
# If `AutoModelForCausalLM` is imported from transformers elsewhere in this
# module, this assignment shadows it — confirm this alias is what the
# remote-code loading path expects.
AutoModelForCausalLM = Qwen2ForCausalLMWithReward
|