Spaces:
Build error
Build error
Update app.py
Browse files
app.py
CHANGED
|
@@ -9,9 +9,15 @@ login(token=hf_token)
|
|
| 9 |
|
| 10 |
model_name = "meta-llama/Llama-3.2-3B-Instruct"
|
| 11 |
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 15 |
|
| 16 |
# Define the generator function using the LLaMA model
|
| 17 |
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
|
|
|
|
model_name = "meta-llama/Llama-3.2-3B-Instruct"

# NOTE(review): Llama-3.2's checkpoint config already carries the correct
# rope scaling parameters (rope_type="llama3", factor=32.0, plus the
# high/low-frequency factors and original_max_position_embeddings).
# Overriding from_pretrained with a partial two-key dict — especially one
# using the legacy "type" key — is what triggers the
# "`rope_scaling` must be a dictionary with ... fields" load-time error on
# transformers >= 4.43. The dict below is kept only as a module-level
# reference (and for any code that imports it); it is deliberately NOT
# passed to from_pretrained. If the Space still fails to build, the fix is
# to pin transformers >= 4.43 in requirements.txt, not to override RoPE here.
rope_scaling = {
    "type": "llama3",  # legacy key name; modern transformers spells this "rope_type"
    "factor": 32.0
}

# Load tokenizer and model from the checkpoint's own config.
# Tokenizers have no RoPE configuration, so rope_scaling must never be
# passed to AutoTokenizer.from_pretrained.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Text-generation pipeline built from the already-loaded model/tokenizer
# (avoids a second download that passing model_name would cause).
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)