Gregory2041 committed on
Commit
481825b
·
verified ·
1 Parent(s): b067a09

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -3
app.py CHANGED
@@ -9,9 +9,15 @@ login(token=hf_token)
9
 
10
  model_name = "meta-llama/Llama-3.2-3B-Instruct"
11
 
12
- # Load the tokenizer and model with the token
13
- tokenizer = AutoTokenizer.from_pretrained(model_name)
14
- model = AutoModelForCausalLM.from_pretrained(model_name)
 
 
 
 
 
 
15
 
16
  # Define the generator function using the LLaMA model
17
  generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
 
9
 
10
  model_name = "meta-llama/Llama-3.2-3B-Instruct"
11
 
12
+ rope_scaling = {
13
+ "type": "llama3", # or another valid type
14
+ "factor": 32.0 # your scaling factor, can be adjusted based on needs
15
+ }
16
+
17
+ # Ensure the model loading process uses the corrected `rope_scaling`
18
+ tokenizer = AutoTokenizer.from_pretrained(model_name, rope_scaling=rope_scaling)
19
+ model = AutoModelForCausalLM.from_pretrained(model_name, rope_scaling=rope_scaling)
20
+
21
 
22
  # Define the generator function using the LLaMA model
23
  generator = pipeline("text-generation", model=model, tokenizer=tokenizer)