Alon Albalak committed on
Commit
ad858ed
·
1 Parent(s): 0640456

use HF token

Browse files
Files changed (1) hide show
  1. src/models/llm_manager.py +4 -2
src/models/llm_manager.py CHANGED
@@ -5,6 +5,8 @@ import torch
5
  from transformers import AutoTokenizer, AutoModelForCausalLM
6
 
7
  os.environ["TOKENIZERS_PARALLELISM"] = "false"
 
 
8
 
9
  class LLMManager:
10
  """Manages LLM model loading and text generation operations"""
@@ -15,8 +17,8 @@ class LLMManager:
15
 
16
  def load_models(self, model_name="meta-llama/Llama-3.2-1B-Instruct"):
17
  """Load the LLM model and tokenizer"""
18
- self.tokenizer = AutoTokenizer.from_pretrained(model_name)
19
- self.model = AutoModelForCausalLM.from_pretrained(model_name)
20
 
21
  if self.tokenizer.pad_token is None:
22
  self.tokenizer.pad_token = self.tokenizer.eos_token
 
5
  from transformers import AutoTokenizer, AutoModelForCausalLM
6
 
7
  os.environ["TOKENIZERS_PARALLELISM"] = "false"
8
+ HF_TOKEN = os.getenv("HF_TOKEN")
9
+
10
 
11
  class LLMManager:
12
  """Manages LLM model loading and text generation operations"""
 
17
 
18
  def load_models(self, model_name="meta-llama/Llama-3.2-1B-Instruct"):
19
  """Load the LLM model and tokenizer"""
20
+ self.tokenizer = AutoTokenizer.from_pretrained(model_name, token=HF_TOKEN)
21
+ self.model = AutoModelForCausalLM.from_pretrained(model_name, token=HF_TOKEN)
22
 
23
  if self.tokenizer.pad_token is None:
24
  self.tokenizer.pad_token = self.tokenizer.eos_token