AbdulWahab14 commited on
Commit
6808dee
·
verified ·
1 Parent(s): bd3b8c5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -13
app.py CHANGED
@st.cache_resource
def load_model():
    """Load and cache the phi-2 tokenizer and model (one load per Streamlit session)."""
    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
    # Fall back to the EOS token as padding when the tokenizer defines none.
    if tokenizer.pad_token_id is None:
        tokenizer.pad_token_id = tokenizer.eos_token_id

    # Phi models often ship a config without pad_token_id; mirror the tokenizer's.
    config = AutoConfig.from_pretrained(MODEL_ID)
    if getattr(config, "pad_token_id", None) is None:
        config.pad_token_id = tokenizer.pad_token_id

    model = AutoModelForCausalLM.from_pretrained(
        MODEL_ID,
        config=config,  # carry the patched pad_token_id into the model
        torch_dtype=torch.float32,
        device_map="auto",
    )
    return tokenizer, model

tokenizer, model = load_model()
 
@st.cache_resource
def load_model():
    """Load and cache the phi-2 tokenizer and model for CPU inference.

    Returns:
        tuple: ``(tokenizer, model)`` — the tokenizer has a pad token
        guaranteed to be set, and the model is in eval mode.
    """
    tokenizer = AutoTokenizer.from_pretrained(
        MODEL_ID,
        trust_remote_code=True,
    )
    # Phi-2's tokenizer ships without a pad token; reuse EOS so padding works.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    model = AutoModelForCausalLM.from_pretrained(
        MODEL_ID,
        trust_remote_code=True,
        # float32 is the safe dtype for CPU-only inference (no half-precision kernels).
        torch_dtype=torch.float32,
        # NOTE: device_map="auto" deliberately omitted — it requires `accelerate`
        # and is unnecessary for a plain CPU deployment.
    )
    model.eval()  # inference only: disable dropout / training-mode layers
    return tokenizer, model

tokenizer, model = load_model()