AKSazgar commited on
Commit
a7d3992
·
1 Parent(s): 7040489

Fix authentication for private model

Browse files

- Add HF_TOKEN environment variable support
- Replace deprecated torch_dtype parameter with dtype
- Add token parameter to model and processor loading

Files changed (1) hide show
  1. app.py +14 -3
app.py CHANGED
@@ -13,17 +13,28 @@ import os
13
  # Model configuration
14
  MODEL_ID = "AKSazgar/ECG-Instruct-Llama-3.2-11B-Vision"
15
 
 
 
 
 
 
 
 
 
 
 
16
  print(f"Loading model: {MODEL_ID}")
17
  print("This may take a few minutes on first load...")
18
 
19
- # Load model and processor
20
  model = MllamaForConditionalGeneration.from_pretrained(
21
  MODEL_ID,
22
- torch_dtype=torch.bfloat16,
23
  device_map="auto",
 
24
  )
25
 
26
- processor = AutoProcessor.from_pretrained(MODEL_ID)
27
 
28
  print("Model loaded successfully!")
29
 
 
13
# Model configuration
MODEL_ID = "AKSazgar/ECG-Instruct-Llama-3.2-11B-Vision"

# Get the HuggingFace access token from the environment; required because the
# model repository is private. (os.environ.get already defaults to None, so no
# explicit default is needed.)
HF_TOKEN = os.environ.get("HF_TOKEN")

if not HF_TOKEN:
    # Fail fast with an actionable message instead of surfacing an opaque
    # 401/403 error from deep inside from_pretrained().
    raise ValueError(
        "HF_TOKEN environment variable is not set. "
        "Please add your HuggingFace token to the Space secrets. "
        "Go to Settings > Variables and secrets > Add a secret"
    )

print(f"Loading model: {MODEL_ID}")
print("This may take a few minutes on first load...")

# Load model and processor with the authentication token.
# `dtype` replaces the deprecated `torch_dtype` keyword argument.
model = MllamaForConditionalGeneration.from_pretrained(
    MODEL_ID,
    dtype=torch.bfloat16,
    device_map="auto",  # shard across available devices automatically
    token=HF_TOKEN,
)

processor = AutoProcessor.from_pretrained(MODEL_ID, token=HF_TOKEN)

print("Model loaded successfully!")
40