Chaitanya-aitf committed on
Commit
c9d5801
·
verified ·
1 Parent(s): a3b4341

Update models/visual_analyzer.py

Browse files
Files changed (1) hide show
  1. models/visual_analyzer.py +6 -0
models/visual_analyzer.py CHANGED
@@ -105,9 +105,13 @@ Respond with just the emotion."""
105
  """Load the Qwen2-VL model with quantization."""
106
  with LogTimer(logger, "Loading Qwen2-VL model"):
107
  try:
 
108
  import torch
109
  from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
110
 
 
 
 
111
  # Determine device
112
  if self.config.device == "cuda" and torch.cuda.is_available():
113
  self._device = "cuda"
@@ -120,6 +124,7 @@ Respond with just the emotion."""
120
  self.processor = AutoProcessor.from_pretrained(
121
  self.config.visual_model_id,
122
  trust_remote_code=True,
 
123
  )
124
 
125
  # Load model with quantization
@@ -158,6 +163,7 @@ Respond with just the emotion."""
158
 
159
  self.model = Qwen2VLForConditionalGeneration.from_pretrained(
160
  self.config.visual_model_id,
 
161
  **model_kwargs,
162
  )
163
 
 
105
  """Load the Qwen2-VL model with quantization."""
106
  with LogTimer(logger, "Loading Qwen2-VL model"):
107
  try:
108
+ import os
109
  import torch
110
  from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
111
 
112
+ # Get HuggingFace token from environment (optional - model is open access)
113
+ hf_token = os.environ.get("HF_TOKEN")
114
+
115
  # Determine device
116
  if self.config.device == "cuda" and torch.cuda.is_available():
117
  self._device = "cuda"
 
124
  self.processor = AutoProcessor.from_pretrained(
125
  self.config.visual_model_id,
126
  trust_remote_code=True,
127
+ token=hf_token,
128
  )
129
 
130
  # Load model with quantization
 
163
 
164
  self.model = Qwen2VLForConditionalGeneration.from_pretrained(
165
  self.config.visual_model_id,
166
+ token=hf_token,
167
  **model_kwargs,
168
  )
169