saa231 committed on
Commit
1bad0b9
·
verified ·
1 Parent(s): fce95fa

Update project_model.py

Browse files
Files changed (1) hide show
  1. project_model.py +14 -5
project_model.py CHANGED
@@ -16,20 +16,29 @@ from ultralytics import YOLO
16
  from transformers import pipeline, DPTFeatureExtractor, DPTForDepthEstimation
17
  from TTS.api import TTS
18
 
 
 
 
 
 
 
 
19
  # Load models
20
 
21
- device = "cuda" if torch.cuda.is_available() else "cpu"
22
- yolo_model = YOLO("yolov8n.pt")
23
- depth_model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large").to(device).eval()
 
24
  depth_feat = DPTFeatureExtractor.from_pretrained("Intel/dpt-large")
25
- whisper_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-small", device=0 if torch.cuda.is_available() else -1)
 
26
  gemma_pipe = pipeline(
27
  "image-text-to-text",
28
  model="google/gemma-3-4b-it",
29
  device=0 if torch.cuda.is_available() else -1,
30
  torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32
31
  )
32
- tts = TTS(model_name="tts_models/en/ljspeech/tacotron2-DDC")
33
 
34
  # Function to process image and audio
35
  def process_inputs(image: Image.Image, audio_path: str):
 
from transformers import pipeline, DPTFeatureExtractor, DPTForDepthEstimation
from TTS.api import TTS

from huggingface_hub import login
import os

# Authenticate with the Hugging Face Hub using a token stored in the
# environment (needed for the gated google/gemma-3-4b-it checkpoint).
# Guard against a missing variable: login(token=None) would fall back to an
# interactive prompt / error at import time instead of failing gracefully.
_hf_token = os.getenv("gemma3_token")
if _hf_token:
    login(token=_hf_token)

# ---- Load models (all happen once at import time) ----

# Probe CUDA once and reuse the answer for every model below.
_use_cuda = torch.cuda.is_available()
device = "cuda" if _use_cuda else "cpu"  # Use GPU when available

yolo_model = YOLO("yolov9n.pt")  # YOLOv9 object detector (weights auto-downloaded)
depth_model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large").to(device).eval()  # DPT depth estimator
depth_feat = DPTFeatureExtractor.from_pretrained("Intel/dpt-large")  # matching DPT preprocessor
whisper_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-small", device=0 if _use_cuda else -1)  # Whisper ASR
# Gemma-3-4B multimodal (image + text -> text) pipeline; bfloat16 only on GPU.
gemma_pipe = pipeline(
    "image-text-to-text",
    model="google/gemma-3-4b-it",
    device=0 if _use_cuda else -1,
    torch_dtype=torch.bfloat16 if _use_cuda else torch.float32,
)
tts = TTS(model_name="tts_models/en/ljspeech/tacotron2-DDC")  # Text-to-speech synthesizer
43
  # Function to process image and audio
44
  def process_inputs(image: Image.Image, audio_path: str):