Update models.py
Browse files
models.py
CHANGED
|
@@ -13,29 +13,37 @@ def download_model_from_drive(file_id, destination_path):
|
|
| 13 |
"""Download the model from Google Drive using gdown."""
|
| 14 |
# Construct the Google Drive download URL
|
| 15 |
url = f"https://drive.google.com/uc?id={file_id}"
|
| 16 |
-
#
|
| 17 |
-
os.
|
|
|
|
|
|
|
| 18 |
# Download the file
|
| 19 |
gdown.download(url, destination_path, quiet=False)
|
| 20 |
|
| 21 |
def load_models(device='cpu'):
|
| 22 |
"""Load YOLO model and the caption generation model."""
|
| 23 |
-
#
|
| 24 |
model_file_path = "model.safetensors" # Adjust based on your file name
|
|
|
|
|
|
|
| 25 |
if not os.path.exists(model_file_path):
|
| 26 |
file_id = "1hUCqZ3X8mcM-KcwWFjcsFg7PA0hUvE3k" # Replace with your file ID
|
|
|
|
| 27 |
download_model_from_drive(file_id, model_file_path)
|
| 28 |
|
| 29 |
# Load the YOLO model
|
|
|
|
| 30 |
yolo_model = YOLO("best.pt").to(device)
|
| 31 |
|
| 32 |
# Load the processor for the caption model
|
|
|
|
| 33 |
processor = AutoProcessor.from_pretrained(
|
| 34 |
"microsoft/Florence-2-base",
|
| 35 |
trust_remote_code=True
|
| 36 |
)
|
| 37 |
|
| 38 |
# Load the caption model state dict from .safetensors
|
|
|
|
| 39 |
model_state_dict = load_file(model_file_path) # Load tensors from .safetensors
|
| 40 |
caption_model = AutoModelForCausalLM.from_pretrained(
|
| 41 |
"microsoft/Florence-2-base",
|
|
@@ -44,6 +52,7 @@ def load_models(device='cpu'):
|
|
| 44 |
caption_model.load_state_dict(model_state_dict) # Map tensors to the model
|
| 45 |
caption_model.to(device) # Move the model to the correct device
|
| 46 |
|
|
|
|
| 47 |
return {
|
| 48 |
'yolo_model': yolo_model,
|
| 49 |
'processor': processor,
|
|
@@ -54,4 +63,4 @@ def load_models(device='cpu'):
|
|
| 54 |
if __name__ == "__main__":
|
| 55 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 56 |
models = load_models(device=device)
|
| 57 |
-
print("
|
|
|
|
def download_model_from_drive(file_id, destination_path):
    """Download the model from Google Drive using gdown.

    Args:
        file_id: The Google Drive file identifier of the shared file.
        destination_path: Local path (optionally including directories)
            where the downloaded file is written.
    """
    # Build the direct-download URL for the shared Drive file.
    download_url = f"https://drive.google.com/uc?id={file_id}"

    # Create the destination directory if the path contains one;
    # os.path.dirname returns "" for a bare filename, so the guard
    # skips makedirs in that case.
    target_dir = os.path.dirname(destination_path)
    if target_dir:
        os.makedirs(target_dir, exist_ok=True)

    # Fetch the file; quiet=False keeps gdown's progress output visible.
    gdown.download(download_url, destination_path, quiet=False)
| 22 |
|
| 23 |
def load_models(device='cpu'):
|
| 24 |
"""Load YOLO model and the caption generation model."""
|
| 25 |
+
# Define the file path for the .safetensors model
|
| 26 |
model_file_path = "model.safetensors" # Adjust based on your file name
|
| 27 |
+
|
| 28 |
+
# Download the model file if it doesn't exist
|
| 29 |
if not os.path.exists(model_file_path):
|
| 30 |
file_id = "1hUCqZ3X8mcM-KcwWFjcsFg7PA0hUvE3k" # Replace with your file ID
|
| 31 |
+
print(f"Downloading model to {model_file_path}...")
|
| 32 |
download_model_from_drive(file_id, model_file_path)
|
| 33 |
|
| 34 |
# Load the YOLO model
|
| 35 |
+
print("Loading YOLO model...")
|
| 36 |
yolo_model = YOLO("best.pt").to(device)
|
| 37 |
|
| 38 |
# Load the processor for the caption model
|
| 39 |
+
print("Loading processor for the caption model...")
|
| 40 |
processor = AutoProcessor.from_pretrained(
|
| 41 |
"microsoft/Florence-2-base",
|
| 42 |
trust_remote_code=True
|
| 43 |
)
|
| 44 |
|
| 45 |
# Load the caption model state dict from .safetensors
|
| 46 |
+
print("Loading caption generation model...")
|
| 47 |
model_state_dict = load_file(model_file_path) # Load tensors from .safetensors
|
| 48 |
caption_model = AutoModelForCausalLM.from_pretrained(
|
| 49 |
"microsoft/Florence-2-base",
|
|
|
|
| 52 |
caption_model.load_state_dict(model_state_dict) # Map tensors to the model
|
| 53 |
caption_model.to(device) # Move the model to the correct device
|
| 54 |
|
| 55 |
+
print("Models loaded successfully!")
|
| 56 |
return {
|
| 57 |
'yolo_model': yolo_model,
|
| 58 |
'processor': processor,
|
|
|
|
if __name__ == "__main__":
    # Prefer the GPU when CUDA is available; otherwise fall back to CPU.
    selected_device = "cuda" if torch.cuda.is_available() else "cpu"
    models = load_models(device=selected_device)
    print("All models are ready to use!")
|