# Marine1 / example_usage.py
# Uploaded to the Hugging Face Hub via huggingface_hub by shiv207
# (commit 198d6d5, verified).
"""
Example usage of Marine1 model from Hugging Face Hub
This shows how users will interact with your model after upload
"""
from huggingface_hub import hf_hub_download
import torch
import librosa
import numpy as np
def download_and_use_model():
    """Download the fine-tuned Marine1 checkpoint from the Hugging Face Hub.

    Fetches ``best_model_finetuned.pth`` from the ``shiv207/Marine1`` repo,
    loads it once to verify it deserializes and to print the class labels,
    then returns the local path for reuse by the prediction helpers.

    Returns:
        str: Local filesystem path of the cached checkpoint file.
    """
    print("πŸ“₯ Downloading model from Hugging Face Hub...")

    # Download (or reuse from the local HF cache) the fine-tuned checkpoint.
    model_path = hf_hub_download(
        repo_id="shiv207/Marine1",
        filename="best_model_finetuned.pth"
    )
    print(f"βœ… Model downloaded to: {model_path}")

    # Prefer GPU when available; map_location keeps CPU-only hosts working.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # SECURITY: weights_only=False runs the full pickle machinery and can
    # execute arbitrary code from the file. Acceptable only because the repo
    # is trusted; prefer weights_only=True if the checkpoint is plain tensors.
    checkpoint = torch.load(model_path, map_location=device, weights_only=False)
    print("βœ… Model loaded successfully")
    print(f" Classes: {list(checkpoint['class_to_id'].keys())}")
    return model_path
def quick_predict(model_path, audio_path):
    """Classify a single audio clip with the Marine1 checkpoint.

    Rebuilds the ResNet-18 architecture, loads the fine-tuned weights,
    converts the clip to a log-mel spectrogram, and prints the predicted
    class with per-class probabilities.

    Args:
        model_path: Path to the ``best_model_finetuned.pth`` checkpoint.
        audio_path: Path to the audio file to classify.

    Returns:
        dict: ``{'predicted_class', 'confidence', 'probabilities'}`` —
        the top label, its probability, and a class-name -> probability map.
        (Return added for programmatic use; printing behavior unchanged.)
    """
    # Load up to 10 s of audio, resampled to 16 kHz mono.
    y, sr = librosa.load(audio_path, sr=16000, duration=10.0)

    # Log-mel spectrogram front-end — parameters presumably match training;
    # TODO confirm against the training pipeline.
    mel_spec = librosa.feature.melspectrogram(
        y=y, sr=sr, n_mels=128, n_fft=2048,
        hop_length=512, fmax=8000
    )
    log_mel_spec = librosa.power_to_db(mel_spec, ref=np.max)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # SECURITY: weights_only=False unpickles arbitrary objects — only safe
    # for trusted checkpoints; prefer weights_only=True where possible.
    checkpoint = torch.load(model_path, map_location=device, weights_only=False)

    # Rebuild the architecture: ResNet-18 with a single-channel first conv
    # (spectrogram input) and a classifier head sized from the checkpoint.
    from torchvision import models
    model = models.resnet18(weights=None)
    model.conv1 = torch.nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
    num_classes = len(checkpoint['class_to_id'])
    model.fc = torch.nn.Linear(model.fc.in_features, num_classes)

    model.load_state_dict(checkpoint['model_state_dict'])
    model.to(device)
    model.eval()

    # Shape (1, 1, n_mels, time): batch and channel dims for the CNN.
    input_tensor = torch.FloatTensor(log_mel_spec).unsqueeze(0).unsqueeze(0).to(device)
    with torch.no_grad():
        outputs = model(input_tensor)
        probabilities = torch.nn.functional.softmax(outputs, dim=1)[0]

    # Invert the class_to_id mapping so names line up with logit indices.
    id_to_class = {v: k for k, v in checkpoint['class_to_id'].items()}
    class_names = [id_to_class[i] for i in range(num_classes)]

    predicted_idx = probabilities.argmax().item()
    predicted_class = class_names[predicted_idx]
    confidence = probabilities[predicted_idx].item()

    print(f"\n🎯 Prediction: {predicted_class.replace('_', ' ').title()}")
    print(f" Confidence: {confidence*100:.2f}%")
    print(f"\n All probabilities:")
    for i, class_name in enumerate(class_names):
        print(f" - {class_name.replace('_', ' ').title():25s}: {probabilities[i].item()*100:6.2f}%")

    # Mirror the result shape used by Marine1Classifier.predict for
    # consistency across this example file.
    return {
        'predicted_class': predicted_class,
        'confidence': confidence,
        'probabilities': {
            name: probabilities[i].item() for i, name in enumerate(class_names)
        },
    }
def use_with_inference_class():
    """Demonstrate the higher-level ``Marine1Classifier`` wrapper.

    Downloads the checkpoint from the Hub, wraps it in the project's
    inference class, runs a prediction on a placeholder audio path, and
    returns the resulting dict.
    """
    from inference import Marine1Classifier
    from huggingface_hub import hf_hub_download

    # Fetch the fine-tuned checkpoint (cached locally by huggingface_hub).
    checkpoint_file = hf_hub_download(
        repo_id="shiv207/Marine1",
        filename="best_model_finetuned.pth"
    )

    # The classifier encapsulates model construction and preprocessing.
    classifier = Marine1Classifier(checkpoint_file)

    # Run inference on a (placeholder) audio file and report the outcome.
    result = classifier.predict("your_audio.wav")
    print(f"Prediction: {result['predicted_class']}")
    print(f"Confidence: {result['confidence']*100:.2f}%")
    return result
if __name__ == "__main__":
    import sys

    print("🌊 Marine1 Model Usage Examples\n")

    # Example 1: always download (or reuse cached) model from the Hub.
    print("Example 1: Downloading model from Hugging Face Hub")
    print("-" * 50)
    model_path = download_and_use_model()

    # Example 2: run a prediction only when an audio path was supplied.
    audio_arg = sys.argv[1] if len(sys.argv) > 1 else None
    if audio_arg is not None:
        print(f"\nExample 2: Making prediction on {audio_arg}")
        print("-" * 50)
        quick_predict(model_path, audio_arg)
    else:
        print("\nTo test prediction, run:")
        print("python example_usage.py path/to/audio.wav")