"""
Example usage of Marine1 model from Hugging Face Hub
This shows how users will interact with your model after upload
"""
from huggingface_hub import hf_hub_download
import torch
import librosa
import numpy as np
def download_and_use_model():
    """Download the fine-tuned Marine1 checkpoint from the Hugging Face Hub.

    Prints the available class names found in the checkpoint.

    Returns:
        str: Local filesystem path of the downloaded checkpoint file.
    """
    print("📥 Downloading model from Hugging Face Hub...")

    # Download the fine-tuned model checkpoint.
    model_path = hf_hub_download(
        repo_id="shiv207/Marine1",
        filename="best_model_finetuned.pth",
    )
    print(f"✅ Model downloaded to: {model_path}")

    # Map the checkpoint onto GPU when available, otherwise CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # NOTE(security): weights_only=False unpickles arbitrary objects from the
    # checkpoint file — only load checkpoints from a trusted source.
    checkpoint = torch.load(model_path, map_location=device, weights_only=False)

    print("✅ Model loaded successfully")
    print(f"   Classes: {list(checkpoint['class_to_id'].keys())}")
    return model_path
def quick_predict(model_path, audio_path):
    """Classify one audio file with the Marine1 checkpoint and print results.

    Args:
        model_path: Path to the downloaded ``.pth`` checkpoint.
        audio_path: Path to an audio file readable by librosa.
    """
    # Load up to 10 s of audio, resampled to 16 kHz — presumably the
    # training-time preprocessing; TODO confirm against the training script.
    y, sr = librosa.load(audio_path, sr=16000, duration=10.0)

    # Convert to a log-scaled mel spectrogram, the model's expected input.
    mel_spec = librosa.feature.melspectrogram(
        y=y, sr=sr, n_mels=128, n_fft=2048,
        hop_length=512, fmax=8000,
    )
    log_mel_spec = librosa.power_to_db(mel_spec, ref=np.max)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # NOTE(security): weights_only=False unpickles arbitrary objects from the
    # checkpoint file — only load checkpoints from a trusted source.
    checkpoint = torch.load(model_path, map_location=device, weights_only=False)

    # Rebuild the training architecture: ResNet-18 with a single-channel
    # input conv (spectrograms have 1 channel) and a class-count-sized head.
    from torchvision import models
    model = models.resnet18(weights=None)
    model.conv1 = torch.nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
    num_classes = len(checkpoint['class_to_id'])
    model.fc = torch.nn.Linear(model.fc.in_features, num_classes)

    # Load the fine-tuned weights and switch to inference mode.
    model.load_state_dict(checkpoint['model_state_dict'])
    model.to(device)
    model.eval()

    # Add batch and channel dims: (n_mels, time) -> (1, 1, n_mels, time).
    input_tensor = torch.FloatTensor(log_mel_spec).unsqueeze(0).unsqueeze(0).to(device)
    with torch.no_grad():
        outputs = model(input_tensor)
        probabilities = torch.nn.functional.softmax(outputs, dim=1)[0]

    # Invert class_to_id so class names line up with output indices.
    id_to_class = {v: k for k, v in checkpoint['class_to_id'].items()}
    class_names = [id_to_class[i] for i in range(num_classes)]

    predicted_idx = probabilities.argmax().item()
    predicted_class = class_names[predicted_idx]
    confidence = probabilities[predicted_idx].item()

    print(f"\n🎯 Prediction: {predicted_class.replace('_', ' ').title()}")
    print(f"   Confidence: {confidence*100:.2f}%")
    print("\n All probabilities:")
    for i, class_name in enumerate(class_names):
        print(f"   - {class_name.replace('_', ' ').title():25s}: {probabilities[i].item()*100:6.2f}%")
def use_with_inference_class():
    """Example prediction using the project's ``Marine1Classifier`` helper.

    Returns:
        dict: Prediction result; the code below reads its 'predicted_class'
        and 'confidence' keys.
    """
    # Local import: the `inference` module is optional project code.
    from inference import Marine1Classifier

    # Download the checkpoint (hf_hub_download is imported at module level).
    model_path = hf_hub_download(
        repo_id="shiv207/Marine1",
        filename="best_model_finetuned.pth",
    )

    # Initialize the classifier and run a prediction.
    classifier = Marine1Classifier(model_path)
    result = classifier.predict("your_audio.wav")

    print(f"Prediction: {result['predicted_class']}")
    print(f"Confidence: {result['confidence']*100:.2f}%")
    return result
if __name__ == "__main__":
    import sys

    print("🚀 Marine1 Model Usage Examples\n")

    # Example 1: download the checkpoint from the Hub.
    print("Example 1: Downloading model from Hugging Face Hub")
    print("-" * 50)
    model_path = download_and_use_model()

    # Example 2: run a prediction only if the caller supplied an audio file.
    if len(sys.argv) > 1:
        audio_path = sys.argv[1]
        print(f"\nExample 2: Making prediction on {audio_path}")
        print("-" * 50)
        quick_predict(model_path, audio_path)
    else:
        print("\nTo test prediction, run:")
        print("python example_usage.py path/to/audio.wav")