# Example of using the model via Hugging Face API

```python
import requests
import librosa


def predict_with_api(audio_path, api_token):
    """Make a prediction using the Hugging Face Inference API.

    Args:
        audio_path: Path to the audio file.
        api_token: Your Hugging Face API token.

    Returns:
        A (prediction, probability) tuple, where prediction is 0 or 1,
        or None if the API call returned an error.
    """
    # Load the audio and resample it to the 16 kHz rate the model expects
    audio, sr = librosa.load(audio_path, sr=16000)

    # Convert to a plain Python list so the payload is JSON-serializable
    audio_list = audio.tolist()

    # API endpoint
    API_URL = "https://api-inference.huggingface.co/models/ahmad1703/whis_ee"
    headers = {"Authorization": f"Bearer {api_token}"}

    # Make the API call
    payload = {"inputs": {"sampling_rate": 16000, "raw": audio_list}}
    response = requests.post(API_URL, headers=headers, json=payload)
    result = response.json()

    # An error response is a dict with an "error" key; a successful
    # response is a list of {"label", "score"} entries
    if isinstance(result, dict) and "error" in result:
        print(f"Error: {result['error']}")
        return None

    # Extract the probability and threshold it into a class label
    prob = result[0]["score"]
    prediction = 1 if prob > 0.5 else 0

    return prediction, prob


if __name__ == "__main__":
    # Example usage
    audio_path = "example.wav"  # Replace with your audio file
    api_token = "YOUR_API_TOKEN"  # Replace with your API token

    result = predict_with_api(audio_path, api_token)
    if result is not None:
        prediction, probability = result
        print(f"Prediction: {prediction}, Probability: {probability:.4f}")
```
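Note that the hosted Inference API can return an error payload (with an `estimated_time` field) while the model is cold-starting, which the error branch above would catch. The request body accepts an `options` field with a `wait_for_model` flag that holds the request open until the model is loaded. A minimal sketch of that variant (the function name is just for illustration):

```python
import requests
import librosa

API_URL = "https://api-inference.huggingface.co/models/ahmad1703/whis_ee"


def predict_waiting_for_model(audio_path, api_token):
    # Load and resample exactly as above
    audio, _ = librosa.load(audio_path, sr=16000)
    payload = {
        "inputs": {"sampling_rate": 16000, "raw": audio.tolist()},
        # Ask the API to hold the request until the model has loaded
        # instead of immediately returning a "model is loading" error.
        "options": {"wait_for_model": True},
    }
    headers = {"Authorization": f"Bearer {api_token}"}
    # Use a generous timeout, since the request blocks during the cold start
    response = requests.post(API_URL, headers=headers, json=payload, timeout=300)
    response.raise_for_status()
    return response.json()
```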
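The original imports also pulled in `huggingface_hub`'s legacy `InferenceApi` without using it; its maintained replacement is `InferenceClient`, which wraps the same endpoint without hand-building the payload. A sketch of the equivalent call, assuming the model is exposed under the audio-classification task (the result handling below is an assumption, since the returned label names depend on the model's config):

```python
from huggingface_hub import InferenceClient


def predict_with_client(audio_path, api_token):
    client = InferenceClient(token=api_token)
    # The client accepts a local file path and handles serialization itself
    results = client.audio_classification(audio_path, model="ahmad1703/whis_ee")
    # Each entry carries a label and a score (attribute access in recent
    # huggingface_hub versions); take the highest-scoring label.
    top = results[0]
    return top.label, top.score
```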