|
|
|
|
|
|
|
|
from huggingface_hub.inference_api import InferenceApi |
|
|
import requests |
|
|
import numpy as np |
|
|
import librosa |
|
|
import json |
|
|
|
|
|
def predict_with_api(audio_path, api_token):
    """
    Make a prediction using the Hugging Face Inference API.

    Args:
        audio_path: Path to the audio file.
        api_token: Your Hugging Face API token.

    Returns:
        A ``(prediction, probability)`` tuple, where ``prediction`` is 0 or 1
        (score thresholded at 0.5) and ``probability`` is the raw model score.
        Returns ``None`` if the API responds with an error.
    """
    # Resample to 16 kHz — the rate the payload below declares to the model.
    audio, sr = librosa.load(audio_path, sr=16000)

    # JSON cannot carry numpy arrays, so serialize the waveform as a plain list.
    audio_list = audio.tolist()

    API_URL = "https://api-inference.huggingface.co/models/ahmad1703/whis_ee"
    headers = {"Authorization": f"Bearer {api_token}"}

    payload = {"inputs": {"sampling_rate": 16000, "raw": audio_list}}
    # A timeout prevents the script from hanging forever on a stalled server;
    # cold model loads on the free inference tier can be slow, hence 120 s.
    response = requests.post(API_URL, headers=headers, json=payload, timeout=120)
    result = response.json()

    # The API signals failure via an "error" key in a JSON object.
    if "error" in result:
        print(f"Error: {result['error']}")
        return None

    # Success responses are a list of {"label": ..., "score": ...} dicts;
    # threshold the first score at 0.5 to get a binary label.
    prob = result[0]["score"]
    prediction = 1 if prob > 0.5 else 0

    return prediction, prob
|
|
|
|
|
if __name__ == "__main__":
    # Replace with a real audio file and your Hugging Face API token.
    audio_path = "example.wav"
    api_token = "YOUR_API_TOKEN"

    # predict_with_api returns None when the API reports an error; guard the
    # unpack so an error does not crash with a TypeError.
    result = predict_with_api(audio_path, api_token)
    if result is None:
        print("Prediction failed.")
    else:
        prediction, probability = result
        print(f"Prediction: {prediction}, Probability: {probability:.4f}")
|
|
|