# Hugging Face Spaces page residue ("Spaces: Sleeping") captured when this
# file was copied from the web UI — not part of the program itself.
"""Whisper transcription Space: a minimal Gradio app that converts an
uploaded audio file to text using OpenAI's Whisper model."""

import json
import os
import tempfile

import gradio as gr
import whisper

# Load the model exactly once at startup so every request reuses the
# same in-memory weights instead of paying the load cost per call.
print("Loading Whisper model...")
model = whisper.load_model("base")
print("Model loaded successfully!")
def transcribe_audio(audio_file):
    """Transcribe an uploaded audio file to text with Whisper.

    Parameters
    ----------
    audio_file : str | None
        Filesystem path to the uploaded audio (Gradio ``type="filepath"``).
        ``None`` (or empty) when the user submits without uploading.

    Returns
    -------
    str
        A JSON string: ``{"text", "language", "status": "success"}`` on
        success, or ``{"error", "status": "failed"}`` on any failure.
    """
    # Gradio passes None when no file was uploaded; fail fast with a
    # clear message instead of letting Whisper raise an opaque error.
    if not audio_file:
        return json.dumps({
            "error": "No audio file provided.",
            "status": "failed",
        })
    try:
        print(f"Processing audio file: {audio_file}")
        # Transcribe the audio; result carries the text and the
        # auto-detected language code.
        result = model.transcribe(audio_file)
        print("Transcription completed successfully!")
        # ensure_ascii=False keeps non-English transcriptions readable
        # instead of escaping every non-ASCII character to \uXXXX.
        return json.dumps({
            "text": result["text"],
            "language": result["language"],
            "status": "success",
        }, ensure_ascii=False)
    except Exception as e:
        # Broad catch is deliberate: this is the API boundary, and the
        # caller must always receive well-formed JSON, never a traceback.
        print(f"Error: {str(e)}")
        return json.dumps({
            "error": str(e),
            "status": "failed",
        })
# Wire the transcription function into a minimal Gradio UI. The Audio
# component hands transcribe_audio a temporary file path (type="filepath").
audio_input = gr.Audio(sources=["upload"], type="filepath", label="Upload Audio File")
result_output = gr.Textbox(label="Transcription Result", lines=5)

demo = gr.Interface(
    fn=transcribe_audio,
    inputs=audio_input,
    outputs=result_output,
    title=" Whisper Transcription API",
    description="Upload an audio file to convert it to text using OpenAI's Whisper model.",
    # Remote example clips so visitors can try the demo without their own audio.
    examples=[
        ["https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac"],
        ["https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/2.flac"],
    ],
)
# Launch the application
if __name__ == "__main__":
    # share=True publishes a temporary public tunnel URL; debug=True
    # surfaces server logs and errors in the console while running.
    demo.launch(debug=True, share=True)