# Scraped Hugging Face Space listing header (kept for provenance):
#   Spaces: Runtime error / Runtime error
# NOTE(review): the Space's status was reported as "Runtime error" at scrape time.
from transformers import pipeline
import gradio as gr

# --- Model setup (runs once at import time) ---
# NOTE(review): both pipelines download model weights on first run — requires
# network access and noticeable startup time.

# Load the Whisper model for automatic speech recognition (speech -> text).
whisper_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-small")

# Load the fine-tuned BERT model for harassment classification (text -> label).
bert_pipe = pipeline("text-classification", model="abdulelahagr/harassment_lang_classifier")
def classify_harassment(text):
    """Classify *text* with the harassment model.

    Returns the raw pipeline output: a list of ``{"label": ..., "score": ...}``
    dicts, best prediction first.
    """
    return bert_pipe(text)
def process_audio(speech_file):
    """Transcribe an audio file and classify the transcription for harassment.

    Parameters
    ----------
    speech_file : str | None
        Path to the uploaded audio file. ``gr.Audio(type="filepath")`` passes
        ``None`` when the user clicks Process without providing audio.

    Returns
    -------
    tuple[str, str]
        ``(transcription, predicted_label)`` for the two output widgets.
    """
    # Guard: without this, a None path crashes inside the Whisper pipeline.
    if speech_file is None:
        return "No audio provided.", ""

    # 1. Transcribe with Whisper, forcing English decoding.
    whisper_result = whisper_pipe(speech_file, generate_kwargs={"language": "english"})
    transcription = whisper_result["text"]

    # 2. Classify the transcribed text; the classifier returns a list of
    #    {"label": ..., "score": ...} dicts, best prediction first.
    classification_result = classify_harassment(transcription)
    predicted_label = classification_result[0]['label']

    # 3. Return results for display. (Leftover debug print removed.)
    return transcription, predicted_label
# Build the Gradio UI. Component creation order fixes the on-page layout:
# title, audio input, button, then the two result widgets.
with gr.Blocks() as demo:
    gr.Markdown("## Kids harassment Classification")
    audio_in = gr.Audio(type="filepath")
    run_btn = gr.Button("Process")
    transcript_box = gr.Textbox(label="Transcription")
    label_box = gr.Label(label="Classification Result")

    # Wire the button: one audio input, two outputs (text + label).
    run_btn.click(
        process_audio,
        inputs=audio_in,
        outputs=[transcript_box, label_box],
    )

demo.launch(debug=True)