arham061 committed on
Commit
461b2e6
·
1 Parent(s): b2a5c82

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -7
app.py CHANGED
@@ -1,19 +1,28 @@
1
  import gradio as gr
 
2
  import torch
3
- from transformers import pipeline
4
 
5
- # Load the model from Hugging Face space
6
- model = pipeline(task="audio-classification", model="FYP_Model")
 
 
7
 
8
  def classify_audio(audio_file):
9
- # Perform inference using the Hugging Face model
10
- result = model(audio_file.read())
11
- return result[0]["label"]
 
 
 
 
 
 
 
12
 
13
  # Gradio Interface
14
  iface = gr.Interface(
15
  fn=classify_audio,
16
- inputs=gr.Audio(type="filepath", label="Upload or Record Audio"),
17
  outputs=gr.Textbox(),
18
  live=True,
19
  )
 
1
  import gradio as gr
2
+ from transformers import BertTokenizer, BertForSequenceClassification
3
  import torch
 
4
 
5
+ # Load the model and tokenizer from the folder in Hugging Face space
6
+ model_folder = "FYP_Model" # Replace with your actual username and model name
7
+ tokenizer = BertTokenizer.from_pretrained(model_folder)
8
+ model = BertForSequenceClassification.from_pretrained(model_folder)
9
 
10
  def classify_audio(audio_file):
11
+ # Read the audio file and tokenize it
12
+ audio_content = audio_file.read()
13
+ inputs = tokenizer(audio_content, return_tensors="pt", truncation=True)
14
+
15
+ # Perform inference using the loaded model
16
+ outputs = model(**inputs)
17
+ logits = outputs.logits
18
+ predicted_class = torch.argmax(logits).item()
19
+
20
+ return f"Predicted class: {predicted_class}"
21
 
22
  # Gradio Interface
23
  iface = gr.Interface(
24
  fn=classify_audio,
25
+ inputs=gr.Audio(type="file", label="Upload or Record Audio"),
26
  outputs=gr.Textbox(),
27
  live=True,
28
  )