Sumit404 committed on
Commit
f32d5e5
·
verified ·
1 Parent(s): 3291cc7

Upload 6 files

Browse files
Files changed (7) hide show
  1. .gitattributes +1 -0
  2. M23IQT007.pdf +3 -0
  3. app.py +29 -0
  4. gradio_ui.py +18 -0
  5. sentiment_analysis.py +14 -0
  6. speech_to_text.py +13 -0
  7. ui.py +17 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ M23IQT007.pdf filter=lfs diff=lfs merge=lfs -text
M23IQT007.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1db532e463a5591c3d4852d142189178c7e01e21fc75523b7b713983628d421d
3
+ size 510689
app.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from fastapi import FastAPI, UploadFile, File
import whisper
from transformers import pipeline
import os
import tempfile

app = FastAPI()

# Load models once at startup — both are heavyweight and must not be
# reloaded per request.
speech_model = whisper.load_model("base")
sentiment_analyzer = pipeline("sentiment-analysis")


@app.post("/analyze/")
async def analyze(audio: UploadFile = File(...)):
    """Transcribe an uploaded audio file and classify its sentiment.

    Returns a JSON object with:
      - "transcription": the recognized text from Whisper
      - "sentiment": the first pipeline result (label + score dict)
    """
    # Whisper's transcribe() takes a filesystem path, so spool the upload
    # to a temp file. Keep the original suffix so ffmpeg can sniff the
    # container format from the extension.
    suffix = os.path.splitext(audio.filename or "")[1]
    with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as temp:
        temp.write(await audio.read())
        temp_path = temp.name

    try:
        # Transcribe speech to text.
        transcription = speech_model.transcribe(temp_path)["text"]

        # Analyze sentiment of the transcription.
        sentiment = sentiment_analyzer(transcription)
    finally:
        # BUG FIX: the original used delete=False and never removed the
        # file, leaking one temp file per request.
        os.unlink(temp_path)

    return {
        "transcription": transcription,
        "sentiment": sentiment[0]
    }

# Run Server: uvicorn app:app --reload
gradio_ui.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import requests


def process_audio(audio):
    """POST the recorded/uploaded audio file to the FastAPI backend.

    *audio* is a local file path (guaranteed by type="filepath" below).
    Returns a (transcription, sentiment) pair of strings, or
    ("Error", "Error") if the request fails or no audio was provided.
    """
    if audio is None:
        # User hit submit without recording/uploading anything.
        return "Error", "Error"

    # BUG FIX: the original sent the path *string* as the file payload
    # ({"audio": audio}), so the server received the path text rather
    # than the audio bytes. Open the file and stream its contents.
    with open(audio, "rb") as audio_file:
        response = requests.post(
            "http://127.0.0.1:8000/analyze/",
            files={"audio": audio_file},
        )

    if response.status_code == 200:
        data = response.json()
        return data["transcription"], data["sentiment"]
    else:
        return "Error", "Error"


gr.Interface(
    fn=process_audio,
    # type="filepath" pins the callback argument to a path on disk,
    # matching the open() call above.
    inputs=gr.Audio(type="filepath"),
    outputs=["text", "text"]
).launch(share=True)
sentiment_analysis.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from transformers import pipeline

# Pre-trained sentiment classifier (Hugging Face default checkpoint),
# created once at import time.
sentiment_analyzer = pipeline("sentiment-analysis")


def analyze_sentiment(text):
    """Run the classifier on *text* and return the top result.

    The pipeline yields a list of {label, score} dicts; only the first
    entry is of interest here.
    """
    return sentiment_analyzer(text)[0]


# Quick manual check when run as a script.
if __name__ == "__main__":
    sample = "I am really happy with the service!"
    print("Sentiment:", analyze_sentiment(sample))
speech_to_text.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import whisper

# Whisper "base" model, loaded a single time at import.
model = whisper.load_model("base")


def transcribe_audio(audio_file):
    """Transcribe the audio at path *audio_file* and return the text."""
    return model.transcribe(audio_file)["text"]


# Quick manual check when run as a script.
if __name__ == "__main__":
    print("Transcription:", transcribe_audio("sample_audio.wav"))
ui.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
import requests

st.title("AI Voice Intelligence for Customer Support")

uploaded_file = st.file_uploader("Upload an audio file", type=["wav", "mp3"])

if uploaded_file:
    # Send the raw audio bytes to the FastAPI backend for analysis.
    files = {"audio": uploaded_file.getvalue()}
    try:
        response = requests.post("http://127.0.0.1:8000/analyze/", files=files)
    except requests.RequestException as exc:
        # BUG FIX: a backend that is down used to raise an unhandled
        # exception straight into the Streamlit page.
        st.error(f"Could not reach the analysis service: {exc}")
    else:
        if response.status_code == 200:
            data = response.json()
            st.write("### Transcription:")
            st.write(data["transcription"])
            st.write("### Sentiment Analysis:")
            st.write(data["sentiment"])
        else:
            # BUG FIX: the original rendered nothing at all on a non-200
            # response, leaving the user with a silent failure.
            st.error(f"Analysis failed with HTTP status {response.status_code}")