Vlad Bastina committed on
Commit
da622d4
·
1 Parent(s): 60dbb29

plot with the image

Browse files
.gitignore CHANGED
@@ -3,4 +3,5 @@
3
  gen-lang-client-0065207637-eaf8e92995b6.json
4
 
5
  *.wav
6
- prompt.txt
 
 
3
  gen-lang-client-0065207637-eaf8e92995b6.json
4
 
5
  *.wav
6
+ prompt.txt
7
+ plot.png
__pycache__/gemini_call.cpython-312.pyc CHANGED
Binary files a/__pycache__/gemini_call.cpython-312.pyc and b/__pycache__/gemini_call.cpython-312.pyc differ
 
__pycache__/sentiment_analysis.cpython-312.pyc CHANGED
Binary files a/__pycache__/sentiment_analysis.cpython-312.pyc and b/__pycache__/sentiment_analysis.cpython-312.pyc differ
 
gemini_call.py CHANGED
@@ -1,5 +1,7 @@
1
  import google.generativeai as genai
2
  import os
 
 
3
 
4
  final_prompt = f'''Task:
5
 
@@ -39,11 +41,6 @@ final_prompt = f'''Task:
39
  Sentence 3: -3
40
  Sentence 4: +3
41
 
42
- Histogram:
43
- | Sentence 1 | Sentence 2 | Sentence 3 | Sentence 4 |
44
- |------------|------------|------------|------------|
45
- | -9 | -7 | -3 | +3 |
46
-
47
  Conclusion:
48
 
49
  After analyzing all sentences, provide a conclusion about the overall sentiment of the conversation.
@@ -71,12 +68,41 @@ api_key = os.getenv("GOOGLE_API_KEY")
71
  genai.configure(api_key=api_key)
72
  model = genai.GenerativeModel("gemini-2.0-flash-exp" , system_instruction=final_prompt)
73
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
74
  def ask_gemini(prompt:str) ->str:
75
 
76
 
77
  response = model.generate_content(prompt)
 
 
 
 
 
78
  return response.text
79
 
80
  if __name__=="__main__":
81
- response = model.generate_content("I hated every minute of my experience with you guys. You are straight garbage. But i love the way you look")
82
- print(response.text)
 
1
  import google.generativeai as genai
2
  import os
3
+ import re
4
+ import matplotlib.pyplot as plt
5
 
6
  final_prompt = f'''Task:
7
 
 
41
  Sentence 3: -3
42
  Sentence 4: +3
43
 
 
 
 
 
 
44
  Conclusion:
45
 
46
  After analyzing all sentences, provide a conclusion about the overall sentiment of the conversation.
 
68
  genai.configure(api_key=api_key)
69
  model = genai.GenerativeModel("gemini-2.0-flash-exp" , system_instruction=final_prompt)
70
 
71
def extract_histogram_data(text):
    """Extract sentence numbers and sentiment scores from the model output.

    Parses lines of the form ``Sentence <n>: <score>`` where the score may
    carry an explicit leading ``+`` or ``-`` sign.

    Args:
        text: Raw response text produced by the Gemini model.

    Returns:
        A tuple ``(sentences, sentiment_scores)`` of two parallel lists of
        ints; both are empty when no scores are found.
    """
    # [+-]? accepts exactly one optional sign. The previous pattern
    # (-?\+?\d+) could also match a malformed "-+3", which int() would
    # then reject with ValueError; that token is now simply skipped.
    histogram_pattern = r"Sentence (\d+): ([+-]?\d+)"

    # Find all (sentence-number, score) pairs in the text.
    matches = re.findall(histogram_pattern, text)

    # Split the pairs into two parallel int lists.
    sentences = [int(match[0]) for match in matches]
    sentiment_scores = [int(match[1]) for match in matches]

    return sentences, sentiment_scores
83
+
84
def plot_histogram(sentences, sentiment_scores):
    """Render per-sentence sentiment scores as a bar chart.

    Writes the figure to ``plot.png`` in the working directory, then closes
    it so repeated calls do not accumulate open matplotlib figures.

    Args:
        sentences: Sentence numbers (x-axis positions).
        sentiment_scores: Sentiment score per sentence (bar heights).
    """
    plt.figure(figsize=(6, 4))
    plt.bar(sentences, sentiment_scores, color='skyblue')
    plt.title('Sentiment Analysis Histogram')
    plt.xlabel('Sentence Number')
    plt.ylabel('Sentiment Score')
    # Fixed y-range keeps successive plots visually comparable.
    plt.ylim(-12, 12)
    plt.grid(True, linestyle='--', alpha=0.5)
    plt.savefig("plot.png", format="png")
    plt.close()
94
+
95
def ask_gemini(prompt: str) -> str:
    """Send *prompt* to the Gemini model and return its text reply.

    Side effect: extracts per-sentence sentiment scores from the reply and
    saves a bar-chart image (plot.png) via plot_histogram.

    Args:
        prompt: The user message forwarded to the model.

    Returns:
        The model's reply text.
    """
    reply = model.generate_content(prompt).text

    # Build the sentiment chart from the scores embedded in the reply.
    numbers, scores = extract_histogram_data(reply)
    plot_histogram(numbers, scores)

    return reply
105
 
106
if __name__ == "__main__":
    # Manual smoke test with a mixed-sentiment sample utterance.
    print(ask_gemini("I hated every minute of my experience with you guys. You are straight garbage. But i love the way you look"))
sentiment_analysis.py CHANGED
@@ -13,5 +13,6 @@ def get_analysis(file_path)->str:
13
  return transcript , analysis
14
 
15
  if __name__ == "__main__":
16
- file_path = "harvard.wav"
17
- print(f'Analysis result {get_analysis(file_path)}')
 
 
13
  return transcript , analysis
14
 
15
if __name__ == "__main__":
    # Manual smoke test against the default recording path.
    transcript, analysis = get_analysis("recorded_audio.wav")
    print(f'Analysis result {analysis}')
streamlit_app.py CHANGED
@@ -4,6 +4,7 @@ from streamlit_mic_recorder import mic_recorder
4
  import wave
5
  import pyaudio
6
  from sentiment_analysis import get_analysis
 
7
 
8
  def save_audio(audio_data, filename):
9
  """Save the recorded audio in .wav format"""
@@ -14,6 +15,19 @@ def save_audio(audio_data, filename):
14
  wf.setframerate(44100) # Sample rate (44100 Hz)
15
  wf.writeframes(audio_data) # Write audio data to file
16
 
 
 
 
 
 
 
 
 
 
 
 
 
 
17
  def main():
18
  st.title("Streamlit Voice Recorder")
19
 
@@ -46,14 +60,15 @@ def main():
46
  user_message, gemini_response = get_analysis("recorded_audio.wav")
47
 
48
  st.session_state.messages.append({"role": "user", "content": user_message})
49
- st.session_state.messages.append({"role": "gemini", "content": gemini_response})
50
 
51
  # Display the chat history
52
  for msg in st.session_state.messages:
 
53
  if msg["role"] == "user":
54
- st.chat_message(msg["role"]).markdown(f"**User:** {msg['content']}")
55
  elif msg["role"] == "gemini":
56
- st.chat_message(msg["role"]).markdown(f"**Gemini:** {msg['content']}")
57
 
58
  time.sleep(0.1)
59
 
 
4
  import wave
5
  import pyaudio
6
  from sentiment_analysis import get_analysis
7
+ from PIL import Image
8
 
9
  def save_audio(audio_data, filename):
10
  """Save the recorded audio in .wav format"""
 
15
  wf.setframerate(44100) # Sample rate (44100 Hz)
16
  wf.writeframes(audio_data) # Write audio data to file
17
 
18
def display_message(role, content, image_path=None):
    """Render one chat message in the Streamlit UI, optionally with an image.

    Args:
        role: "user" or "gemini"; any other value renders no text bubble.
        content: Message text shown inside the chat bubble.
        image_path: Optional path to an image (e.g. plot.png) displayed
            after the message. An unreadable/missing file shows an error
            widget instead of crashing the app.
    """
    if role == "user":
        st.chat_message(role).markdown(f"**User:** {content}")
    elif role == "gemini":
        st.chat_message(role).markdown(f"**Gemini:** {content}")
    if image_path:
        # Display the image if it exists.
        try:
            img = Image.open(image_path)
            # use_container_width replaces the deprecated use_column_width
            # parameter of st.image (removed in recent Streamlit releases).
            st.image(img, caption="Generated Image", use_container_width=True)
        except Exception as e:
            st.error(f"Error loading image: {e}")
30
+
31
  def main():
32
  st.title("Streamlit Voice Recorder")
33
 
 
60
  user_message, gemini_response = get_analysis("recorded_audio.wav")
61
 
62
  st.session_state.messages.append({"role": "user", "content": user_message})
63
+ st.session_state.messages.append({"role": "gemini", "content": gemini_response, "image_url": "plot.png"})
64
 
65
  # Display the chat history
66
  for msg in st.session_state.messages:
67
+ # Check if the message is from Gemini and display the image accordingly
68
  if msg["role"] == "user":
69
+ display_message(msg["role"], msg["content"])
70
  elif msg["role"] == "gemini":
71
+ display_message(msg["role"], msg["content"], msg.get("image_url"))
72
 
73
  time.sleep(0.1)
74