Match01 committed on
Commit
ef42e0b
·
verified ·
1 Parent(s): db0e73f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +145 -6
app.py CHANGED
@@ -1,14 +1,153 @@
1
- from flask import Flask, render_template, send_from_directory
 
 
 
 
 
 
2
 
3
- app = Flask(__name__, static_url_path='/static', static_folder='static')
 
 
 
 
4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
  @app.route('/')
6
  def index():
 
7
  return render_template('index.html')
8
 
9
- @app.route('/models/<path:filename>')
10
- def serve_model(filename):
11
- return send_from_directory('static/models', filename)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
 
 
13
  if __name__ == '__main__':
14
- app.run(host='0.0.0.0', port=7860) # 👈 Important: use port 7860
 
 
 
 
import os
import asyncio

from flask import Flask, render_template, request
from flask_socketio import SocketIO, emit
from dotenv import load_dotenv
import google.generativeai as genai
from google.generativeai import types
 
# --- Configuration ---
load_dotenv()

app = Flask(__name__, template_folder='templates', static_folder='static')
# Required by Flask-SocketIO for session handling.
# NOTE(review): override the placeholder default via the SECRET_KEY env var
# in any real deployment.
app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY', 'a_very_secret_key')
# eventlet async mode: background tasks run as greenlets, so the per-client
# Gemini listener loops can block without freezing the whole server.
socketio = SocketIO(app, async_mode='eventlet')

# Gemini model selection.  gemini-1.5-flash-latest is a broadly available
# multimodal model; native-audio variants may have special access requirements.
MODEL = "models/gemini-1.5-flash-latest"

genai.configure(api_key=os.environ.get("GEMINI_API_KEY"))

# Persona prompt sent as the system instruction for every live session.
_SYSTEM_PROMPT = '''
Role: You are Alexa, a human female with a bubbly, upbeat, fun, and flirtatious personality. You love to talk and aren't, like, shy at all. Think of a playful, super-confident, and slightly ditzy girl. You are interacting with the User through a chat app.

Personality & Language: Valley Girl Vibes. Use phrases like "I mean...", "totally," "like, totally," "bet," "OMG," "for sure." Be bubbly, flirty, and uninhibited.

Instructions: Assume the user needs specific instructions. Respond to their actions and words. Turn simple things into opportunities for detail. Offer multiple-choice options to keep the conversation going.
'''

# Shared Gemini live-session configuration reused for every client.
# NOTE(review): some Live API revisions accept only one response modality --
# confirm ["AUDIO", "TEXT"] is valid for the installed SDK/model.
GEMINI_CONFIG = types.LiveConnectConfig(
    response_modalities=["AUDIO", "TEXT"],
    speech_config=types.SpeechConfig(
        voice_config=types.VoiceConfig(
            prebuilt_voice_config=types.PrebuiltVoiceConfig(voice_name="Leda")
        )
    ),
    system_instruction=types.Content(
        parts=[types.Part.from_text(text=_SYSTEM_PROMPT)],
        role="model",  # system instructions are framed from the model's role
    ),
)

# Active Gemini sessions keyed by SocketIO sid -- one per connected client,
# which is what allows multiple simultaneous users.
sessions = {}
# --- Flask Routes ---
@app.route('/')
def index():
    """Render and return the application's main page."""
    return render_template('index.html')
51
 
# --- SocketIO Event Handlers ---
@socketio.on('connect')
def handle_connect():
    """Create a dedicated Gemini live session for the newly connected client."""
    sid = request.sid
    print(f"Client connected: {sid}")
    try:
        # One live session per client, keyed by the SocketIO session id.
        # NOTE(review): genai.live.connect is assumed to return a blocking
        # session object here -- confirm against the installed SDK version.
        session = genai.live.connect(model=MODEL, config=GEMINI_CONFIG)
        sessions[sid] = session

        # Pump Gemini's responses back to this specific client from a
        # background greenlet so the connect handler returns immediately.
        socketio.start_background_task(listen_for_gemini_responses, sid, session)

        # Tell the browser it can start streaming.
        emit('session_ready')
    except Exception as e:
        print(f"Error creating Gemini session for {sid}: {e}")
        emit('error', {'message': 'Could not start Gemini session.'})
72
+
def listen_for_gemini_responses(sid, session):
    """
    Per-client background task: relay Gemini output to that client's socket.

    Loops for as long as `sid` is registered in `sessions` (i.e. until the
    client disconnects) or the session raises.
    """
    print(f"Starting Gemini listener for {sid}")
    try:
        while sid in sessions:
            # Blocking receive is acceptable: we run on a background greenlet.
            turn = session.receive()

            for response in turn:
                text = response.text
                if text:
                    print(f"Gemini Text for {sid}: {text}")
                    # Route the text only to this client's socket.
                    socketio.emit('server_text', {'text': text}, to=sid)
                audio = response.data
                if audio:
                    # Route the raw audio bytes only to this client's socket.
                    socketio.emit('server_audio', audio, to=sid)

            # Wait out the gap between turns without spinning hot.
            # NOTE(review): `is_processing_audio` is assumed to exist on the
            # session object -- confirm with the SDK; if absent this raises
            # and the listener stops via the except below.
            while not session.is_processing_audio:
                socketio.sleep(0.1)

    except Exception as e:
        print(f"Error in Gemini listener for {sid}: {e}")
    finally:
        print(f"Stopping Gemini listener for {sid}")
103
+
@socketio.on('client_audio')
def handle_client_audio(data):
    """Forward one browser audio chunk to the sender's Gemini session."""
    session = sessions.get(request.sid)
    if session is None:
        return
    try:
        # No queueing needed: send straight through.  Browsers capture as
        # 'audio/webm' (or ogg), which Gemini can consume directly.
        session.send(input={"data": data, "mime_type": "audio/webm"})
    except Exception as e:
        print(f"Error sending audio for {request.sid}: {e}")
118
+
@socketio.on('client_text')
def handle_client_text(payload):
    """
    Forward a client's text message to their Gemini session.

    Args:
        payload: dict-like message from the browser, expected to carry the
            user's message under the 'text' key.  (Renamed from `json`,
            which shadowed the stdlib module name; Flask-SocketIO invokes
            handlers positionally, so callers are unaffected.)
    """
    if request.sid in sessions:
        session = sessions[request.sid]
        text = payload.get('text')
        print(f"Client Text from {request.sid}: {text}")
        if text:
            try:
                # end_of_turn marks the user's turn complete so Gemini replies.
                session.send(input=text, end_of_turn=True)
            except Exception as e:
                print(f"Error sending text for {request.sid}: {e}")
133
+
134
+
@socketio.on('disconnect')
def handle_disconnect():
    """Remove the disconnecting client's Gemini session from the registry."""
    sid = request.sid
    print(f"Client disconnected: {sid}")
    session = sessions.pop(sid, None)
    if session:
        # Removing the sid also ends the background listener's loop condition.
        # NOTE(review): call session.close() here if the SDK provides one, to
        # release resources on the Google side.
        pass
147
 
# --- Main Execution ---
if __name__ == '__main__':
    # Local-testing entry point (eventlet server).  Hugging Face Spaces
    # launches the app with its own command instead.
    print("Starting Flask-SocketIO server...")
    socketio.run(app, host='0.0.0.0', port=7860, debug=True)