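"""Nova Sonic speech-to-speech demo.

Streams microphone audio to the Amazon Nova Sonic model (amazon.nova-sonic-v1:0)
over Bedrock's bidirectional streaming API and plays back the spoken responses,
with simple tool use (date/time and order tracking) and a fallback mode for
environments without audio devices (e.g. Hugging Face Spaces).
"""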
import os
import asyncio
import base64
import json
import uuid
import warnings
import pyaudio
import pytz
import random
import hashlib
import datetime
import time
import inspect

# Import dotenv for environment variables
try:
    from dotenv import load_dotenv
    # Load environment variables from .env file if it exists
    load_dotenv()
except ImportError:
    print("Warning: python-dotenv not installed, using environment variables directly")
# Check for HuggingFace Spaces environment
def is_huggingface_spaces():
    """Detect if we're running on HuggingFace Spaces"""
    return "SPACE_ID" in os.environ or ("SYSTEM" in os.environ and os.environ.get("SYSTEM") == "spaces")


# Handle HuggingFace Spaces secrets
def setup_environment_variables():
    """Set up AWS credentials from various sources, including Hugging Face Spaces secrets"""
    # Explicitly check for HuggingFace Spaces secrets
    if is_huggingface_spaces():
        print("Detected HuggingFace Spaces environment, checking for secrets...")
        # In HF Spaces, secrets might be exposed under different names
        # Check for HF_AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY_ID
        if os.environ.get("HF_AWS_ACCESS_KEY_ID") and not os.environ.get("AWS_ACCESS_KEY_ID"):
            os.environ["AWS_ACCESS_KEY_ID"] = os.environ.get("HF_AWS_ACCESS_KEY_ID")
            print("Using HF_AWS_ACCESS_KEY_ID")
        if os.environ.get("HF_AWS_SECRET_ACCESS_KEY") and not os.environ.get("AWS_SECRET_ACCESS_KEY"):
            os.environ["AWS_SECRET_ACCESS_KEY"] = os.environ.get("HF_AWS_SECRET_ACCESS_KEY")
            print("Using HF_AWS_SECRET_ACCESS_KEY")

    # Set default region if not already set
    if not os.environ.get("AWS_DEFAULT_REGION"):
        os.environ["AWS_DEFAULT_REGION"] = "us-east-1"
        print("Set default AWS region to us-east-1")


# Set up environment variables
setup_environment_variables()
from aws_sdk_bedrock_runtime.client import BedrockRuntimeClient, InvokeModelWithBidirectionalStreamOperationInput
from aws_sdk_bedrock_runtime.models import InvokeModelWithBidirectionalStreamInputChunk, BidirectionalInputPayloadPart
from aws_sdk_bedrock_runtime.config import Config, HTTPAuthSchemeResolver, SigV4AuthScheme
from smithy_aws_core.credentials_resolvers.environment import EnvironmentCredentialsResolver

# Suppress warnings
warnings.filterwarnings("ignore")

# Audio configuration
INPUT_SAMPLE_RATE = 16000
OUTPUT_SAMPLE_RATE = 24000
CHANNELS = 1
FORMAT = pyaudio.paInt16
CHUNK_SIZE = 1024  # Number of frames per buffer

# Debug mode flag
DEBUG = False


def debug_print(message):
    """Print only if debug mode is enabled"""
    if DEBUG:
        functionName = inspect.stack()[1].function
        if functionName == 'time_it' or functionName == 'time_it_async':
            functionName = inspect.stack()[2].function
        print('{:%Y-%m-%d %H:%M:%S.%f}'.format(datetime.datetime.now())[:-3] + ' ' + functionName + ' ' + message)


def time_it(label, methodToRun):
    start_time = time.perf_counter()
    result = methodToRun()
    end_time = time.perf_counter()
    debug_print(f"Execution time for {label}: {end_time - start_time:.4f} seconds")
    return result


async def time_it_async(label, methodToRun):
    start_time = time.perf_counter()
    result = await methodToRun()
    end_time = time.perf_counter()
    debug_print(f"Execution time for {label}: {end_time - start_time:.4f} seconds")
    return result
class BedrockStreamManager:
    """Manages bidirectional streaming with AWS Bedrock using asyncio"""

    # Event templates
    START_SESSION_EVENT = '''{
        "event": {
            "sessionStart": {
                "inferenceConfiguration": {
                    "maxTokens": 1024,
                    "topP": 0.9,
                    "temperature": 0.7
                }
            }
        }
    }'''

    CONTENT_START_EVENT = '''{
        "event": {
            "contentStart": {
                "promptName": "%s",
                "contentName": "%s",
                "type": "AUDIO",
                "interactive": true,
                "role": "USER",
                "audioInputConfiguration": {
                    "mediaType": "audio/lpcm",
                    "sampleRateHertz": 16000,
                    "sampleSizeBits": 16,
                    "channelCount": 1,
                    "audioType": "SPEECH",
                    "encoding": "base64"
                }
            }
        }
    }'''

    AUDIO_EVENT_TEMPLATE = '''{
        "event": {
            "audioInput": {
                "promptName": "%s",
                "contentName": "%s",
                "content": "%s"
            }
        }
    }'''

    TEXT_CONTENT_START_EVENT = '''{
        "event": {
            "contentStart": {
                "promptName": "%s",
                "contentName": "%s",
                "type": "TEXT",
                "role": "%s",
                "interactive": true,
                "textInputConfiguration": {
                    "mediaType": "text/plain"
                }
            }
        }
    }'''

    TEXT_INPUT_EVENT = '''{
        "event": {
            "textInput": {
                "promptName": "%s",
                "contentName": "%s",
                "content": "%s"
            }
        }
    }'''

    TOOL_CONTENT_START_EVENT = '''{
        "event": {
            "contentStart": {
                "promptName": "%s",
                "contentName": "%s",
                "interactive": false,
                "type": "TOOL",
                "role": "TOOL",
                "toolResultInputConfiguration": {
                    "toolUseId": "%s",
                    "type": "TEXT",
                    "textInputConfiguration": {
                        "mediaType": "text/plain"
                    }
                }
            }
        }
    }'''

    CONTENT_END_EVENT = '''{
        "event": {
            "contentEnd": {
                "promptName": "%s",
                "contentName": "%s"
            }
        }
    }'''

    PROMPT_END_EVENT = '''{
        "event": {
            "promptEnd": {
                "promptName": "%s"
            }
        }
    }'''

    SESSION_END_EVENT = '''{
        "event": {
            "sessionEnd": {}
        }
    }'''
    def start_prompt(self):
        """Create a promptStart event"""
        get_default_tool_schema = json.dumps({
            "type": "object",
            "properties": {},
            "required": []
        })

        get_order_tracking_schema = json.dumps({
            "type": "object",
            "properties": {
                "orderId": {
                    "type": "string",
                    "description": "The order number or ID to track"
                },
                "requestNotifications": {
                    "type": "boolean",
                    "description": "Whether to set up notifications for this order",
                    "default": False
                }
            },
            "required": ["orderId"]
        })

        prompt_start_event = {
            "event": {
                "promptStart": {
                    "promptName": self.prompt_name,
                    "textOutputConfiguration": {
                        "mediaType": "text/plain"
                    },
                    "audioOutputConfiguration": {
                        "mediaType": "audio/lpcm",
                        "sampleRateHertz": 24000,
                        "sampleSizeBits": 16,
                        "channelCount": 1,
                        "voiceId": "matthew",
                        "encoding": "base64",
                        "audioType": "SPEECH"
                    },
                    "toolUseOutputConfiguration": {
                        "mediaType": "application/json"
                    },
                    "toolConfiguration": {
                        "tools": [
                            {
                                "toolSpec": {
                                    "name": "getDateAndTimeTool",
                                    "description": "get information about the current date and time",
                                    "inputSchema": {
                                        "json": get_default_tool_schema
                                    }
                                }
                            },
                            {
                                "toolSpec": {
                                    "name": "trackOrderTool",
                                    "description": "Retrieves real-time order tracking information and detailed status updates for customer orders by order ID. Provides estimated delivery dates. Use this tool when customers ask about their order status or delivery timeline.",
                                    "inputSchema": {
                                        "json": get_order_tracking_schema
                                    }
                                }
                            }
                        ]
                    }
                }
            }
        }
        return json.dumps(prompt_start_event)

    def tool_result_event(self, content_name, content, role):
        """Create a tool result event"""
        if isinstance(content, dict):
            content_json_string = json.dumps(content)
        else:
            content_json_string = content

        tool_result_event = {
            "event": {
                "toolResult": {
                    "promptName": self.prompt_name,
                    "contentName": content_name,
                    "content": content_json_string
                }
            }
        }
        return json.dumps(tool_result_event)
    def __init__(self, model_id='amazon.nova-sonic-v1:0', region='us-east-1'):
        """Initialize the stream manager."""
        self.model_id = model_id
        self.region = region

        # Replace RxPy subjects with asyncio queues
        self.audio_input_queue = asyncio.Queue()
        self.audio_output_queue = asyncio.Queue()
        self.output_queue = asyncio.Queue()

        self.response_task = None
        self.stream_response = None
        self.is_active = False
        self.barge_in = False
        self.bedrock_client = None

        # Audio playback components
        self.audio_player = None

        # Text response components
        self.display_assistant_text = False
        self.role = None

        # Session information
        self.prompt_name = str(uuid.uuid4())
        self.content_name = str(uuid.uuid4())
        self.audio_content_name = str(uuid.uuid4())
        self.toolUseContent = ""
        self.toolUseId = ""
        self.toolName = ""
    def _initialize_client(self):
        """Initialize the Bedrock client."""
        # Double-check AWS credentials before initializing
        if not os.environ.get("AWS_ACCESS_KEY_ID") or not os.environ.get("AWS_SECRET_ACCESS_KEY"):
            missing = []
            if not os.environ.get("AWS_ACCESS_KEY_ID"):
                missing.append("AWS_ACCESS_KEY_ID")
            if not os.environ.get("AWS_SECRET_ACCESS_KEY"):
                missing.append("AWS_SECRET_ACCESS_KEY")
            error_msg = f"Missing AWS credentials: {', '.join(missing)}"
            if is_huggingface_spaces():
                error_msg += "\nPlease add these as secrets in your Hugging Face Space settings."
            else:
                error_msg += "\nPlease set these environment variables or add them to a .env file."
            raise ValueError(error_msg)

        try:
            config = Config(
                endpoint_uri=f"https://bedrock-runtime.{self.region}.amazonaws.com",
                region=self.region,
                aws_credentials_identity_resolver=EnvironmentCredentialsResolver(),
                http_auth_scheme_resolver=HTTPAuthSchemeResolver(),
                http_auth_schemes={"aws.auth#sigv4": SigV4AuthScheme()}
            )
            self.bedrock_client = BedrockRuntimeClient(config=config)
        except Exception as e:
            error_msg = f"Failed to initialize AWS Bedrock client: {str(e)}"
            print(error_msg)
            raise ValueError(error_msg)
    async def initialize_stream(self):
        """Initialize the bidirectional stream with Bedrock."""
        if not self.bedrock_client:
            self._initialize_client()

        try:
            self.stream_response = await time_it_async(
                "invoke_model_with_bidirectional_stream",
                lambda: self.bedrock_client.invoke_model_with_bidirectional_stream(
                    InvokeModelWithBidirectionalStreamOperationInput(model_id=self.model_id)
                )
            )
            self.is_active = True

            default_system_prompt = (
                "You are a friend. The user and you will engage in a spoken dialog exchanging the transcripts "
                "of a natural real-time conversation. "
                "When reading order numbers, please read each digit individually, separated by pauses. "
                "For example, order #1234 should be read as 'order number one-two-three-four' "
                "rather than 'order number one thousand two hundred thirty-four'."
            )

            # Send initialization events
            prompt_event = self.start_prompt()
            text_content_start = self.TEXT_CONTENT_START_EVENT % (self.prompt_name, self.content_name, "SYSTEM")
            text_content = self.TEXT_INPUT_EVENT % (self.prompt_name, self.content_name, default_system_prompt)
            text_content_end = self.CONTENT_END_EVENT % (self.prompt_name, self.content_name)

            init_events = [self.START_SESSION_EVENT, prompt_event, text_content_start, text_content, text_content_end]

            for event in init_events:
                await self.send_raw_event(event)
                # Small delay between init events
                await asyncio.sleep(0.1)

            # Start listening for responses
            self.response_task = asyncio.create_task(self._process_responses())

            # Start processing audio input
            asyncio.create_task(self._process_audio_input())

            # Wait a bit to ensure everything is set up
            await asyncio.sleep(0.1)

            debug_print("Stream initialized successfully")
            return self
        except Exception as e:
            self.is_active = False
            print(f"Failed to initialize stream: {str(e)}")
            raise
    async def send_raw_event(self, event_json):
        """Send a raw event JSON to the Bedrock stream."""
        if not self.stream_response or not self.is_active:
            debug_print("Stream not initialized or closed")
            return

        event = InvokeModelWithBidirectionalStreamInputChunk(
            value=BidirectionalInputPayloadPart(bytes_=event_json.encode('utf-8'))
        )

        try:
            await self.stream_response.input_stream.send(event)
            # For debugging large events, log just the event type
            if DEBUG:
                if len(event_json) > 200:
                    event_type = json.loads(event_json).get("event", {}).keys()
                    debug_print(f"Sent event type: {list(event_type)}")
                else:
                    debug_print(f"Sent event: {event_json}")
        except Exception as e:
            debug_print(f"Error sending event: {str(e)}")
            if DEBUG:
                import traceback
                traceback.print_exc()
    async def send_audio_content_start_event(self):
        """Send a content start event to the Bedrock stream."""
        content_start_event = self.CONTENT_START_EVENT % (self.prompt_name, self.audio_content_name)
        await self.send_raw_event(content_start_event)

    async def _process_audio_input(self):
        """Process audio input from the queue and send to Bedrock."""
        while self.is_active:
            try:
                # Get audio data from the queue
                data = await self.audio_input_queue.get()

                audio_bytes = data.get('audio_bytes')
                if not audio_bytes:
                    debug_print("No audio bytes received")
                    continue

                # Base64 encode the audio data
                blob = base64.b64encode(audio_bytes)
                audio_event = self.AUDIO_EVENT_TEMPLATE % (
                    self.prompt_name,
                    self.audio_content_name,
                    blob.decode('utf-8')
                )

                # Send the event
                await self.send_raw_event(audio_event)
            except asyncio.CancelledError:
                break
            except Exception as e:
                debug_print(f"Error processing audio: {e}")
                if DEBUG:
                    import traceback
                    traceback.print_exc()

    def add_audio_chunk(self, audio_bytes):
        """Add an audio chunk to the queue."""
        self.audio_input_queue.put_nowait({
            'audio_bytes': audio_bytes,
            'prompt_name': self.prompt_name,
            'content_name': self.audio_content_name
        })

    async def send_audio_content_end_event(self):
        """Send a content end event to the Bedrock stream."""
        if not self.is_active:
            debug_print("Stream is not active")
            return

        content_end_event = self.CONTENT_END_EVENT % (self.prompt_name, self.audio_content_name)
        await self.send_raw_event(content_end_event)
        debug_print("Audio ended")

    async def send_tool_start_event(self, content_name):
        """Send a tool content start event to the Bedrock stream."""
        content_start_event = self.TOOL_CONTENT_START_EVENT % (self.prompt_name, content_name, self.toolUseId)
        debug_print(f"Sending tool start event: {content_start_event}")
        await self.send_raw_event(content_start_event)

    async def send_tool_result_event(self, content_name, tool_result):
        """Send a tool result event to the Bedrock stream."""
        # Use the actual tool result from processToolUse
        tool_result_event = self.tool_result_event(content_name=content_name, content=tool_result, role="TOOL")
        debug_print(f"Sending tool result event: {tool_result_event}")
        await self.send_raw_event(tool_result_event)

    async def send_tool_content_end_event(self, content_name):
        """Send a tool content end event to the Bedrock stream."""
        tool_content_end_event = self.CONTENT_END_EVENT % (self.prompt_name, content_name)
        debug_print(f"Sending tool content end event: {tool_content_end_event}")
        await self.send_raw_event(tool_content_end_event)

    async def send_prompt_end_event(self):
        """Send a prompt end event to the Bedrock stream."""
        if not self.is_active:
            debug_print("Stream is not active")
            return

        prompt_end_event = self.PROMPT_END_EVENT % (self.prompt_name)
        await self.send_raw_event(prompt_end_event)
        debug_print("Prompt ended")

    async def send_session_end_event(self):
        """Send a session end event to the Bedrock stream."""
        if not self.is_active:
            debug_print("Stream is not active")
            return

        await self.send_raw_event(self.SESSION_END_EVENT)
        self.is_active = False
        debug_print("Session ended")
    async def _process_responses(self):
        """Process incoming responses from Bedrock."""
        try:
            while self.is_active:
                try:
                    output = await self.stream_response.await_output()
                    result = await output[1].receive()
                    if result.value and result.value.bytes_:
                        try:
                            response_data = result.value.bytes_.decode('utf-8')
                            json_data = json.loads(response_data)

                            # Handle different response types
                            if 'event' in json_data:
                                if 'contentStart' in json_data['event']:
                                    debug_print("Content start detected")
                                    content_start = json_data['event']['contentStart']
                                    # Set role
                                    self.role = content_start['role']
                                    # Check for speculative content
                                    if 'additionalModelFields' in content_start:
                                        try:
                                            additional_fields = json.loads(content_start['additionalModelFields'])
                                            if additional_fields.get('generationStage') == 'SPECULATIVE':
                                                debug_print("Speculative content detected")
                                                self.display_assistant_text = True
                                            else:
                                                self.display_assistant_text = False
                                        except json.JSONDecodeError:
                                            debug_print("Error parsing additionalModelFields")
                                elif 'textOutput' in json_data['event']:
                                    text_content = json_data['event']['textOutput']['content']
                                    role = json_data['event']['textOutput']['role']
                                    # Check if there is a barge-in
                                    if '{ "interrupted" : true }' in text_content:
                                        debug_print("Barge-in detected. Stopping audio output.")
                                        self.barge_in = True

                                    if self.role == "ASSISTANT" and self.display_assistant_text:
                                        print(f"Assistant: {text_content}")
                                    elif self.role == "USER":
                                        print(f"User: {text_content}")
                                elif 'audioOutput' in json_data['event']:
                                    audio_content = json_data['event']['audioOutput']['content']
                                    audio_bytes = base64.b64decode(audio_content)
                                    await self.audio_output_queue.put(audio_bytes)
                                elif 'toolUse' in json_data['event']:
                                    self.toolUseContent = json_data['event']['toolUse']
                                    self.toolName = json_data['event']['toolUse']['toolName']
                                    self.toolUseId = json_data['event']['toolUse']['toolUseId']
                                    debug_print(f"Tool use detected: {self.toolName}, ID: {self.toolUseId}")
                                elif 'contentEnd' in json_data['event'] and json_data['event'].get('contentEnd', {}).get('type') == 'TOOL':
                                    debug_print("Processing tool use and sending result")
                                    toolResult = await self.processToolUse(self.toolName, self.toolUseContent)
                                    toolContent = str(uuid.uuid4())
                                    await self.send_tool_start_event(toolContent)
                                    await self.send_tool_result_event(toolContent, toolResult)
                                    await self.send_tool_content_end_event(toolContent)
                                elif 'completionEnd' in json_data['event']:
                                    # Handle end of conversation, no more response will be generated
                                    print("End of response sequence")

                            # Put the response in the output queue for other components
                            await self.output_queue.put(json_data)
                        except json.JSONDecodeError:
                            await self.output_queue.put({"raw_data": response_data})
                except StopAsyncIteration:
                    # Stream has ended
                    break
                except Exception as e:
                    # Handle ValidationException properly
                    if "ValidationException" in str(e):
                        error_message = str(e)
                        print(f"Validation error: {error_message}")
                    else:
                        print(f"Error receiving response: {e}")
                    break
        except Exception as e:
            print(f"Response processing error: {e}")
        finally:
            self.is_active = False
    async def processToolUse(self, toolName, toolUseContent):
        """Return the tool result"""
        tool = toolName.lower()
        debug_print(f"Tool Use Content: {toolUseContent}")

        if tool == "getdateandtimetool":
            # Get current date in PST timezone
            pst_timezone = pytz.timezone("America/Los_Angeles")
            pst_date = datetime.datetime.now(pst_timezone)

            return {
                "formattedTime": pst_date.strftime("%I:%M %p"),
                "date": pst_date.strftime("%Y-%m-%d"),
                "year": pst_date.year,
                "month": pst_date.month,
                "day": pst_date.day,
                "dayOfWeek": pst_date.strftime("%A").upper(),
                "timezone": "PST"
            }
        elif tool == "trackordertool":
            # Extract the tool input (order ID and notification preference) from toolUseContent
            content = toolUseContent.get("content", {})
            content_data = json.loads(content)
            order_id = content_data.get("orderId", "")
            request_notifications = content_data.get("requestNotifications", False)

            # Convert order_id to string if it's an integer
            if isinstance(order_id, int):
                order_id = str(order_id)

            # Validate order ID format
            if not order_id or not isinstance(order_id, str):
                return {
                    "error": "Invalid order ID format",
                    "orderStatus": "",
                    "estimatedDelivery": "",
                    "lastUpdate": ""
                }

            # Create deterministic randomness based on the order ID
            # This ensures the same order ID always returns the same status
            seed = int(hashlib.md5(order_id.encode(), usedforsecurity=False).hexdigest(), 16) % 10000
            random.seed(seed)

            # Possible statuses with appropriate weights
            statuses = [
                "Order received",
                "Processing",
                "Preparing for shipment",
                "Shipped",
                "In transit",
                "Out for delivery",
                "Delivered",
                "Delayed"
            ]
            weights = [10, 15, 15, 20, 20, 10, 5, 3]

            # Select a status based on the weights
            status = random.choices(statuses, weights=weights, k=1)[0]

            # Generate a realistic estimated delivery date
            today = datetime.datetime.now()

            # Handle estimated delivery date based on status
            if status == "Delivered":
                # For delivered items, the delivery date is in the past
                delivery_days = -random.randint(0, 3)
                estimated_delivery = (today + datetime.timedelta(days=delivery_days)).strftime("%Y-%m-%d")
            elif status == "Out for delivery":
                # For out-for-delivery items, delivery is today
                estimated_delivery = today.strftime("%Y-%m-%d")
            else:
                # For other statuses, delivery is in the future
                delivery_days = random.randint(1, 10)
                estimated_delivery = (today + datetime.timedelta(days=delivery_days)).strftime("%Y-%m-%d")

            # Handle notification request if enabled
            notification_message = ""
            if request_notifications and status != "Delivered":
                notification_message = f"You will receive notifications for order {order_id}"

            # Return comprehensive tracking information
            tracking_info = {
                "orderStatus": status,
                "orderNumber": order_id,
                "notificationStatus": notification_message
            }

            # Add appropriate fields based on status
            if status == "Delivered":
                tracking_info["deliveredOn"] = estimated_delivery
            elif status == "Out for delivery":
                tracking_info["expectedDelivery"] = "Today"
            else:
                tracking_info["estimatedDelivery"] = estimated_delivery

            # Add location information based on status
            if status == "In transit":
                tracking_info["currentLocation"] = "Distribution Center"
            elif status == "Delivered":
                tracking_info["deliveryLocation"] = "Front Door"

            # Add additional info for delayed status
            if status == "Delayed":
                tracking_info["additionalInfo"] = "Weather delays possible"

            return tracking_info
    async def close(self):
        """Close the stream properly."""
        if not self.is_active:
            return

        if self.response_task and not self.response_task.done():
            self.response_task.cancel()

        # Send closing events before marking the session inactive
        # (send_session_end_event sets is_active to False)
        await self.send_audio_content_end_event()
        await self.send_prompt_end_event()
        await self.send_session_end_event()

        if self.stream_response:
            await self.stream_response.input_stream.close()
class AudioStreamer:
    """Handles continuous microphone input and audio output using separate streams."""

    def __init__(self, stream_manager):
        self.stream_manager = stream_manager
        self.is_streaming = False
        self.loop = asyncio.get_event_loop()
        self.input_stream = None
        self.output_stream = None
        self.p = None
        self.use_audio_fallback = False

        try:
            # Initialize PyAudio
            debug_print("AudioStreamer Initializing PyAudio...")
            self.p = time_it("AudioStreamerInitPyAudio", pyaudio.PyAudio)
            debug_print("AudioStreamer PyAudio initialized")

            # Check for available audio devices
            input_device_index = None
            output_device_index = None

            info = self.p.get_host_api_info_by_index(0)
            num_devices = info.get('deviceCount')

            # Find input and output devices
            for i in range(num_devices):
                device_info = self.p.get_device_info_by_index(i)
                debug_print(f"Device {i}: {device_info['name']}")
                if device_info.get('maxInputChannels') > 0 and input_device_index is None:
                    input_device_index = i
                    debug_print(f"Selected input device: {device_info['name']}")
                if device_info.get('maxOutputChannels') > 0 and output_device_index is None:
                    output_device_index = i
                    debug_print(f"Selected output device: {device_info['name']}")

            if input_device_index is None or output_device_index is None:
                raise ValueError("No suitable audio devices found")

            # Initialize separate streams for input and output
            # Input stream with callback for microphone
            debug_print("Opening input audio stream...")
            self.input_stream = time_it("AudioStreamerOpenAudio", lambda: self.p.open(
                format=FORMAT,
                channels=CHANNELS,
                rate=INPUT_SAMPLE_RATE,
                input=True,
                input_device_index=input_device_index,
                frames_per_buffer=CHUNK_SIZE,
                stream_callback=self.input_callback
            ))
            debug_print("Input audio stream opened")

            # Output stream for direct writing (no callback)
            debug_print("Opening output audio stream...")
            self.output_stream = time_it("AudioStreamerOpenAudio", lambda: self.p.open(
                format=FORMAT,
                channels=CHANNELS,
                rate=OUTPUT_SAMPLE_RATE,
                output=True,
                output_device_index=output_device_index,
                frames_per_buffer=CHUNK_SIZE
            ))
            debug_print("Output audio stream opened")
        except Exception as e:
            print(f"Warning: Could not initialize audio devices: {e}")
            print("Using fallback mode: Will simulate audio without using real devices")
            if self.p:
                self.p.terminate()
                self.p = None
            self.use_audio_fallback = True
    def input_callback(self, in_data, frame_count, time_info, status):
        """Callback function that schedules audio processing in the asyncio event loop"""
        if self.is_streaming and in_data:
            # Schedule the task in the event loop
            asyncio.run_coroutine_threadsafe(
                self.process_input_audio(in_data),
                self.loop
            )
        return (None, pyaudio.paContinue)

    async def process_input_audio(self, audio_data):
        """Process a single audio chunk directly"""
        try:
            # Send audio to Bedrock immediately
            self.stream_manager.add_audio_chunk(audio_data)
        except Exception as e:
            if self.is_streaming:
                print(f"Error processing input audio: {e}")

    async def generate_simulated_input(self):
        """Generate simulated audio input in fallback mode"""
        import numpy as np

        while self.is_streaming:
            try:
                # Generate a dummy audio chunk with some basic noise
                # This simulates someone speaking into the microphone
                samples = np.random.normal(0, 0.01, CHUNK_SIZE * CHANNELS).astype(np.float32)
                audio_data = (samples * 32767).astype(np.int16).tobytes()

                # Send to Bedrock
                self.stream_manager.add_audio_chunk(audio_data)

                # Wait a bit between chunks
                await asyncio.sleep(0.05)

                # Occasionally "end" the simulated speech to get a response
                if random.random() < 0.05:  # 5% chance to end speech
                    print("Simulated speech ended, awaiting response...")
                    await asyncio.sleep(1.0)  # Wait longer between "sentences"
            except Exception as e:
                if self.is_streaming:
                    print(f"Error generating simulated audio: {e}")
                await asyncio.sleep(0.5)
    async def play_output_audio(self):
        """Play audio responses from Nova Sonic"""
        while self.is_streaming:
            try:
                # Check for barge-in flag
                if self.stream_manager.barge_in:
                    # Clear the audio queue
                    while not self.stream_manager.audio_output_queue.empty():
                        try:
                            self.stream_manager.audio_output_queue.get_nowait()
                        except asyncio.QueueEmpty:
                            break
                    self.stream_manager.barge_in = False
                    # Small sleep after clearing
                    await asyncio.sleep(0.05)
                    continue

                # Get audio data from the stream manager's queue
                audio_data = await asyncio.wait_for(
                    self.stream_manager.audio_output_queue.get(),
                    timeout=0.1
                )

                if audio_data and self.is_streaming:
                    if self.use_audio_fallback:
                        # In fallback mode, just log that we received audio
                        audio_size = len(audio_data)
                        print(f"Received {audio_size} bytes of audio from Nova")
                        # Store the audio for potential replay
                        self.stream_manager.output_queue.put_nowait({
                            "event": {
                                "audioOutput": {
                                    "content": "Audio would play here if audio devices were available"
                                }
                            }
                        })
                    else:
                        # Write directly to the output stream in smaller chunks
                        chunk_size = CHUNK_SIZE  # Use the same chunk size as the stream

                        # Write the audio data in chunks to avoid blocking too long
                        for i in range(0, len(audio_data), chunk_size):
                            if not self.is_streaming:
                                break

                            end = min(i + chunk_size, len(audio_data))
                            chunk = audio_data[i:end]

                            # Create a new function that captures the chunk by value
                            def write_chunk(data):
                                return self.output_stream.write(data)

                            # Pass the chunk to the function
                            await asyncio.get_event_loop().run_in_executor(None, write_chunk, chunk)

                            # Brief yield to allow other tasks to run
                            await asyncio.sleep(0.001)
            except asyncio.TimeoutError:
                # No data available within timeout, just continue
                continue
            except Exception as e:
                if self.is_streaming:
                    print(f"Error playing output audio: {str(e)}")
                    if DEBUG:
                        import traceback
                        traceback.print_exc()
                await asyncio.sleep(0.05)
    async def start_streaming(self):
        """Start streaming audio."""
        if self.is_streaming:
            return

        if self.use_audio_fallback:
            print("Starting audio in fallback mode (no real audio devices)...")
        else:
            print("Starting audio streaming. Speak into your microphone...")
        print("Press Enter to stop streaming...")

        # Send audio content start event
        await time_it_async("send_audio_content_start_event", lambda: self.stream_manager.send_audio_content_start_event())

        self.is_streaming = True

        # Start with a welcome message from Nova
        await self.send_text_message("Hi there! I'm Nova, your conversation partner. How are you doing today?")

        # Set up tasks based on mode
        tasks = []
        if self.use_audio_fallback:
            # In fallback mode, simulate input
            self.input_task = asyncio.create_task(self.generate_simulated_input())
            tasks.append(self.input_task)
        else:
            # In normal mode, start the actual audio stream
            if self.input_stream and not self.input_stream.is_active():
                self.input_stream.start_stream()

        # Always process output (even in fallback mode)
        self.output_task = asyncio.create_task(self.play_output_audio())
        tasks.append(self.output_task)

        # Wait for user to press Enter to stop
        await asyncio.get_event_loop().run_in_executor(None, input)

        # Once input() returns, stop streaming
        await self.stop_streaming()
    async def send_text_message(self, text):
        """Send a text message to Nova to simulate user input"""
        try:
            # Create text content start event
            content_name = str(time.time())
            text_content_start = self.stream_manager.TEXT_CONTENT_START_EVENT % (
                self.stream_manager.prompt_name,
                content_name,
                "USER"
            )
            await self.stream_manager.send_raw_event(text_content_start)

            # Create text input event
            text_input = self.stream_manager.TEXT_INPUT_EVENT % (
                self.stream_manager.prompt_name,
                content_name,
                text
            )
            await self.stream_manager.send_raw_event(text_input)

            # Create content end event
            content_end = self.stream_manager.CONTENT_END_EVENT % (
                self.stream_manager.prompt_name,
                content_name
            )
            await self.stream_manager.send_raw_event(content_end)

            print(f"Sent text message to Nova: {text}")
            return True
        except Exception as e:
            print(f"Error sending text message: {e}")
            return False
    async def stop_streaming(self):
        """Stop streaming audio."""
        if not self.is_streaming:
            return

        self.is_streaming = False

        # Cancel the tasks
        tasks = []
        if hasattr(self, 'input_task') and not self.input_task.done():
            tasks.append(self.input_task)
        if hasattr(self, 'output_task') and not self.output_task.done():
            tasks.append(self.output_task)
        for task in tasks:
            task.cancel()
        if tasks:
            await asyncio.gather(*tasks, return_exceptions=True)

        # Clean up audio resources if not in fallback mode
        if not self.use_audio_fallback:
            # Stop and close the streams
            if self.input_stream:
                try:
                    if self.input_stream.is_active():
                        self.input_stream.stop_stream()
                    self.input_stream.close()
                except Exception as e:
                    print(f"Error closing input stream: {e}")
            if self.output_stream:
                try:
                    if self.output_stream.is_active():
                        self.output_stream.stop_stream()
                    self.output_stream.close()
                except Exception as e:
                    print(f"Error closing output stream: {e}")
            if self.p:
                try:
                    self.p.terminate()
                except Exception as e:
                    print(f"Error terminating PyAudio: {e}")

        # Always close the stream manager
        await self.stream_manager.close()
async def main(debug=False):
    """Main function to run the application."""
    global DEBUG
    DEBUG = debug

    # Check AWS credentials first
    missing_creds = []
    if not os.environ.get("AWS_ACCESS_KEY_ID"):
        missing_creds.append("AWS_ACCESS_KEY_ID")
    if not os.environ.get("AWS_SECRET_ACCESS_KEY"):
        missing_creds.append("AWS_SECRET_ACCESS_KEY")

    if missing_creds:
        error_message = f"Missing AWS credentials: {', '.join(missing_creds)}"
        if is_huggingface_spaces():
            error_message += "\nPlease add these secrets in your Hugging Face Space's settings."
        else:
            error_message += "\nPlease set these environment variables or create a .env file."
        print(error_message)
        return

    try:
        # Create stream manager
        stream_manager = BedrockStreamManager(
            model_id='amazon.nova-sonic-v1:0',
            region=os.environ.get("AWS_DEFAULT_REGION", "us-east-1")
        )

        # Create audio streamer
        audio_streamer = AudioStreamer(stream_manager)

        # Initialize the stream
        await time_it_async("initialize_stream", stream_manager.initialize_stream)

        # This will run until the user presses Enter
        await audio_streamer.start_streaming()
    except KeyboardInterrupt:
        print("Interrupted by user")
    except Exception as e:
        print(f"Error running application: {e}")
        if DEBUG:
            import traceback
            traceback.print_exc()
    finally:
        # Clean up
        if 'audio_streamer' in locals():
            await audio_streamer.stop_streaming()
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description='Nova Sonic Python Streaming')
    parser.add_argument('--debug', action='store_true', help='Enable debug mode')
    args = parser.parse_args()

    # Use environment variables for AWS credentials
    # These should be set in your environment or Hugging Face secrets:
    #   AWS_ACCESS_KEY_ID     - set via environment variable
    #   AWS_SECRET_ACCESS_KEY - set via environment variable
    #   AWS_DEFAULT_REGION    - defaults to "us-east-1" if not set
    if not os.environ.get('AWS_DEFAULT_REGION'):
        os.environ['AWS_DEFAULT_REGION'] = "us-east-1"

    # Run the main function
    try:
        asyncio.run(main(debug=args.debug))
    except Exception as e:
        print(f"Application error: {e}")
        if args.debug:
            import traceback
            traceback.print_exc()
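# Example invocation (the file name below is illustrative; use whatever name this script is saved under):
#   python nova_sonic_app.py
#   python nova_sonic_app.py --debug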