File size: 7,345 Bytes
363cda9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
"""
Audio processing utilities for voice screening.
Handles audio combining, resampling, and WAV export.
"""
import io
import wave
import struct
import logging
from typing import List, Dict

logger = logging.getLogger(__name__)


def combine_and_export_audio(
    user_chunks: List[Dict],
    agent_chunks: List[Dict],
    session_start_time: float,
    session_id: str
) -> bytes:
    """
    Combine user and agent audio chunks and export as a mono PCM16 WAV file.

    Each chunk is nominally placed at its timestamp offset from the session
    start, but never before the end of the previous chunk of the same stream,
    so each stream stays gapless even when timestamps jitter. Where the user
    and agent streams overlap, samples are mixed by summed-and-clamped
    addition.

    Args:
        user_chunks: List of dicts with 'timestamp' (float, seconds) and
            'data' (raw little-endian PCM16 bytes at 24kHz).
        agent_chunks: Same shape as user_chunks (already 24kHz).
        session_start_time: Session start timestamp for relative positioning.
        session_id: The session ID for logging.

    Returns:
        bytes: A complete WAV file (mono, 16-bit, 24kHz).

    Raises:
        ValueError: If session_start_time is falsy (missing or zero).
    """
    if not session_start_time:
        raise ValueError("Session start time not found")

    # Sample rate: OpenAI Realtime API uses 24kHz PCM16.
    SAMPLE_RATE = 24000
    BYTES_PER_SAMPLE = 2  # 16-bit = 2 bytes

    if not user_chunks and not agent_chunks:
        logger.warning(f"No audio chunks found for session {session_id}")
        # Return a valid but empty WAV file.
        return _pcm16_to_wav(b'', SAMPLE_RATE)

    # NOTE: Frontend resamples user audio to 24kHz before sending, so both
    # streams share the standard rate and no resampling is needed here.
    user_sample_rate = SAMPLE_RATE  # Always 24kHz
    logger.info(f"Using standard sample rate: {user_sample_rate}Hz")

    # Tag every chunk with its stream and sample count, then order
    # chronologically. list.sort is stable, so same-timestamp chunks keep
    # their original per-stream order.
    all_chunks = [
        {
            "timestamp": chunk["timestamp"],
            "type": stream,
            "data": chunk["data"],
            "samples": len(chunk["data"]) // BYTES_PER_SAMPLE,
        }
        for stream, chunks in (("user", user_chunks), ("agent", agent_chunks))
        for chunk in chunks
    ]
    all_chunks.sort(key=lambda c: c["timestamp"])

    chunk_placements = _place_chunks(all_chunks, session_start_time, SAMPLE_RATE)

    # Total length is the furthest sample any placement reaches.
    total_samples = max(
        (p["start_sample"] + p["samples"] for p in chunk_placements), default=0
    )
    if total_samples == 0:
        logger.warning(f"No audio samples to export for session {session_id}")
        return _pcm16_to_wav(b'', SAMPLE_RATE)

    pcm_data = _mix_placements(chunk_placements, total_samples, BYTES_PER_SAMPLE)
    wav_data = _pcm16_to_wav(pcm_data, SAMPLE_RATE)

    duration_seconds = total_samples / SAMPLE_RATE
    user_chunk_count = len([c for c in chunk_placements if c["type"] == "user"])
    agent_chunk_count = len([c for c in chunk_placements if c["type"] == "agent"])
    logger.info(f"Combined audio: {user_chunk_count} user chunks, {agent_chunk_count} agent chunks, "
                f"total {total_samples} samples ({duration_seconds:.2f}s), sorted chronologically")

    return wav_data


def _pcm16_to_wav(pcm_data: bytes, sample_rate: int) -> bytes:
    """Wrap raw mono little-endian PCM16 bytes in a WAV container."""
    wav_buffer = io.BytesIO()
    with wave.open(wav_buffer, 'wb') as wav_file:
        wav_file.setnchannels(1)  # Mono
        wav_file.setsampwidth(2)  # 16-bit = 2 bytes
        wav_file.setframerate(sample_rate)
        wav_file.writeframes(pcm_data)
    return wav_buffer.getvalue()


def _place_chunks(all_chunks: List[Dict], session_start_time: float,
                  sample_rate: int) -> List[Dict]:
    """
    Assign a start sample to each chronologically-sorted chunk.

    A chunk starts at its timestamp offset from session start, clamped so it
    never begins before the end of the previous chunk of the same stream —
    this keeps each stream contiguous (no overlap within one speaker).
    """
    # Next free sample position per stream; None until the stream's first chunk.
    stream_end = {"user": None, "agent": None}
    placements = []
    for chunk in all_chunks:
        offset_seconds = chunk["timestamp"] - session_start_time
        start_sample = max(0, int(offset_seconds * sample_rate))
        prev_end = stream_end[chunk["type"]]
        if prev_end is not None and start_sample < prev_end:
            # Would overlap the same stream's previous chunk: push it to
            # start exactly where that chunk ended.
            start_sample = prev_end
        placements.append({
            "start_sample": start_sample,
            "data": chunk["data"],
            "samples": chunk["samples"],
            "type": chunk["type"],
        })
        stream_end[chunk["type"]] = start_sample + chunk["samples"]
    return placements


def _mix_placements(placements: List[Dict], total_samples: int,
                    bytes_per_sample: int) -> bytes:
    """
    Mix all placed chunks into a single little-endian PCM16 byte string.

    Uses bulk struct (un)packing: decoding/encoding one sample at a time
    costs several struct calls per sample, which is far too slow for
    minutes of 24kHz audio (~1.44M samples per minute).
    """
    mixed = [0] * total_samples  # silence-initialized accumulator
    for placement in placements:
        start = placement["start_sample"]
        n = placement["samples"]
        # Decode only the whole samples; a trailing odd byte (if any) is
        # ignored, matching the original per-sample loop's behavior.
        values = struct.unpack(f'<{n}h', placement["data"][:n * bytes_per_sample])
        for i, sample in enumerate(values):
            # total_samples covers every placement's end, so the index is
            # always in range.
            idx = start + i
            total = mixed[idx] + sample
            # Clamp to int16 range to prevent wrap-around clipping.
            if total > 32767:
                total = 32767
            elif total < -32768:
                total = -32768
            mixed[idx] = total
    return struct.pack(f'<{total_samples}h', *mixed)