"""
Teste para Pipeline Experimental Qwen3-0.6B
===========================================
Testa a implementação experimental usando Qwen3-0.6B
"""
|
|
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

import numpy as np
import torch
from pipelines.llama_omni2_experimental_qwen3 import LLaMAOmni2Qwen3Experimental
|
|
def test_qwen3_pipeline():
    """End-to-end smoke test: load the pipeline, feed it synthetic audio,
    and check that a text response (and optionally an audio file) comes back.

    Returns True on success, False on any failure.
    """
    banner = "=" * 60
    print("\n" + banner)
    print("🧪 TESTE PIPELINE EXPERIMENTAL - QWEN3-0.6B")
    print(banner)

    # Prefer GPU when available; everything below runs on this device.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    print(f"🖥️ Device: {device}")

    # Loading the model is the first failure point — abort early if it breaks.
    try:
        print("\n📦 Carregando pipeline experimental...")
        model = LLaMAOmni2Qwen3Experimental(device=device)
        print("✅ Pipeline carregado com sucesso!")
    except Exception as exc:
        print(f"❌ Erro ao carregar pipeline: {exc}")
        return False

    # Synthesize 3 s of low-amplitude white noise as a stand-in for speech.
    print("\n🎵 Gerando áudio de teste...")
    sample_rate = 16000
    duration = 3
    audio = np.random.randn(sample_rate * duration).astype(np.float32) * 0.01
    print(f" • Áudio shape: {audio.shape}")
    print(f" • Duração: {duration}s")

    print("\n🔄 Processando...")
    try:
        import time

        started = time.time()
        response_text, audio_path = model.process(audio)
        elapsed = time.time() - started

        print(f"⏱️ Tempo de processamento: {elapsed:.2f}s")

        print("\n📊 RESULTADOS:")
        print("-" * 40)

        # A missing text response means the pipeline failed.
        if not response_text:
            print("❌ Nenhuma resposta gerada")
            return False
        print(f"✅ Resposta obtida: '{response_text}'")
        print(f" • Comprimento: {len(response_text)} caracteres")

        # Audio output is optional: report it if present, warn otherwise.
        if audio_path and os.path.exists(audio_path):
            print(f"🔊 Áudio gerado: {audio_path}")
            size_kb = os.path.getsize(audio_path) / 1024
            print(f" • Tamanho: {size_kb:.1f} KB")
            # Remove the temporary file so repeated runs don't accumulate output.
            os.remove(audio_path)
        else:
            print("⚠️ Áudio não gerado")

        return True

    except Exception as exc:
        print(f"❌ Erro durante processamento: {exc}")
        import traceback
        traceback.print_exc()
        return False
|
|
def test_qwen3_components():
    """Exercise the pipeline's individual stages (load_speech, encode_speech)
    and sanity-check its configuration.

    Returns True when every stage runs, False on any exception.
    """
    banner = "=" * 60
    print("\n" + banner)
    print("🔧 TESTE DOS COMPONENTES QWEN3")
    print(banner)

    device = "cuda" if torch.cuda.is_available() else "cpu"

    try:
        model = LLaMAOmni2Qwen3Experimental(device=device)

        # Stage 1: raw waveform -> mel spectrogram.
        print("\n1. Testando load_speech...")
        waveform = np.random.randn(16000 * 2).astype(np.float32)
        mel = model.load_speech(waveform)
        print(f" • Audio shape: {waveform.shape}")
        print(f" • Mel shape: {mel.shape}")
        print(" ✅ load_speech funcionando")

        # Stage 2: batched mel spectrogram -> encoder features.
        print("\n2. Testando encode_speech...")
        batched_mel = mel.unsqueeze(0).to(device)
        encoded = model.encode_speech(batched_mel)
        print(f" • Input shape: {batched_mel.shape}")
        print(f" • Output shape: {encoded.shape}")
        print(" ✅ encode_speech funcionando")

        # Stage 3: configuration sanity check.
        print(f"\n3. Hidden size do Qwen3: {model.hidden_size}")
        print(" ✅ Configuração correta")

        return True

    except Exception as exc:
        print(f"❌ Erro nos componentes: {exc}")
        import traceback
        traceback.print_exc()
        return False
|
|
def main():
    """Run both test suites and print a pass/fail summary."""
    print("🧪 TESTES DO PIPELINE EXPERIMENTAL QWEN3-0.6B")

    # Component-level checks first, then the full end-to-end pipeline.
    components_ok = test_qwen3_components()
    pipeline_ok = test_qwen3_pipeline()

    banner = "=" * 60
    print("\n" + banner)
    print("📋 RESUMO DOS TESTES")
    print(banner)
    components_label = '✅ PASSOU' if components_ok else '❌ FALHOU'
    pipeline_label = '✅ PASSOU' if pipeline_ok else '❌ FALHOU'
    print(f"• Componentes: {components_label}")
    print(f"• Pipeline completo: {pipeline_label}")

    if components_ok and pipeline_ok:
        print("\n🎉 TODOS OS TESTES PASSARAM!")
        print("Pipeline experimental Qwen3-0.6B está funcionando!")
    else:
        print("\n⚠️ ALGUNS TESTES FALHARAM")
        print("Verifique as mensagens de erro acima")

    print(banner)
|
|
# Script entry point: run the full test suite when executed directly.
if __name__ == "__main__":
    main()