"""Quick test script for Azure OpenAI"""
import os
import sys

from langchain_openai import AzureChatOpenAI
# Connection settings, all sourced from the environment so the script needs
# no code changes between deployments. Only the key and endpoint are
# mandatory; everything else has a sensible default.
api_key = os.getenv("AZURE_OPENAI_API_KEY")
endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
api_version = os.getenv("OPENAI_API_VERSION", "2024-12-01-preview")
model = os.getenv("OPENAI_MODEL", "gpt-4.1")
temperature = float(os.getenv("AZURE_OPENAI_TEMPERATURE", "0.1"))
max_tokens = int(os.getenv("AZURE_OPENAI_MAX_TOKENS", "8192"))
timeout = int(os.getenv("AZURE_OPENAI_TIMEOUT", "600"))
|
|
# Fail early with a clear message when the required credentials are missing,
# then echo the effective settings so a misconfigured run is easy to diagnose.
if not api_key or not endpoint:
    print("❌ Error: AZURE_OPENAI_API_KEY and AZURE_OPENAI_ENDPOINT are required")
    sys.exit(1)

print("🔍 Testing Azure OpenAI connection...")
print(f" Endpoint: {endpoint}")
print(f" Model: {model}")
print(f" API Version: {api_version}")
|
|
# Exercise the full round trip: client construction plus one short chat call.
# Any failure (bad key, wrong endpoint, missing deployment, timeout) is
# reported and the script exits non-zero so it can be used in CI checks.
try:
    llm = AzureChatOpenAI(
        model=model,
        azure_deployment=model,  # assumes the deployment is named after the model — TODO confirm
        api_key=api_key,
        temperature=temperature,
        max_tokens=max_tokens,
        timeout=timeout,
        max_retries=1,  # fail fast: this is a connectivity smoke test, not production traffic
        azure_endpoint=endpoint,
        api_version=api_version,
    )

    print("✅ Client initialized successfully")
    print("📤 Sending test message...")

    response = llm.invoke([{"role": "user", "content": "Say 'Hello' in one word."}])

    print(f"✅ Success! Response: {response.content}")
    print("🎉 Azure OpenAI is working correctly!")

except Exception as e:
    # Broad catch is deliberate: this is the script's top-level boundary and
    # any error means the connection test failed.
    print(f"❌ Error: {e}")
    sys.exit(1)
|
|
|
|