"""Helper for generating chat completions via the OpenRouter API (LLMInterface app)."""
import os
from openai import OpenAI
from dotenv import load_dotenv
import logging
# Load environment variables (e.g. OPENROUTER_API_KEY) from a local .env file.
load_dotenv()
# Module-level logger following the standard `__name__` convention.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def generate_response(user_prompt: str, model_name: str, system_prompt: str) -> str:
    """Generate a chat completion through OpenRouter and return the reply text.

    Args:
        user_prompt: The end-user message sent as the ``user`` role.
        model_name: Display name of the model; must be a key of the
            internal display-name -> OpenRouter model-id mapping.
        system_prompt: Instructions sent as the ``system`` role.

    Returns:
        The assistant's reply text on success, or a human-readable
        ``"Error..."`` string on any failure (missing API key, unknown
        model, or API error). This function never raises.
    """
    # Map UI display names to actual OpenRouter model identifiers.
    model_mapping = {
        "Claude Haiku": "anthropic/claude-3.5-haiku-20241022:beta",
        "DeepSeek": "deepseek/deepseek-r1:free",
        "Claude Premium": "anthropic/claude-3.5-sonnet",
        "GPT Pro": "openai/gpt-4-0125-preview"
    }

    # Validate inputs up front (guard clauses) before doing any work —
    # the original built the API client even for an invalid model name.
    api_key = os.getenv("OPENROUTER_API_KEY")
    if not api_key:
        return "Error: API key not found in environment variables"

    actual_model = model_mapping.get(model_name)
    if not actual_model:
        return "Error: Invalid model selection"

    # Keep the try block minimal: only the client construction and the
    # network call can realistically raise.
    try:
        # OpenRouter is OpenAI-compatible; point the OpenAI SDK at its base URL.
        client = OpenAI(
            base_url="https://openrouter.ai/api/v1",
            api_key=api_key,
            default_headers={
                "HTTP-Referer": "null",
                "X-Title": "LLMInterface",
            }
        )
        response = client.chat.completions.create(
            model=actual_model,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt}
            ]
        )
        return response.choices[0].message.content
    except Exception as e:
        # Boundary handler: log with full traceback, return an error string
        # (callers expect a string, never an exception).
        logger.exception("Error in generate_response")
        return f"Error occurred: {str(e)}"