# LLMates / utils.py
# Author: Sanchit Verma
# Refactor code for better readability and maintainability (commit abcf75a)
import requests
import openai
from config import (
OPENAI_API_KEY,
OPENAI_MODEL,
USE_OLLAMA,
OLLAMA_MODEL,
USE_OPENROUTER,
OPENROUTER_API_KEY,
OPENROUTER_MODEL,
TEMPERATURE,
)
openai.api_key = OPENAI_API_KEY
def query_openai(messages):
try:
response = openai.ChatCompletion.create(
model=OPENAI_MODEL, messages=messages, temperature=TEMPERATURE
)
return response["choices"][0]["message"]["content"]
except Exception as e:
return f"⚠️ OpenAI Error: {e}"
def query_openrouter(messages):
headers = {
"Authorization": f"Bearer {OPENROUTER_API_KEY}",
"Content-Type": "application/json",
}
payload = {
"model": OPENROUTER_MODEL,
"messages": messages,
"temperature": TEMPERATURE,
}
try:
response = requests.post(
"https://openrouter.ai/api/v1/chat/completions",
headers=headers,
json=payload,
)
return response.json()["choices"][0]["message"]["content"]
except Exception as e:
return f"⚠️ OpenRouter Error: {e}"
def query_ollama(prompt):
try:
response = requests.post(
"http://localhost:11434/api/generate",
json={"model": OLLAMA_MODEL, "prompt": prompt},
)
return response.json()["response"]
except Exception as e:
return f"⚠️ Ollama Error: {e}"
def generate_response(persona_prompt, user_input, history):
# Handle Ollama case
if USE_OLLAMA:
# Convert history to Ollama format
full_prompt = f"{persona_prompt}\n\n"
for msg in history:
full_prompt += f"{msg['role'].capitalize()}: {msg['content']}\n"
full_prompt += f"User: {user_input}\nBot:"
reply = query_ollama(full_prompt)
return history + [
{"role": "user", "content": user_input},
{"role": "assistant", "content": reply},
]
# Handle OpenAI/OpenRouter case
messages = [{"role": "system", "content": persona_prompt}]
messages.extend(history)
messages.append({"role": "user", "content": user_input})
if USE_OPENROUTER:
reply = query_openrouter(messages)
else:
reply = query_openai(messages)
# Return the complete history with the new messages
return history + [
{"role": "user", "content": user_input},
{"role": "assistant", "content": reply},
]