# services/client.py — Novita AI (OpenAI-compatible) client helpers.
# Last update: commit faed992 (verified) by MalikShehram.
from concurrent.futures import ThreadPoolExecutor
from openai import OpenAI
import os
from dotenv import load_dotenv
from typing import List
load_dotenv()
# constants
BASE_URL = "https://api.novita.ai/v3/openai"
MODEL_NAME = "meta-llama/llama-3.2-1b-instruct"
# Grab the key, but use a dummy string if missing so the server doesn't crash on boot
my_key = os.environ.get("NOVITA_API_KEY", "dummy_key")
client = OpenAI(
base_url=BASE_URL,
api_key=my_key,
)
def generate_commented_code(code: str, max_token: int):
system_prompt = (
"You are a senior software engineer skilled in writing precise comments only. "
"Add clear, concise, and professional comments to the provided code. "
"Include docstrings for all functions/classes and inline comments for complex logic. "
"Strictly do not add or provide alternative code."
)
try:
chat_responses = client.chat.completions.create(
model=MODEL_NAME,
temperature=0.7,
stream=False,
max_tokens=max_token,
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": code}
]
)
return chat_responses.choices[0].message.content
except Exception as ex:
return f"Error generating comments: {ex}"
def generate_documentation(code: str, max_token: int):
system_prompt = (
"You are a senior software engineer skilled in writing structured documentation only. "
"Produce a strictly formatted JSON object with the following keys: "
"'name', 'description', 'parameters', 'returns', 'exceptions', and 'examples'."
)
try:
chat_responses = client.chat.completions.create(
model=MODEL_NAME,
temperature=0.7,
stream=False,
max_tokens=max_token,
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": code}
]
)
return chat_responses.choices[0].message.content
except Exception as ex:
return f"Error generating documentation: {ex}"
def generate_improved_code(code: str, max_token: int):
system_prompt = (
"You are a senior software engineer. Analyze the code for possible improvements. "
"If optimization is possible, provide a cleaner or more efficient version. "
"Otherwise, write 'Already optimal'. "
"Always report the time complexity of both the original and improved versions."
)
try:
chat_responses = client.chat.completions.create(
model=MODEL_NAME,
temperature=0.7,
stream=False,
max_tokens=max_token,
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": code}
]
)
return chat_responses.choices[0].message.content
except Exception as ex:
return f"Error generating improved code: {ex}"
def generate_code_comments_and_docs(code: str, max_token: int = 1024) -> List:
try:
with ThreadPoolExecutor() as executor:
future1 = executor.submit(generate_commented_code, code, max_token)
future2 = executor.submit(generate_documentation, code, max_token)
future3 = executor.submit(generate_improved_code, code, max_token)
commented = future1.result()
documentation = future2.result()
improved = future3.result()
return [commented, documentation, improved]
except Exception as ex:
return [f"Error generating comments: {ex}", "{}", f"Error: {ex}"]