File size: 3,715 Bytes
from concurrent.futures import ThreadPoolExecutor
from openai import OpenAI
import os
from dotenv import load_dotenv
from typing import List

load_dotenv()

# constants
BASE_URL = "https://api.novita.ai/v3/openai"
MODEL_NAME = "meta-llama/llama-3.2-1b-instruct"

# Grab the key, but use a dummy string if missing so the server doesn't crash on boot
my_key = os.environ.get("NOVITA_API_KEY", "dummy_key")

client = OpenAI(
    base_url=BASE_URL,
    api_key=my_key,
)
def generate_commented_code(code: str, max_token: int):
    """Ask the model to add comments/docstrings to *code* without rewriting it.

    Returns the model's text response, or an ``"Error generating comments: ..."``
    string if the API call fails.
    """
    system_prompt = (
        "You are a senior software engineer skilled in writing precise comments only. "
        "Add clear, concise, and professional comments to the provided code. "
        "Include docstrings for all functions/classes and inline comments for complex logic. "
        "Strictly do not add or provide alternative code."
    )
    conversation = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": code},
    ]
    try:
        response = client.chat.completions.create(
            model=MODEL_NAME,
            temperature=0.7,
            stream=False,
            max_tokens=max_token,
            messages=conversation,
        )
        # Single, non-streamed completion: the answer is the first choice.
        return response.choices[0].message.content
    except Exception as ex:
        # Report failures as text so the caller can display them directly.
        return f"Error generating comments: {ex}"
def generate_documentation(code: str, max_token: int):
    """Request structured JSON documentation for *code* from the model.

    Returns the model's text response (expected to be a JSON object), or an
    ``"Error generating documentation: ..."`` string if the API call fails.
    """
    system_prompt = (
        "You are a senior software engineer skilled in writing structured documentation only. "
        "Produce a strictly formatted JSON object with the following keys: "
        "'name', 'description', 'parameters', 'returns', 'exceptions', and 'examples'."
    )
    try:
        completion = client.chat.completions.create(
            model=MODEL_NAME,
            temperature=0.7,
            stream=False,
            max_tokens=max_token,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": code},
            ],
        )
        # Non-streamed call: pull the text of the first (only) choice.
        return completion.choices[0].message.content
    except Exception as ex:
        # Surface the failure as a plain string instead of raising.
        return f"Error generating documentation: {ex}"
def generate_improved_code(code: str, max_token: int):
    """Ask the model for an optimized version of *code* (or 'Already optimal').

    Returns the model's text response, which should include time-complexity
    notes, or an ``"Error generating improved code: ..."`` string on failure.
    """
    system_prompt = (
        "You are a senior software engineer. Analyze the code for possible improvements. "
        "If optimization is possible, provide a cleaner or more efficient version. "
        "Otherwise, write 'Already optimal'. "
        "Always report the time complexity of both the original and improved versions."
    )
    request_messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": code},
    ]
    try:
        result = client.chat.completions.create(
            model=MODEL_NAME,
            messages=request_messages,
            max_tokens=max_token,
            temperature=0.7,
            stream=False,
        )
        # One completion requested; its content is the whole answer.
        return result.choices[0].message.content
    except Exception as ex:
        # Degrade gracefully: hand the error text back to the caller.
        return f"Error generating improved code: {ex}"
def generate_code_comments_and_docs(code: str, max_token: int = 1024) -> List[str]:
    """Run the three LLM tasks (comments, documentation, improvement) concurrently.

    Parameters
    ----------
    code : str
        Source code to analyze.
    max_token : int, optional
        Per-request completion token budget (default 1024).

    Returns
    -------
    List[str]
        ``[commented_code, documentation_json, improved_code]``.  Each worker
        converts its own API errors into a string, so entries may be error
        messages rather than model output.
    """
    try:
        # The three requests are independent, I/O-bound API calls, so fan
        # them out on threads and collect results in a fixed order.
        with ThreadPoolExecutor() as executor:
            futures = [
                executor.submit(generate_commented_code, code, max_token),
                executor.submit(generate_documentation, code, max_token),
                executor.submit(generate_improved_code, code, max_token),
            ]
            return [future.result() for future in futures]
    except Exception as ex:
        # Each worker already catches its own API errors, so this path only
        # triggers on executor-level failures; keep the 3-element shape.
        return [f"Error generating comments: {ex}", "{}", f"Error: {ex}"]