# MFLF-Demo / src/chatbot/llm_engine.py
# (commit e60f7d2: "change/use-4.1-mini-for-testing-instead")
# agent_llm_engine.py
import os
from langchain_openai import ChatOpenAI
import openai
from prompts.chat_completion_prompts import (
feedback_generation_prompt,
expense_description_feedback_prompt,
map_input_to_project_deliverables_prompt
)
from dotenv import load_dotenv
from datetime import datetime
# Load environment variables from .env file
load_dotenv()
# Raw OpenAI client for the Responses API calls below; picks up
# OPENAI_API_KEY from the environment populated by load_dotenv().
client = openai.OpenAI()
# Model used for all calls in this module (per commit: 4.1-mini for testing).
MODEL = "gpt-4.1-mini"
# LangChain chat model sharing the same underlying model; temperature=0 for
# deterministic output. NOTE(review): not referenced in this file's visible
# code — presumably consumed by an agent defined elsewhere; verify before removing.
llm_overall_agent = ChatOpenAI(
model=MODEL,
temperature=0,
openai_api_key=os.getenv("OPENAI_API_KEY"),
)
def generate_expense_info_feedback(acceptance_criteria: dict, form_info: dict) -> str:
    """
    Generate feedback on a submitted expense form via the OpenAI Responses API.

    Args:
        acceptance_criteria: Criteria the expense information must satisfy.
        form_info: The submitted expense-form fields.

    Returns:
        The model's feedback text.
    """
    # Gregorian date, e.g. "05 March 2025". The previous code built the format
    # string with an f-string embedding current_date.year, which is equivalent
    # to plain "%d %B %Y"; its comment claimed a "Buddhist year" but no +543
    # offset was ever applied. NOTE(review): if a Buddhist-calendar year was
    # intended, the prompt date needs year + 543 — confirm with the prompt owner.
    formatted_date = datetime.now().strftime("%d %B %Y")
    print("current date: {}".format(formatted_date))
    response = client.responses.create(
        model=MODEL,
        temperature=0,
        input=feedback_generation_prompt.format(formatted_date, acceptance_criteria, form_info)
    )
    return response.output_text
def generate_expense_description_feedback(project_tasks: dict, expense_description: str) -> str:
    """
    Ask the model to evaluate an expense description against the project's tasks.

    Args:
        project_tasks: Tasks defined for the project.
        expense_description: Free-text description of the expense.

    Returns:
        The model's feedback text.
    """
    # Build the prompt first, then issue a single deterministic Responses call.
    prompt_text = expense_description_feedback_prompt.format(project_tasks, expense_description)
    result = client.responses.create(
        model=MODEL,
        temperature=0,
        input=prompt_text,
    )
    return result.output_text
def map_input_to_project_deliverable(user_input: str, project_deliverables: str) -> str:
    """
    Map free-text user input onto one of the project's deliverables.

    Args:
        user_input: The raw text provided by the user.
        project_deliverables: The deliverables to match against.

    Returns:
        The model's mapping result as text.
    """
    # Single deterministic call; the prompt template carries the matching logic.
    mapping_prompt = map_input_to_project_deliverables_prompt.format(user_input, project_deliverables)
    result = client.responses.create(
        model=MODEL,
        temperature=0,
        input=mapping_prompt,
    )
    return result.output_text