File size: 1,925 Bytes
b15be87
 
 
 
 
0d2b029
 
 
 
 
b15be87
 
 
 
 
 
 
e60f7d2
8300b42
b15be87
8300b42
b15be87
 
 
 
 
 
 
 
4829ba9
0957e29
 
b15be87
 
8300b42
b15be87
68752b9
b15be87
 
6640d96
 
 
 
 
 
 
8300b42
6640d96
 
 
 
0d2b029
 
 
 
 
 
 
8300b42
0d2b029
 
 
 
b15be87
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
# agent_llm_engine.py

import os
from langchain_openai import ChatOpenAI
import openai
from prompts.chat_completion_prompts import (
    feedback_generation_prompt, 
    expense_description_feedback_prompt,
    map_input_to_project_deliverables_prompt
)
from dotenv import load_dotenv
from datetime import datetime

# Load environment variables from .env file
load_dotenv()
# Bare OpenAI client — picks up OPENAI_API_KEY from the environment
# populated by load_dotenv() above.
client = openai.OpenAI()

# Single model name shared by the raw client calls below and the
# LangChain agent wrapper.
MODEL = "gpt-4.1-mini"

# LangChain chat-model handle for agent-style use elsewhere in the app;
# temperature=0 for deterministic outputs, same key source as `client`.
llm_overall_agent = ChatOpenAI(
    model=MODEL, 
    temperature=0, 
    openai_api_key=os.getenv("OPENAI_API_KEY"),
)

def generate_expense_info_feedback(acceptance_criteria: dict, form_info: dict) -> str:
    """
    Generate feedback on a submitted expense form via the OpenAI Responses API.

    Args:
        acceptance_criteria: Criteria the expense information is evaluated against.
        form_info: The submitted expense form fields.

    Returns:
        The model's feedback text.
    """
    current_date = datetime.now()
    # Buddhist Era year = Gregorian year + 543. The original interpolated the
    # plain Gregorian year despite its "Buddhist year" comment; the f-string
    # indirection (instead of plain "%Y") shows a transformed year was intended.
    buddhist_year = current_date.year + 543
    formatted_date = current_date.strftime(f"%d %B {buddhist_year}")
    print("current date: {}".format(formatted_date))

    response = client.responses.create(
        model=MODEL,
        temperature=0,
        # Prompt is positional: (date, criteria, form fields).
        input=feedback_generation_prompt.format(formatted_date, acceptance_criteria, form_info)
    )

    return response.output_text

def generate_expense_description_feedback(project_tasks: dict, expense_description: str) -> str:
    """
    Produce model feedback on an expense description, judged against the
    project's task list, via the OpenAI Responses API.

    Args:
        project_tasks: Tasks belonging to the project.
        expense_description: Free-text description of the expense.

    Returns:
        The model's feedback text.
    """
    prompt_text = expense_description_feedback_prompt.format(
        project_tasks, expense_description
    )
    result = client.responses.create(
        model=MODEL,
        temperature=0,
        input=prompt_text,
    )
    return result.output_text

def map_input_to_project_deliverable(user_input: str, project_deliverables: str) -> str:
    """
    Ask the model to map free-text user input onto the project's deliverables.

    Args:
        user_input: Raw text supplied by the user.
        project_deliverables: Deliverables to map the input against.

    Returns:
        The model's mapping result as text.
    """
    rendered_prompt = map_input_to_project_deliverables_prompt.format(
        user_input, project_deliverables
    )
    result = client.responses.create(
        model=MODEL,
        temperature=0,
        input=rendered_prompt,
    )
    return result.output_text