ensemble_machine / utils.py
floydchow7's picture
minor UI change
820df80
raw
history blame
4.42 kB
import openai
import os
def get_api_key(local = False):
    """Fetch the OpenAI API key from the environment.

    When *local* is true, variables from a .env file are loaded first
    (via python-dotenv) before the environment is read.

    NOTE(review): the variable is named 'OPEN_API_KEY', not the
    conventional 'OPENAI_API_KEY' — confirm against deployment config.
    """
    if not local:
        return os.getenv('OPEN_API_KEY')
    # Local development: pull the key out of a .env file first.
    from dotenv import load_dotenv
    load_dotenv()
    return os.getenv('OPEN_API_KEY')
def get_user_input(prompt):
    """Show *prompt* and return the line the user types."""
    response = input(prompt)
    return response
def choose_cooperation_type():
    """Prompt the user to pick a cooperation mode.

    Returns:
        'sequential' or 'parallel'. Re-prompts until a valid choice
        ('1' or '2') is entered.
    """
    # Loop instead of recursing: the original called itself on every
    # invalid entry, so a stubborn user could hit RecursionError.
    while True:
        choice = get_user_input("Please choose the cooperation type: \n"
            + "1. Sequential: human provide an answer first and then AI provide the answer based on it.\n"
            + "2. Parallel: human and AI give answers seperately and then AI does the merge.\n")
        if choice == '1':
            return 'sequential'
        if choice == '2':
            return 'parallel'
        print("Invalid choice. Please try again.")
def describe_task():
    """Ask the user to describe their task.

    Returns:
        The user's description, or a canned demo task when the input is
        empty/whitespace — a deliberate fallback so the demo keeps
        flowing without typed input.
    """
    task_description = get_user_input("Please describe your task: ")
    if not task_description.strip():
        task_description = "Write a poem about the moon in 3 lines."
    return task_description
def generate_text_with_gpt(prompts, api_key = None):
    """Send *prompts* to the gpt-3.5-turbo chat model and return the reply text.

    Args:
        prompts: The user-message content for a single chat turn.
        api_key: Optional key; when given it overwrites the module-level
            openai.api_key for this and all later calls.

    Returns:
        The assistant's message content, or "" if the API call fails.
    """
    if api_key:
        openai.api_key = api_key
    try:
        # Legacy (pre-1.0) openai SDK surface; requires openai<1.0.
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "system", "content": "Please assist."},
                {"role": "user", "content": prompts}
            ]
        )
        return response['choices'][0]['message']['content']
    except Exception as e:
        # Best-effort boundary: report the error and return an empty
        # string so the interactive session keeps running.
        print(f"Error occurred when generating texts: {e}")
        return ""
def generate_ai_initial_answer(task_description, api_key=None):
    """Produce the AI's first-draft answer for the given task."""
    return generate_text_with_gpt(
        f"Given the task: {task_description}, provide an answer: ",
        api_key,
    )
def merge_texts_parallel(task_description, human_text, ai_text, api_key = None):
    """Have the model merge a human answer and an AI answer into one text."""
    merge_prompt = "".join((
        f"Given the task: {task_description}, there are two answers provided:\n",
        f"The first answer: {human_text}\nThe second answer: {ai_text}\n",
        "Merge the two answers into one in a coherent way: ",
    ))
    return generate_text_with_gpt(merge_prompt, api_key)
def merge_texts_sequential(task_description, human_text, api_key = None):
    """Refine the human's answer while keeping it aligned with their intent."""
    refine_prompt = (
        f"Given the task:{task_description}, here is the answer provided by the human: {human_text}\n"
        f"Refine this response and ensure the final answer aligns with the human's intent:"
    )
    return generate_text_with_gpt(refine_prompt, api_key)
def modify_with_suggestion(task_description, text, suggestions, api_key = None):
    """Revise *text* according to the given *suggestions*."""
    revision_prompt = (
        f"Given the task:{task_description}, the answer provided is: {text}\n"
        f"Modify the answer based on the following suggestions: {suggestions}"
    )
    return generate_text_with_gpt(revision_prompt, api_key)
def get_evaluation_with_gpt(task_description, text, api_key=None):
    """Score *text* against four criteria (0-10 each) via the chat model.

    The prompt pins an exact output format (one 'Criterion: [Score]'
    line per criterion) so the reply can be parsed downstream.
    """
    rubric_lines = [
        f"Given the task: {task_description}, the provided answer is: {text}\n",
        "Evaluate the answer using a scale from 0 to 10. Scores should reflect how well the ideas fit each specific evaluation criteria:\n",
        "0-2: Poor fit; the idea demonstrates minimal relevance to the criteria.\n",
        "3-5: Partial fit; the idea shows some relevance but contains significant shortcomings.\n",
        "6-8: Good fit; the idea aligns well with the criteria, showing clear relevance and thoughtfulness.\n",
        "9-10: Excellent fit; the idea fully aligns with the criteria, demonstrating exceptional insight.\n\n",
        "When evaluating, use the entire scoring range and avoid defaulting to mid-range scores.\n\n",
        "Evaluate based on the following criteria:\n",
        "Novelty: The uniqueness and innovation of the ideas.\n",
        "Implementability: The practicality of suggested actions.\n",
        "Inimitability: The difficulty for competitors to replicate the ideas.\n",
        "Alignment: The degree to which the ideas align with Airbnb’s goals and 17 SDGs.\n\n",
        "Please format the output exactly as follows:\n",
        "Novelty: [Score]\n",
        "Implementability: [Score]\n",
        "Inimitability: [Score]\n",
        "Alignment: [Score]\n",
    ]
    return generate_text_with_gpt("".join(rubric_lines), api_key)
def display_merged_output(session_index, session_manager):
    """Return the stored merged final answer for the session at *session_index*."""
    return session_manager.get_session(session_index)['merged_final_answer']