|
|
import os |
|
|
from dotenv import load_dotenv |
|
|
import openai |
|
|
import random |
|
|
import requests |
|
|
from ast import literal_eval |
|
|
import json |
|
|
from enum import Enum |
|
|
import gradio as gr |
|
|
|
|
|
# Load environment variables from a local .env file (provides OPENAI_API_KEY).
load_dotenv()

# Read the API key from the environment; will be None if OPENAI_API_KEY is unset.
openai.api_key = os.getenv("OPENAI_API_KEY")

# OpenAI chat-completions REST endpoint, called directly via `requests`
# rather than through the openai client library.
CHAT_ENDPOINT="https://api.openai.com/v1/chat/completions"

# Model sent with every completion request.
CHAT_MODEL = "gpt-3.5-turbo"

# Bearer-token auth header for the REST calls.
# NOTE(review): raises TypeError at import time if OPENAI_API_KEY is missing
# (string concatenation with None) — arguably a useful fail-fast.
CHAT_AUTH = {"Authorization": "Bearer " + openai.api_key}

# Upper bound on tokens generated per assistant reply.
MAX_TOKENS = 250
|
|
|
|
|
|
|
|
|
|
|
class ChatRoles(str, Enum):
    """Role names used in OpenAI chat-completion message dicts.

    Mixes in ``str`` so members compare equal to their literal values
    (``ChatRoles.USER == "user"``) and serialize as plain strings when the
    message history is JSON-encoded for the API request. Uses the ``Enum``
    import that was previously unused.
    """

    SYSTEM = "system"
    ASSISTANT = "assistant"
    USER = "user"
|
|
|
|
|
def get_assistant_response(gpt_history):
    """Send the conversation to the OpenAI chat-completions endpoint.

    Args:
        gpt_history: List of ``{"role": ..., "content": ...}`` message dicts
            forming the conversation so far. The list is NOT mutated here;
            the caller appends the reply it actually displays. (Previously
            this function appended the raw reply *and* the caller appended
            it again, duplicating every assistant turn in the context.)

    Returns:
        The assistant's reply text (content of the first choice).

    Raises:
        requests.HTTPError: if the API responds with a 4xx/5xx status.
        KeyError, IndexError: if the response payload is malformed.
    """
    params = {
        "model": CHAT_MODEL,
        "messages": gpt_history,
        "max_tokens": MAX_TOKENS,
    }
    response = requests.post(url=CHAT_ENDPOINT, json=params, headers=CHAT_AUTH)
    # Fail fast with a clear HTTP error instead of KeyError-ing on an
    # API error payload below.
    response.raise_for_status()
    # Parse the JSON body exactly once. The previous literal_eval() parsing
    # was wrong for JSON (cannot handle true/false/null) and ran twice.
    payload = response.json()
    print(payload)  # debug: full API response
    response_message = payload["choices"][0]["message"]["content"]
    print("\n" + response_message)
    return response_message
|
|
|
|
|
# Canned UI messages keyed by conversation step (the number of chat rows
# when the bot replies). When a step appears here, the text is appended
# after any model-generated reply for that step.
hardcoded = {
    1: "Hi, I'm an AI powered college counselor from Cledge! What prompt do you want help with?",
    2: "Pick as many questions to answer as you'd like. Write the number of the question and then your response."
}
|
|
|
|
|
# System-prompt instructions keyed by conversation step. When a step
# appears here, the instruction is injected as a system message and the
# model is asked to respond before the bot turn is shown.
instructions = {
    2: "Based on these responses, generate 5 questions to help them brainstorm.",
    3: "Based on these responses, ask follow up questions that help them narrow down the focus of the essay",
    4: "Based on these responses, ask follow up questions that help them identify key themes in the essay",
    5: "Based on these responses, think of 5 ideas for personal statement essays. Write a synopsis of each idea.",
}
|
|
|
|
|
def grad_demo():
    """Build and launch the Gradio essay-brainstorming chat UI."""
    with gr.Blocks() as demo:
        # Running OpenAI-format message history shared by the callbacks
        # below; reset by initialize() each time the chatbot loads.
        gpt_history = []

        def user(user_message, history):
            """Record the user's turn and add a pending row to the chat."""
            gpt_history.append({"role": ChatRoles.USER, "content": user_message})
            # Fixed: previously printed the whole list while claiming "Length".
            print(f"Length of gpt_history: {len(gpt_history)}")
            # Clear the textbox; None marks the bot half as not-yet-answered.
            return "", history + [[user_message, None]]

        def bot(history):
            """Fill in the bot half of the newest chat row.

            The conversation "step" is the number of chat rows so far; it
            selects a system instruction (model-generated reply) and/or a
            hardcoded prompt for this turn.
            """
            step = len(history)
            print(f"STEP: {step}")
            bot_message = ""
            if step in instructions:
                # Steer the model for this step, then request its reply.
                gpt_history.append({"role": ChatRoles.SYSTEM, "content": instructions[step]})
                bot_message = get_assistant_response(gpt_history)
            if step in hardcoded:
                bot_message = f"{bot_message}\n\n {hardcoded[step]}"
            history[-1][1] = bot_message
            gpt_history.append({"role": ChatRoles.ASSISTANT, "content": bot_message})
            # Fixed: previously printed the whole list while claiming "Length".
            print(f"Length of gpt_history: {len(gpt_history)}")
            return history

        def initialize():
            """Reset state and produce the opening bot greeting."""
            gpt_history.clear()
            # Seed one empty row so bot() treats the greeting as step 1.
            history = bot([[None, None]])
            return history

        chatbot = gr.Chatbot(value=initialize)
        msg = gr.Textbox()
        clear = gr.Button("Clear")

        # On submit: echo the user's turn immediately, then fill in the
        # bot reply as a chained event.
        msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
            bot, chatbot, chatbot
        )
        clear.click(lambda: None, None, chatbot, queue=False)

    demo.launch()
|
|
|
|
|
# Script entry point: launch the Gradio demo when run directly.
if __name__ == "__main__":
    grad_demo()
|
|
|