File size: 3,163 Bytes
dd1df32 5188aef dd1df32 1c7d99c dd1df32 dbb1600 dd1df32 3bd9d74 dd1df32 b99b768 dd1df32 04ae05e 8693f06 04ae05e 7d59900 dd1df32 04ae05e ff4f21c dd1df32 b27faad dd1df32 b27faad dd1df32 04ae05e dd1df32 04ae05e dd1df32 b27faad dd1df32 b27faad 77c181e b27faad dd1df32 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 |
import os
from dotenv import load_dotenv
import openai
import random
import requests
from ast import literal_eval
import json
from enum import Enum
import gradio as gr
# Load environment variables (OPENAI_API_KEY) from a local .env file.
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
# Raw chat-completions REST endpoint; requests is used directly below rather
# than the openai client library.
CHAT_ENDPOINT="https://api.openai.com/v1/chat/completions"
CHAT_MODEL = "gpt-3.5-turbo"
# NOTE(review): if OPENAI_API_KEY is unset, os.getenv returns None and this
# concatenation raises TypeError at import time — confirm that is intended.
CHAT_AUTH = {"Authorization": "Bearer " + openai.api_key}
# Cap on tokens generated per completion request.
MAX_TOKENS = 250
#TODO: handle max length limits
class ChatRoles:
    """String constants for the ``role`` field of OpenAI chat messages."""
    SYSTEM = "system"
    ASSISTANT = "assistant"
    USER = "user"
def get_assistant_response(gpt_history):
    """POST the chat history to the OpenAI chat-completions endpoint and
    return the assistant's reply text.

    Args:
        gpt_history: list of ``{"role": ..., "content": ...}`` dicts in
            OpenAI chat format. Read-only here — the caller is responsible
            for appending the assistant's reply to the history.

    Returns:
        The assistant message content as a str.
    """
    params = {
        "model": CHAT_MODEL,
        "messages": gpt_history,
        "max_tokens": MAX_TOKENS,
    }
    response = requests.post(url=CHAT_ENDPOINT, json=params, headers=CHAT_AUTH)
    # Parse the body as JSON. ast.literal_eval is not a JSON parser — it
    # raises on the JSON literals true/false/null — and the old code also
    # decoded the body twice.
    payload = response.json()
    print(payload)
    response_message = payload["choices"][0]["message"]["content"]
    # Deliberately no gpt_history.append here: the caller (bot) already
    # records the assistant turn; appending in both places duplicated every
    # assistant message in the history sent to the API.
    print("\n" + response_message)
    return response_message
# Scripted bot lines keyed by conversation step (len(history) in bot()).
# When a step appears here, its text is appended after any model output.
hardcoded = {
    1: "Hi, I'm an AI powered college counselor from Cledge! What prompt do you want help with?",
    2: "Pick as many questions to answer as you'd like. Write the number of the question and then your response."
}
# System-role prompts keyed by conversation step (len(history) in bot()).
# The matching step's prompt is injected into gpt_history before asking the
# model for its next reply; note step 1 is hardcoded-only (no model call).
instructions = {
    2: "Based on these responses, generate 5 questions to help them brainstorm.",
    3: "Based on these responses, ask follow up questions that help them narrow down the focus of the essay",
    4: "Based on these responses, ask follow up questions that help them identify key themes in the essay",
    5: "Based on these responses, think of 5 ideas for personal statement essays. Write a synopsis of each idea.",
}
def grad_demo():
    """Build and launch the Gradio chat UI for the essay-brainstorm flow."""
    with gr.Blocks() as demo:
        # OpenAI-format message history shared by the callbacks below.
        gpt_history = []

        def user(user_message, history):
            # Record the user's turn, clear the textbox, and show the message
            # in the chat widget with a pending (None) bot slot.
            gpt_history.append({"role": ChatRoles.USER, "content": user_message})
            # Bug fix: the old print interpolated the whole list while
            # claiming to report its length.
            print(f"Length of gpt_history: {len(gpt_history)}")
            return "", history + [[user_message, None]]

        def bot(history):
            # One "step" per displayed exchange; the step number selects the
            # scripted instruction and/or hardcoded text for this turn.
            step = len(history)
            print(f"STEP: {step}")
            bot_message = ""
            if step in instructions:
                gpt_history.append({"role": ChatRoles.SYSTEM, "content": instructions[step]})
                bot_message = get_assistant_response(gpt_history)
            if step in hardcoded:
                bot_message = f"{bot_message}\n\n {hardcoded[step]}"
            history[-1][1] = bot_message
            gpt_history.append({"role": ChatRoles.ASSISTANT, "content": bot_message})
            print(f"Length of gpt_history: {len(gpt_history)}")
            return history

        def initialize():
            # Fresh session: empty model history, then run step 1 to produce
            # the greeting.
            gpt_history.clear()
            return bot([[None, None]])

        def reset():
            # Bug fix: Clear previously wiped only the UI, leaving stale
            # turns in gpt_history to leak into the next conversation.
            gpt_history.clear()
            return None

        chatbot = gr.Chatbot(value=initialize)
        msg = gr.Textbox()
        clear = gr.Button("Clear")
        msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
            bot, chatbot, chatbot
        )
        clear.click(reset, None, chatbot, queue=False)
    demo.launch()
if __name__ == "__main__":
    grad_demo()