Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -12,30 +12,31 @@ from IPython import get_ipython
|
|
| 12 |
|
| 13 |
GPT_MODEL = "gpt-3.5-turbo-1106"
|
| 14 |
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
messages=[]
|
| 18 |
|
| 19 |
def exec_python(cell):
|
| 20 |
-
|
| 21 |
-
print(
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
|
|
|
|
|
|
| 33 |
|
| 34 |
# Now let's define the function specification:
|
| 35 |
functions = [
|
| 36 |
{
|
| 37 |
"name": "exec_python",
|
| 38 |
-
"description": "run
|
| 39 |
"parameters": {
|
| 40 |
"type": "object",
|
| 41 |
"properties": {
|
|
@@ -120,6 +121,7 @@ def chat_completion_request(messages, functions=None, function_call=None, model=
|
|
| 120 |
|
| 121 |
# Set up the data for the API request
|
| 122 |
json_data = {"model": model, "messages": messages}
|
|
|
|
| 123 |
|
| 124 |
# If functions were provided, add them to the data
|
| 125 |
if functions is not None:
|
|
@@ -166,13 +168,9 @@ def first_call(init_prompt, user_input):
|
|
| 166 |
cost1 = openai_api_calculate_cost(usage)
|
| 167 |
|
| 168 |
# Let's see what we got back before continuing
|
| 169 |
-
return assistant_message, cost1
|
| 170 |
|
| 171 |
|
| 172 |
-
def second_prompt_build(prompt, log):
|
| 173 |
-
prompt_second = prompt.format(ans = log)
|
| 174 |
-
return prompt_second
|
| 175 |
-
|
| 176 |
def function_call_process(assistant_message):
|
| 177 |
if assistant_message.get("function_call") != None:
|
| 178 |
|
|
@@ -189,13 +187,17 @@ def function_call_process(assistant_message):
|
|
| 189 |
return result
|
| 190 |
|
| 191 |
# print(result)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 192 |
|
| 193 |
-
def second_call(prompt,
|
| 194 |
# Add a new message to the conversation with the function result
|
| 195 |
messages.append({
|
| 196 |
"role": "function",
|
| 197 |
"name": function_name,
|
| 198 |
-
"content": str(
|
| 199 |
})
|
| 200 |
|
| 201 |
# Call the model again to generate a user-facing message based on the function result
|
|
@@ -210,36 +212,41 @@ def second_call(prompt, result, function_name = "exec_python"):
|
|
| 210 |
|
| 211 |
# Print the final conversation
|
| 212 |
# pretty_print_conversation(messages)
|
| 213 |
-
return assistant_message, cost2
|
| 214 |
|
| 215 |
|
| 216 |
def main_function(init_prompt, prompt, user_input):
|
| 217 |
-
first_call_result, cost1 = first_call(init_prompt, user_input)
|
| 218 |
function_call_process_result = function_call_process(first_call_result)
|
| 219 |
second_prompt_build_result = second_prompt_build(prompt, function_call_process_result)
|
| 220 |
-
second_call_result, cost2 = second_call(second_prompt_build_result, function_call_process_result)
|
| 221 |
-
|
|
|
|
|
|
|
| 222 |
|
| 223 |
def gradio_function():
|
| 224 |
init_prompt = gr.Textbox(label="init_prompt (for 1st call)")
|
| 225 |
prompt = gr.Textbox(label="prompt (for 2nd call)")
|
| 226 |
user_input = gr.Textbox(label="User Input")
|
| 227 |
-
output_1st_call = gr.
|
| 228 |
-
output_fc_call = gr.Textbox(label="output_fc_call")
|
| 229 |
-
|
|
|
|
| 230 |
cost = gr.Textbox(label="Cost 1")
|
| 231 |
cost2 = gr.Textbox(label="Cost 2")
|
| 232 |
-
|
|
|
|
|
|
|
| 233 |
|
| 234 |
iface = gr.Interface(
|
| 235 |
fn=main_function,
|
| 236 |
inputs=[init_prompt, prompt, user_input],
|
| 237 |
-
outputs=[output_1st_call, output_fc_call, output_2nd_call, cost, cost2],
|
| 238 |
title="Test",
|
| 239 |
description="Accuracy",
|
| 240 |
)
|
| 241 |
|
| 242 |
-
iface.launch(share=True)
|
| 243 |
|
| 244 |
if __name__ == "__main__":
|
| 245 |
gradio_function()
|
|
|
|
| 12 |
|
| 13 |
GPT_MODEL = "gpt-3.5-turbo-1106"

# Configure the OpenAI API key.
# Bug fix: the original imported google.colab unconditionally, which raises
# ImportError everywhere except Colab (this app also runs on HF Spaces /
# locally). Try Colab's secret store first, then fall back to the
# OPENAI_API_KEY environment variable.
import os

try:
    from google.colab import userdata  # Colab-only module
    openai.api_key = userdata.get('OPENAI_API_KEY')
except ImportError:
    openai.api_key = os.environ.get('OPENAI_API_KEY')
|
|
|
|
| 17 |
|
| 18 |
def exec_python(cell):
    """Execute a string of Python code and return the populated local namespace.

    Parameters
    ----------
    cell : str
        Python source code to run (typically the code the model supplied in
        its function_call arguments).

    Returns
    -------
    dict
        The local namespace produced by executing the code, so the caller
        can see every variable the snippet defined.
    """
    print(cell)  # debug: show the code about to run
    local_namespace = {}
    # SECURITY: exec() on model-generated code runs with full interpreter
    # privileges — acceptable only in a sandboxed demo environment.
    exec(cell, globals(), local_namespace)
    print(local_namespace.values())  # debug: show what the snippet produced
    # Bug fix: the original ended with `return result`, but `result` was
    # never assigned (only a commented-out `# result = 0`), so every call
    # raised NameError. Return the executed namespace instead, which is what
    # the debug print above already inspects.
    result = local_namespace
    return result
|
| 34 |
|
| 35 |
# Now let's define the function specification:
|
| 36 |
functions = [
|
| 37 |
{
|
| 38 |
"name": "exec_python",
|
| 39 |
+
"description": "run python code and return the execution result.",
|
| 40 |
"parameters": {
|
| 41 |
"type": "object",
|
| 42 |
"properties": {
|
|
|
|
| 121 |
|
| 122 |
# Set up the data for the API request
|
| 123 |
json_data = {"model": model, "messages": messages}
|
| 124 |
+
# json_data = {"model": model, "messages": messages, "temperature": 0.2, "top_p": 0.1}
|
| 125 |
|
| 126 |
# If functions were provided, add them to the data
|
| 127 |
if functions is not None:
|
|
|
|
| 168 |
cost1 = openai_api_calculate_cost(usage)
|
| 169 |
|
| 170 |
# Let's see what we got back before continuing
|
| 171 |
+
return assistant_message, cost1, messages
|
| 172 |
|
| 173 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 174 |
def function_call_process(assistant_message):
|
| 175 |
if assistant_message.get("function_call") != None:
|
| 176 |
|
|
|
|
| 187 |
return result
|
| 188 |
|
| 189 |
# print(result)
|
| 190 |
+
def second_prompt_build(prompt, log):
    """Fill the second-call prompt template with the function-call output.

    `prompt` must contain an `{ans}` placeholder; `log` (the result of the
    executed function call) is substituted into it, and the completed
    prompt string is returned.
    """
    return prompt.format(ans=log)
|
| 194 |
|
| 195 |
+
def second_call(prompt, prompt_second, messages, function_name = "exec_python"):
|
| 196 |
# Add a new message to the conversation with the function result
|
| 197 |
messages.append({
|
| 198 |
"role": "function",
|
| 199 |
"name": function_name,
|
| 200 |
+
"content": str(prompt_second), # Convert the result to a string
|
| 201 |
})
|
| 202 |
|
| 203 |
# Call the model again to generate a user-facing message based on the function result
|
|
|
|
| 212 |
|
| 213 |
# Print the final conversation
|
| 214 |
# pretty_print_conversation(messages)
|
| 215 |
+
return assistant_message, cost2, messages
|
| 216 |
|
| 217 |
|
| 218 |
def main_function(init_prompt, prompt, user_input):
    """Run the full two-call pipeline and return every intermediate artifact.

    Returns a 9-tuple in the order the Gradio outputs expect: first
    assistant message, function-call output, built second prompt, final
    assistant message, cost of each call, final conversation, and the
    total cost in USD and in Rupiah (USD * 15000).
    """
    # First model call: the assistant may request a function invocation.
    assistant_msg, usd_first, convo = first_call(init_prompt, user_input)
    # Execute the requested function (if any) and capture its output.
    fc_output = function_call_process(assistant_msg)
    # Build the follow-up prompt from the template and the function output.
    built_prompt = second_prompt_build(prompt, fc_output)
    # Second model call: turn the function output into a user-facing reply.
    final_msg, usd_second, convo_final = second_call(built_prompt, fc_output, convo)
    usd_total = usd_first + usd_second
    rupiah_total = usd_total * 15000  # fixed USD→IDR rate used by this demo
    return (assistant_msg, fc_output, built_prompt, final_msg,
            usd_first, usd_second, convo_final, usd_total, rupiah_total)
|
| 226 |
|
| 227 |
def gradio_function():
    """Build and launch the Gradio UI that wraps main_function.

    Three text inputs feed the pipeline; nine widgets display each
    intermediate result in the same order main_function returns them.
    """
    box_init = gr.Textbox(label="init_prompt (for 1st call)")
    box_prompt = gr.Textbox(label="prompt (for 2nd call)")
    box_user = gr.Textbox(label="User Input")
    out_first = gr.JSON(label="Assistant (output_1st_call)")
    out_fc = gr.Textbox(label="Function Call (exec_python) Result (output_fc_call)")
    out_built = gr.Textbox(label="Building 2nd Prompt (output_fc_call_with_2nd_prompt)")
    out_second = gr.JSON(label="Assistant (output_2nd_call_buat_user)")
    out_cost1 = gr.Textbox(label="Cost 1")
    out_cost2 = gr.Textbox(label="Cost 2")
    out_total_usd = gr.Textbox(label="Final Cost ($)")
    out_total_rp = gr.Textbox(label="Final Cost (Rp)")
    out_convo = gr.JSON(label="Final Messages")

    # Output order must match main_function's 9-tuple return order.
    gr.Interface(
        fn=main_function,
        inputs=[box_init, box_prompt, box_user],
        outputs=[out_first, out_fc, out_built, out_second,
                 out_cost1, out_cost2, out_convo, out_total_usd, out_total_rp],
        title="Test",
        description="Accuracy",
    ).launch(share=True, debug=True)


if __name__ == "__main__":
    gradio_function()
|