Commit · 66ba57f
Parent(s): f159474
Update app.py

app.py CHANGED
@@ -43,101 +43,6 @@ def parse_codeblock(text):
             if i > 0:
                 lines[i] = "<br/>" + line.replace("<", "&lt;").replace(">", "&gt;")
     return "".join(lines)
-
-
-# def predict(inputs, top_p, temperature, chat_counter, chatbot, history, request:gr.Request):
-#     payload = {
-#         "model": MODEL,
-#         "messages": [{"role": "user", "content": f"{inputs}"}],
-#         "temperature" : 1.0,
-#         "top_p":1.0,
-#         "n" : 1,
-#         "stream": True,
-#         "presence_penalty":0,
-#         "frequency_penalty":0,
-#     }
-
-#     headers = {
-#         "Content-Type": "application/json",
-#         "Authorization": f"Bearer {OPENAI_API_KEY}",
-#         "Headers": f"{request.kwargs['headers']}"
-#     }
-
-#     # print(f"chat_counter - {chat_counter}")
-#     if chat_counter != 0 :
-#         messages = []
-#         for i, data in enumerate(history):
-#             if i % 2 == 0:
-#                 role = 'user'
-#             else:
-#                 role = 'assistant'
-#             message = {}
-#             message["role"] = role
-#             message["content"] = data
-#             messages.append(message)
-
-#         message = {}
-#         message["role"] = "user"
-#         message["content"] = inputs
-#         messages.append(message)
-#         payload = {
-#             "model": MODEL,
-#             "messages": messages,
-#             "temperature" : temperature,
-#             "top_p": top_p,
-#             "n" : 1,
-#             "stream": True,
-#             "presence_penalty":0,
-#             "frequency_penalty":0,
-#         }
-
-#         chat_counter += 1
-
-#     history.append(inputs)
-#     token_counter = 0
-#     partial_words = ""
-#     counter = 0
-
-#     try:
-#         # make a POST request to the API endpoint using the requests.post method, passing in stream=True
-#         response = requests.post(API_URL, headers=headers, json=payload, stream=True)
-#         response_code = f"{response}"
-#         #if response_code.strip() != "<Response [200]>":
-#         #    #print(f"response code - {response}")
-#         #    raise Exception(f"Sorry, hitting rate limit. Please try again later. {response}")
-
-#         for chunk in response.iter_lines():
-#             #Skipping first chunk
-#             if counter == 0:
-#                 counter += 1
-#                 continue
-#             #counter+=1
-#             # check whether each line is non-empty
-#             if chunk.decode() :
-#                 chunk = chunk.decode()
-#                 # decode each line as response data is in bytes
-#                 if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]['delta']:
-#                     partial_words = partial_words + json.loads(chunk[6:])['choices'][0]["delta"]["content"]
-#                     if token_counter == 0:
-#                         history.append(" " + partial_words)
-#                     else:
-#                         history[-1] = partial_words
-#                     token_counter += 1
-#                     yield [(parse_codeblock(history[i]), parse_codeblock(history[i + 1])) for i in range(0, len(history) - 1, 2) ], history, chat_counter, response, gr.update(interactive=False), gr.update(interactive=False)  # resembles {chatbot: chat, state: history}
-#     except Exception as e:
-#         print (f'error found: {e}')
-#     yield [(parse_codeblock(history[i]), parse_codeblock(history[i + 1])) for i in range(0, len(history) - 1, 2) ], history, chat_counter, response, gr.update(interactive=True), gr.update(interactive=True)
-#     print(json.dumps({"chat_counter": chat_counter, "payload": payload, "partial_words": partial_words, "token_counter": token_counter, "counter": counter}))
-
-def get_random_sample(lists, total_k=TOTAL_K):
-    output_list = []
-    while True:
-        k = random.choice(lists)
-        if not lists[k] in output_list:
-            output_list.append(lists[k])
-        if len(output_list) == total_k:
-            break
-    return output_list
 
 def reset_textbox():
     return gr.update(value='', interactive=False), gr.update(interactive=False)
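An aside on the code removed above, not part of the commit itself: the deleted predict() sliced each streamed line with chunk[6:] because OpenAI's streaming endpoint returns server-sent events whose payload lines carry the 6-character prefix "data: ". A minimal runnable sketch of just that parsing loop, with the model name, prompt, and API key as placeholder assumptions rather than the Space's actual values:

import json
import os
import requests

# Sketch of the SSE parsing step only; MODEL, prompt, and key are placeholders.
API_URL = "https://api.openai.com/v1/chat/completions"
headers = {
    "Content-Type": "application/json",
    "Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}",
}
payload = {"model": "gpt-3.5-turbo",
           "messages": [{"role": "user", "content": "Hello"}],
           "stream": True}

response = requests.post(API_URL, headers=headers, json=payload, stream=True)
for raw in response.iter_lines():
    line = raw.decode()                  # stream data arrives as bytes
    if not line.startswith("data: "):
        continue                         # skip blank keep-alive lines
    data = line[6:]                      # len("data: ") == 6, hence chunk[6:]
    if data == "[DONE]":
        break                            # OpenAI's end-of-stream sentinel
    delta = json.loads(data)["choices"][0]["delta"]
    if "content" in delta:
        print(delta["content"], end="", flush=True)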
@@ -178,12 +83,17 @@ Assistant: <utterance>
 In this app, you can explore the outputs of a gpt-3.5 LLM.
 """
 
-def submit(image):
+def submit(image, agents):
     global CURRENT_POSITION
     balloon = Image.open(os.path.join(os.path.dirname(__file__), 'data/balloon.png')).resize((64, 64))
     new_image = Image.fromarray(image).convert('RGBA')
     for k in range(TOTAL_K):
         new_image.paste(balloon, [CURRENT_POSITION[k][0], CURRENT_POSITION[k][1] - 64], balloon)
+
+    Answer = 'O'
+    for k in range(TOTAL_K):
+        agents[k].update(value = Answer)
+
     return np.array(new_image)
 
 theme = gr.themes.Default(primary_hue="green")
@@ -255,7 +165,7 @@ with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;}
     b0.click(random_sample, inputs = [], outputs = [image])
     b2.click(reset_sample, inputs = [], outputs = [image])
 
-    b1.click(submit, inputs = [image], outputs = [image])
+    b1.click(submit, inputs = [image, agents], outputs = [image])
 
     # inputs.submit(reset_textbox, [], [inputs, b1], queue=False)
     # inputs.submit(predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code, inputs, b1],) #openai_api_key
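A note on the new agents wiring: in Gradio, component.update(...) (or gr.update(...)) only constructs an update dictionary; nothing changes on screen unless that dictionary is returned through the event's outputs list, so the agents[k].update(value = Answer) loop added here has no visible effect on its own. Assuming agents is a list of gr.Textbox components, as inputs = [image, agents] suggests, a minimal sketch (not the Space's code) of the pattern that does apply the updates:

import gradio as gr

TOTAL_K = 4  # placeholder; the Space defines its own TOTAL_K

with gr.Blocks() as demo:
    image = gr.Image()
    agents = [gr.Textbox(label=f"agent {k}") for k in range(TOTAL_K)]
    b1 = gr.Button("Submit")

    def submit(img):
        Answer = 'O'
        # gr.update(...) dicts take effect only when returned via `outputs`
        return [img] + [gr.update(value=Answer) for _ in range(TOTAL_K)]

    # image first, then the textboxes, matching the handler's return order
    b1.click(submit, inputs=[image], outputs=[image] + agents)

demo.launch()

Passing components through inputs only hands their current values to the handler, which is why returning gr.update(...) through outputs is the usual route for refreshing the textboxes.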