Spaces:
Runtime error
Runtime error
Delete app.py
Browse files
app.py
DELETED
|
@@ -1,99 +0,0 @@
|
|
| 1 |
-
import os
import time

import gradio as gr
import pandas as pd
from model import Model
from tqdm import tqdm

# Register tqdm's pandas integration (enables df.progress_apply with a bar).
tqdm.pandas()

# CSV file where every question/answer interaction — and, later, user
# feedback from the like/dislike buttons — is persisted.
OUTPUT_FILE = "./results_qa.csv"
def new_vote(data: gr.LikeData, question, model_name, **kwargs):
    """Persist a thumbs-up/down on a chatbot answer into the results CSV.

    Args:
        data: Gradio like-event payload; ``data.liked`` is True for a
            thumbs-up.
        question: The question text whose row(s) should receive feedback.
        model_name: The model that produced the answer being rated.
        **kwargs: Ignored; accepted so Gradio can pass extra event data.
    """
    feedback = "Good" if data.liked else "Bad"
    df = pd.read_csv(OUTPUT_FILE)
    # BUG FIX: the original assigned df.apply(...) to the whole 'Feedback'
    # column, which overwrote every non-matching row's stored feedback with
    # None. Update only the rows for this (model, question) pair instead.
    mask = (df.Model == model_name) & (df.Question == question)
    df.loc[mask, 'Feedback'] = feedback
    df.to_csv(OUTPUT_FILE, index=False)
| 18 |
-
def answer_question(question: str, model_name: str, system_prompt: str):
    """Stream a model answer to *question* and log the interaction to CSV.

    A generator for Gradio's streaming interface: it repeatedly yields
    ``([(question, partial_answer)], OUTPUT_FILE)`` while tokens arrive,
    then yields one final tuple with sources and timing appended.

    Args:
        question: User query text.
        model_name: Name passed to ``Model`` to select the backend model.
        system_prompt: System prompt forwarded to the model.

    Yields:
        Tuples of (chat history for gr.Chatbot, path of the results CSV).
    """
    start_time = time.time()
    qa_model = Model(model_name=model_name)
    gen_response = qa_model.run(system_prompt=system_prompt, query=question)

    response = ""
    # FIX: 'sources' was unbound (NameError below) if the generator ended
    # without ever yielding a list.
    sources = []
    for resp in gen_response:
        if isinstance(resp, list):
            # The model signals end-of-stream by yielding its source list.
            sources = resp
            break
        # Domain convention: show amounts in Rupees, never dollars.
        resp = resp.replace("$", "₹")
        response += resp
        yield [(question, response)], OUTPUT_FILE

    time_taken = time.time() - start_time
    # NOTE: this is a character count, not a word count, kept as-is so the
    # 'Words'/'Efficiency' columns stay comparable with earlier rows.
    words = len(question) + len(response)
    # FIX: guard against a zero elapsed time (ZeroDivisionError).
    efficiency = words / time_taken if time_taken else 0.0

    temp_sources = "\n".join([f"{i + 1}. {d}" for i, d in enumerate(sources)])
    final_response = f"{response} \n\nSources: \n{temp_sources} \n\nTime Taken: {time_taken}"
    new_row = {'Model': model_name, 'Question': question, 'Answer': response, "Sources": sources, "Time": time_taken, "Words": words, "Efficiency": efficiency, "Feedback": None, "final_response": final_response}

    # Append the new row to the CSV, tolerating a missing or corrupt file.
    if os.path.isfile(OUTPUT_FILE):
        try:
            df = pd.read_csv(OUTPUT_FILE)
            rows = df.to_dict(orient="records")
            rows.append(new_row)
        except Exception:
            # Best-effort logging: a broken CSV should not kill the answer.
            rows = [new_row]
    else:
        rows = [new_row]

    df = pd.DataFrame(rows)
    df.to_csv(OUTPUT_FILE, index=False)

    # FIX: the original used str.strip("Question")/.strip("Answer:"), but
    # strip(chars) removes any of those *characters* from both ends — it
    # could eat letters off the real answer. Strip the literal prefixes
    # ("Question", "Answer:") that the model sometimes echoes instead.
    final_response = final_response.strip()
    changed = True
    while changed:
        changed = False
        for prefix in ("Question", "Answer:"):
            if final_response.startswith(prefix):
                final_response = final_response[len(prefix):].strip()
                changed = True
    final_response = final_response.replace("Answer:", "")

    yield [(question, final_response)], OUTPUT_FILE
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
if __name__ == "__main__":
    # Minimal Gradio UI: a chatbot fed by answer_question, with a
    # downloadable CSV of all logged interactions.
    with gr.Blocks() as demo:
        chatbot = gr.Chatbot()

        textbox = gr.Textbox(label="Query")

        # The original reassigned system_prompt four times in a row; only
        # this last value ever took effect, so the dead ones were removed.
        default_prompt = """Answer the question from the book. Provide examples only from the book. If you don't know the answer, just say 'Please rephrase the question'"""

        choices = ["gpt4", "gpt-3.5-turbo"]

        system_prompt = gr.Textbox(value=default_prompt, label="System Prompt")
        model_name = gr.Dropdown(choices=choices, value="gpt-3.5-turbo", label="Model")
        # NOTE(review): gr.File is seeded with OUTPUT_FILE — presumably the
        # CSV must already exist at launch; verify on a fresh deployment.
        file = gr.File(value=OUTPUT_FILE, file_types=["csv"], label="Output")

        # Submitting the textbox streams the answer into the chatbot and
        # refreshes the downloadable results file.
        textbox.submit(answer_question, [textbox, model_name, system_prompt], [chatbot, file])
        # Like/dislike on a chatbot message records feedback for that
        # (question, model) pair in the CSV.
        chatbot.like(new_vote, [textbox, model_name], None)

    demo.queue()
    demo.launch(share=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|