Spaces:
Runtime error
Runtime error
Can Günen committed on
Commit ·
c5a14aa
1
Parent(s): 839d8bc
now we don't need to use hf tokens
Browse files
- app.py +3 -7
- document_chatbot.py +3 -4
app.py
CHANGED
|
@@ -13,16 +13,12 @@ with gr.Blocks() as demo:
|
|
| 13 |
text_input = gr.Textbox(label="Enter text or URL to text file")
|
| 14 |
with gr.Column():
|
| 15 |
with gr.Row():
|
| 16 |
-
picked_model = gr.Dropdown(["google/flan-t5-large", "google/flan-t5-base","google/flan-t5-small"], label="Models", interactive=True)
|
| 17 |
-
|
| 18 |
-
load_configs.click(document_chatbot.load_token_and_model, inputs=picked_model)
|
| 19 |
-
|
| 20 |
chatbot = gr.Chatbot()
|
| 21 |
-
|
| 22 |
q_input = gr.Textbox(label="Please write your question")
|
| 23 |
clear = gr.Button("Clear")
|
| 24 |
-
q_input.submit(document_chatbot.respond, [text_input, q_input, chatbot], [q_input, chatbot])
|
| 25 |
clear.click(lambda: None, None, chatbot, queue=False)
|
| 26 |
|
| 27 |
-
|
| 28 |
demo.launch(debug=True)
|
|
|
|
| 13 |
text_input = gr.Textbox(label="Enter text or URL to text file")
|
| 14 |
with gr.Column():
|
| 15 |
with gr.Row():
|
| 16 |
+
picked_model = gr.Dropdown(["google/flan-t5-large", "google/flan-t5-base","google/flan-t5-small"], label="Models", interactive=True)
|
| 17 |
+
|
|
|
|
|
|
|
| 18 |
chatbot = gr.Chatbot()
|
|
|
|
| 19 |
q_input = gr.Textbox(label="Please write your question")
|
| 20 |
clear = gr.Button("Clear")
|
| 21 |
+
q_input.submit(document_chatbot.respond, [text_input, q_input, chatbot, picked_model], [q_input, chatbot])
|
| 22 |
clear.click(lambda: None, None, chatbot, queue=False)
|
| 23 |
|
|
|
|
| 24 |
demo.launch(debug=True)
|
document_chatbot.py
CHANGED
|
@@ -25,14 +25,13 @@ class DocumentChatbot:
|
|
| 25 |
self.init_mes = ["According to the document, ", "Based on the text, ", "I think, ", "According to the text, ", "Based on the document you provided, "]
|
| 26 |
|
| 27 |
|
| 28 |
-
|
|
|
|
|
|
|
| 29 |
self.llm = HuggingFaceHub(repo_id=model_name, model_kwargs={"temperature":0, "max_length":512})
|
| 30 |
self.chain = load_qa_chain(self.llm, chain_type="stuff")
|
| 31 |
self.embeddings = HuggingFaceEmbeddings()
|
| 32 |
return "Model and Token successfully loaded"
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
def respond(self, text_input, question, chat_history):
|
| 36 |
if not question or question.isspace():
|
| 37 |
return "Please enter a valid question.", chat_history
|
| 38 |
if text_input.startswith("http"):
|
|
|
|
| 25 |
self.init_mes = ["According to the document, ", "Based on the text, ", "I think, ", "According to the text, ", "Based on the document you provided, "]
|
| 26 |
|
| 27 |
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def respond(self, text_input, question, chat_history, model_name):
|
| 31 |
self.llm = HuggingFaceHub(repo_id=model_name, model_kwargs={"temperature":0, "max_length":512})
|
| 32 |
self.chain = load_qa_chain(self.llm, chain_type="stuff")
|
| 33 |
self.embeddings = HuggingFaceEmbeddings()
|
| 34 |
return "Model and Token successfully loaded"
|
|
|
|
|
|
|
|
|
|
| 35 |
if not question or question.isspace():
|
| 36 |
return "Please enter a valid question.", chat_history
|
| 37 |
if text_input.startswith("http"):
|