maliahson committed on
Commit
fd03ba3
·
verified ·
1 Parent(s): 7d53878

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -8
app.py CHANGED
@@ -5,7 +5,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
5
  from langchain_community.vectorstores import Chroma
6
  from langchain_text_splitters import RecursiveCharacterTextSplitter
7
  from langchain_community.document_loaders import TextLoader
8
- from langchain_huggingface import HuggingFaceEmbeddings # ✅ UPDATED IMPORT
9
 
10
  # Fix Torch SDPA issue
11
  os.environ["TORCH_USE_CUDA_DSA"] = "1"
@@ -30,7 +30,6 @@ vector_db = Chroma(persist_directory=CHROMA_DB_PATH, embedding_function=HuggingF
30
  PROMPT_TEMPLATE = """
31
  You are an expert research assistant. Use the provided context to answer the query.
32
  If unsure, state that you don't know. Be concise and factual (max 3 sentences).
33
-
34
  Query: {user_query}
35
  Context: {document_context}
36
  Answer:
@@ -53,8 +52,8 @@ def find_related_documents(query):
53
 
54
  # Function to generate answers using Hugging Face Model
55
  def generate_answer(question, uploaded_file=None):
56
- # Process file if uploaded
57
- if uploaded_file is not None:
58
  file_path = os.path.join(UPLOAD_FOLDER, uploaded_file.name)
59
  with open(file_path, "wb") as f:
60
  f.write(uploaded_file.read())
@@ -74,8 +73,8 @@ def generate_answer(question, uploaded_file=None):
74
 
75
  return answer
76
 
77
- # Gradio UI for chatbot with File Upload Support ✅ FIXED
78
- def chatbot_interface(question, uploaded_file):
79
  return generate_answer(question, uploaded_file)
80
 
81
  with gr.Blocks() as demo:
@@ -83,11 +82,11 @@ with gr.Blocks() as demo:
83
 
84
  with gr.Row():
85
  question_input = gr.Textbox(label="Ask a Question")
86
- file_input = gr.File(label="Upload a Document (Optional)")
87
 
88
  submit_btn = gr.Button("Get Answer")
89
  output_text = gr.Textbox(label="Answer")
90
 
91
  submit_btn.click(chatbot_interface, inputs=[question_input, file_input], outputs=output_text)
92
 
93
- demo.launch()
 
5
  from langchain_community.vectorstores import Chroma
6
  from langchain_text_splitters import RecursiveCharacterTextSplitter
7
  from langchain_community.document_loaders import TextLoader
8
+ from langchain_huggingface import HuggingFaceEmbeddings
9
 
10
  # Fix Torch SDPA issue
11
  os.environ["TORCH_USE_CUDA_DSA"] = "1"
 
30
  PROMPT_TEMPLATE = """
31
  You are an expert research assistant. Use the provided context to answer the query.
32
  If unsure, state that you don't know. Be concise and factual (max 3 sentences).
 
33
  Query: {user_query}
34
  Context: {document_context}
35
  Answer:
 
52
 
53
  # Function to generate answers using Hugging Face Model
54
  def generate_answer(question, uploaded_file=None):
55
+ # Handle optional file upload
56
+ if uploaded_file and hasattr(uploaded_file, "name"):
57
  file_path = os.path.join(UPLOAD_FOLDER, uploaded_file.name)
58
  with open(file_path, "wb") as f:
59
  f.write(uploaded_file.read())
 
73
 
74
  return answer
75
 
76
+ # Gradio UI for chatbot with truly optional file upload
77
+ def chatbot_interface(question, uploaded_file=None):
78
  return generate_answer(question, uploaded_file)
79
 
80
  with gr.Blocks() as demo:
 
82
 
83
  with gr.Row():
84
  question_input = gr.Textbox(label="Ask a Question")
85
+ file_input = gr.File(label="Upload a Document (Optional)", optional=True) # ✅ Explicitly optional
86
 
87
  submit_btn = gr.Button("Get Answer")
88
  output_text = gr.Textbox(label="Answer")
89
 
90
  submit_btn.click(chatbot_interface, inputs=[question_input, file_input], outputs=output_text)
91
 
92
+ demo.launch()