maliahson committed on
Commit
7d53878
·
verified ·
1 Parent(s): ed2c83f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -9
app.py CHANGED
@@ -1,11 +1,14 @@
1
  import gradio as gr
2
  import os
 
3
  from transformers import AutoModelForCausalLM, AutoTokenizer
4
  from langchain_community.vectorstores import Chroma
5
  from langchain_text_splitters import RecursiveCharacterTextSplitter
6
  from langchain_community.document_loaders import TextLoader
7
- from langchain_community.embeddings import HuggingFaceEmbeddings
8
- import torch
 
 
9
 
10
  # Load Hugging Face model & tokenizer
11
  MODEL_NAME = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"
@@ -49,9 +52,9 @@ def find_related_documents(query):
49
  return vector_db.similarity_search(query)
50
 
51
  # Function to generate answers using Hugging Face Model
52
- def generate_answer(question, uploaded_file):
53
  # Process file if uploaded
54
- if uploaded_file:
55
  file_path = os.path.join(UPLOAD_FOLDER, uploaded_file.name)
56
  with open(file_path, "wb") as f:
57
  f.write(uploaded_file.read())
@@ -71,13 +74,20 @@ def generate_answer(question, uploaded_file):
71
 
72
  return answer
73
 
74
- # Gradio UI for chatbot
 
 
 
75
  with gr.Blocks() as demo:
76
  gr.Markdown("### 📘 DocuMind AI - DeepSeek Qwen Chatbot")
77
 
78
- chatbot = gr.ChatInterface(generate_answer)
79
- file_input = gr.File(label="Upload a Document (Optional)")
80
-
81
- chatbot.chatbot.input_components.append(file_input)
 
 
 
 
82
 
83
  demo.launch()
 
1
  import gradio as gr
2
  import os
3
+ import torch
4
  from transformers import AutoModelForCausalLM, AutoTokenizer
5
  from langchain_community.vectorstores import Chroma
6
  from langchain_text_splitters import RecursiveCharacterTextSplitter
7
  from langchain_community.document_loaders import TextLoader
8
+ from langchain_huggingface import HuggingFaceEmbeddings # ✅ UPDATED IMPORT
9
+
10
+ # Fix Torch SDPA issue
11
+ os.environ["TORCH_USE_CUDA_DSA"] = "1"
12
 
13
  # Load Hugging Face model & tokenizer
14
  MODEL_NAME = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"
 
52
  return vector_db.similarity_search(query)
53
 
54
  # Function to generate answers using Hugging Face Model
55
+ def generate_answer(question, uploaded_file=None):
56
  # Process file if uploaded
57
+ if uploaded_file is not None:
58
  file_path = os.path.join(UPLOAD_FOLDER, uploaded_file.name)
59
  with open(file_path, "wb") as f:
60
  f.write(uploaded_file.read())
 
74
 
75
  return answer
76
 
77
+ # Gradio UI for chatbot with File Upload Support ✅ FIXED
78
+ def chatbot_interface(question, uploaded_file):
79
+ return generate_answer(question, uploaded_file)
80
+
81
  with gr.Blocks() as demo:
82
  gr.Markdown("### 📘 DocuMind AI - DeepSeek Qwen Chatbot")
83
 
84
+ with gr.Row():
85
+ question_input = gr.Textbox(label="Ask a Question")
86
+ file_input = gr.File(label="Upload a Document (Optional)")
87
+
88
+ submit_btn = gr.Button("Get Answer")
89
+ output_text = gr.Textbox(label="Answer")
90
+
91
+ submit_btn.click(chatbot_interface, inputs=[question_input, file_input], outputs=output_text)
92
 
93
  demo.launch()