10tenfirestorm committed on
Commit
0d2c7f0
·
verified ·
1 Parent(s): 040e351

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -5
app.py CHANGED
@@ -4,11 +4,13 @@ os.environ["USER_AGENT"] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"
4
 
5
  import gradio as gr
6
  from langchain_community.document_loaders import WebBaseLoader, PyMuPDFLoader
7
- from langchain_huggingface import HuggingFaceEmbeddings
8
  from langchain_community.vectorstores import FAISS
9
- from langchain_huggingface import HuggingFaceEndpoint
10
  from langchain.chains.question_answering import load_qa_chain
11
 
 
 
 
 
12
  # Get the token from the secrets
13
  hf_token = os.environ.get("HF_TOKEN")
14
 
@@ -23,6 +25,7 @@ def load_website(url):
23
  return docs
24
 
25
def setup_vector_store(docs):
    """Embed *docs* with MiniLM sentence embeddings and index them in FAISS.

    Args:
        docs: iterable of LangChain Document objects to index.

    Returns:
        A FAISS vector store ready for similarity retrieval.
    """
    embedder = HuggingFaceEmbeddings(
        model_name="sentence-transformers/all-MiniLM-L6-v2"
    )
    return FAISS.from_documents(docs, embedder)
@@ -31,12 +34,14 @@ def ask_question(query, vector_store):
31
  retriever = vector_store.as_retriever()
32
  docs = retriever.get_relevant_documents(query)
33
 
34
- # Using HuggingFaceEndpoint with specific task parameters
 
35
  llm = HuggingFaceEndpoint(
36
  repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1",
37
  task="text-generation",
38
  max_new_tokens=512,
39
- temperature=0.5,
 
40
  huggingfacehub_api_token=hf_token
41
  )
42
 
@@ -58,6 +63,9 @@ def process_input(weblink, pdf_file, question):
58
  if pdf_file:
59
  docs.extend(load_pdf(pdf_file.name))
60
 
 
 
 
61
  vector_store = setup_vector_store(docs)
62
  response = ask_question(question, vector_store)
63
  return response
@@ -73,7 +81,8 @@ demo = gr.Interface(
73
  gr.Textbox(label="Ask a Question")
74
  ],
75
  outputs=gr.Textbox(label="Final Answer"),
76
- title="Web & PDF QA System"
 
77
  )
78
 
79
  if __name__ == "__main__":
 
4
 
5
  import gradio as gr
6
  from langchain_community.document_loaders import WebBaseLoader, PyMuPDFLoader
 
7
  from langchain_community.vectorstores import FAISS
 
8
  from langchain.chains.question_answering import load_qa_chain
9
 
10
+ # --- THE NEW MODERN IMPORTS ---
11
+ # These replace the old tools that were causing the errors
12
+ from langchain_huggingface import HuggingFaceEndpoint, HuggingFaceEmbeddings
13
+
14
  # Get the token from the secrets
15
  hf_token = os.environ.get("HF_TOKEN")
16
 
 
25
  return docs
26
 
27
def setup_vector_store(docs):
    """Build a FAISS vector store over *docs*.

    Uses the HuggingFaceEmbeddings class (sentence-transformers
    all-MiniLM-L6-v2 model) to embed each document, then indexes the
    embeddings with FAISS for later retrieval.
    """
    # Use the new HuggingFaceEmbeddings class.
    model = "sentence-transformers/all-MiniLM-L6-v2"
    embeddings = HuggingFaceEmbeddings(model_name=model)
    store = FAISS.from_documents(docs, embeddings)
    return store
 
34
  retriever = vector_store.as_retriever()
35
  docs = retriever.get_relevant_documents(query)
36
 
37
+ # Use the new HuggingFaceEndpoint
38
+ # This automatically handles the connection without the "post" error
39
  llm = HuggingFaceEndpoint(
40
  repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1",
41
  task="text-generation",
42
  max_new_tokens=512,
43
+ do_sample=True,
44
+ temperature=0.7,
45
  huggingfacehub_api_token=hf_token
46
  )
47
 
 
63
  if pdf_file:
64
  docs.extend(load_pdf(pdf_file.name))
65
 
66
+ if not docs:
67
+ return "Could not load any content."
68
+
69
  vector_store = setup_vector_store(docs)
70
  response = ask_question(question, vector_store)
71
  return response
 
81
  gr.Textbox(label="Ask a Question")
82
  ],
83
  outputs=gr.Textbox(label="Final Answer"),
84
+ title="Web & PDF QA System",
85
+ description="Ask questions about any website or PDF document."
86
  )
87
 
88
  if __name__ == "__main__":