Bofandra committed on
Commit
adc1e0f
·
verified ·
1 Parent(s): 73aca0a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -6
app.py CHANGED
@@ -5,7 +5,7 @@ from langchain.vectorstores import FAISS
5
  from langchain.embeddings import HuggingFaceEmbeddings
6
  from langchain.text_splitter import RecursiveCharacterTextSplitter
7
  from langchain.document_loaders import PyPDFLoader
8
- from langchain_huggingface import ChatHuggingFaceEndpoint
9
  import tempfile
10
 
11
  # Initialize global variables
@@ -29,12 +29,15 @@ def process_pdf(file):
29
  retriever = vectorstore.as_retriever(search_kwargs={"k": 4})
30
 
31
  # ✅ Wrap DeepSeek model properly
32
- llm = ChatHuggingFaceEndpoint(
33
- repo_id="deepseek-ai/DeepSeek-R1-0528",
34
- temperature=0.7,
35
- max_new_tokens=512
 
 
 
36
  )
37
-
38
  retrieval_chain = ConversationalRetrievalChain.from_llm(
39
  llm=llm,
40
  retriever=retriever,
 
5
  from langchain.embeddings import HuggingFaceEmbeddings
6
  from langchain.text_splitter import RecursiveCharacterTextSplitter
7
  from langchain.document_loaders import PyPDFLoader
8
+ from langchain_huggingface import HuggingFaceEndpoint
9
  import tempfile
10
 
11
  # Initialize global variables
 
29
  retriever = vectorstore.as_retriever(search_kwargs={"k": 4})
30
 
31
  # ✅ Wrap DeepSeek model properly
32
+ llm = HuggingFaceEndpoint(
33
+ repo_id="deepseek-ai/DeepSeek-R1-0528", # Example model
34
+ task="text-generation",
35
+ max_new_tokens=512,
36
+ do_sample=False,
37
+ repetition_penalty=1.03,
38
+ provider="auto", # or specify a provider like "hyperbolic"
39
  )
40
+
41
  retrieval_chain = ConversationalRetrievalChain.from_llm(
42
  llm=llm,
43
  retriever=retriever,