Hisab Cloud committed on
Commit
7b43b9c
·
1 Parent(s): 36421f8

upload change to Palm

Browse files
Files changed (2) hide show
  1. app.py +15 -3
  2. requirements.txt +2 -1
app.py CHANGED
@@ -3,6 +3,7 @@ from streamlit_chat import message
3
  from langchain.chains import ConversationalRetrievalChain
4
  from langchain.embeddings import HuggingFaceEmbeddings
5
  from langchain.llms import CTransformers
 
6
  from langchain.llms import Replicate
7
  from langchain.text_splitter import CharacterTextSplitter
8
  from langchain.vectorstores import FAISS
@@ -10,6 +11,8 @@ from langchain.memory import ConversationBufferMemory
10
  from langchain.document_loaders import PyPDFLoader
11
  from langchain.document_loaders import TextLoader
12
  from langchain.document_loaders import Docx2txtLoader
 
 
13
  from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
14
  import os
15
  from dotenv import load_dotenv
@@ -63,11 +66,16 @@ def create_conversational_chain(vector_store):
63
  #streaming=True,
64
  #callbacks=[StreamingStdOutCallbackHandler()],
65
  #model_type="llama", config={'max_new_tokens': 500, 'temperature': 0.01})
66
- llm = Replicate(
 
 
 
 
 
67
  streaming = True,
68
- model = "replicate/llama-2-70b-chat:58d078176e02c219e11eb4da5a02a7830a283b14cf8f94537af893ccff5ee781",
69
  callbacks=[StreamingStdOutCallbackHandler()],
70
- input = {"temperature": 0.01, "max_length" :500,"top_p":1})
71
  memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
72
 
73
  chain = ConversationalRetrievalChain.from_llm(llm=llm, chain_type='stuff',
@@ -98,6 +106,10 @@ def main():
98
  loader = PyPDFLoader(temp_file_path)
99
  elif file_extension == ".docx" or file_extension == ".doc":
100
  loader = Docx2txtLoader(temp_file_path)
 
 
 
 
101
  elif file_extension == ".txt":
102
  loader = TextLoader(temp_file_path)
103
 
 
3
  from langchain.chains import ConversationalRetrievalChain
4
  from langchain.embeddings import HuggingFaceEmbeddings
5
  from langchain.llms import CTransformers
6
+ from langchain.llms import GooglePalm
7
  from langchain.llms import Replicate
8
  from langchain.text_splitter import CharacterTextSplitter
9
  from langchain.vectorstores import FAISS
 
11
  from langchain.document_loaders import PyPDFLoader
12
  from langchain.document_loaders import TextLoader
13
  from langchain.document_loaders import Docx2txtLoader
14
+ from langchain.document_loaders import CSVLoader
15
+ from langchain.document_loaders import UnstructuredExcelLoader
16
  from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
17
  import os
18
  from dotenv import load_dotenv
 
66
  #streaming=True,
67
  #callbacks=[StreamingStdOutCallbackHandler()],
68
  #model_type="llama", config={'max_new_tokens': 500, 'temperature': 0.01})
69
+ # llm = Replicate(
70
+ # streaming = True,
71
+ # model = "replicate/llama-2-70b-chat:58d078176e02c219e11eb4da5a02a7830a283b14cf8f94537af893ccff5ee781",
72
+ # callbacks=[StreamingStdOutCallbackHandler()],
73
+ # input = {"temperature": 0.01, "max_length" :500,"top_p":1})
74
+ llm = GooglePalm(  # NOTE(review): original commit spelled this "GooglePlam" — a NameError, since the import above is `from langchain.llms import GooglePalm`
75
  streaming = True,
76
+ model = "google/flan-t5-large", #" models/text-bison-001"
77
  callbacks=[StreamingStdOutCallbackHandler()],
78
+ input = {"temperature": 0.07, "max_length" :1024,"top_p":0.95})
79
  memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
80
 
81
  chain = ConversationalRetrievalChain.from_llm(llm=llm, chain_type='stuff',
 
106
  loader = PyPDFLoader(temp_file_path)
107
  elif file_extension == ".docx" or file_extension == ".doc":
108
  loader = Docx2txtLoader(temp_file_path)
109
+ elif file_extension == ".xlsx" or file_extension == ".xls":
110
+ loader = UnstructuredExcelLoader(temp_file_path)
111
+ elif file_extension == ".csv":
112
+ loader = CSVLoader(temp_file_path)
113
  elif file_extension == ".txt":
114
  loader = TextLoader(temp_file_path)
115
 
requirements.txt CHANGED
@@ -11,4 +11,5 @@ huggingface-hub
11
  pypdf
12
  python-dotenv
13
  replicate
14
- docx2txt
 
 
11
  pypdf
12
  python-dotenv
13
  replicate
14
+ docx2txt
15
+ google-generativeai