Hisab Cloud committed on
Commit
118c42c
·
1 Parent(s): e25f306

Change to AzureOpenAI

Browse files
Files changed (2) hide show
  1. app.py +10 -4
  2. requirements.txt +2 -1
app.py CHANGED
@@ -5,6 +5,7 @@ from langchain.embeddings import HuggingFaceEmbeddings
5
  from langchain.llms import CTransformers
6
  from langchain.llms import GooglePalm
7
  from langchain.llms import Replicate
 
8
  from langchain.text_splitter import CharacterTextSplitter
9
  from langchain.vectorstores import FAISS
10
  from langchain.memory import ConversationBufferMemory
@@ -71,11 +72,16 @@ def create_conversational_chain(vector_store):
71
  # model = "replicate/llama-2-70b-chat:58d078176e02c219e11eb4da5a02a7830a283b14cf8f94537af893ccff5ee781",
72
  # callbacks=[StreamingStdOutCallbackHandler()],
73
  # input = {"temperature": 0.01, "max_length" :500,"top_p":1})
74
- llm = GooglePalm(
 
 
 
 
 
75
  streaming = True,
76
- model = "models/text-bison-001", #" "google/flan-t5-large"
77
- callbacks=[StreamingStdOutCallbackHandler()],
78
- input = {"temperature": 0.07, "max_length" :800,"top_p":0.95})
79
  memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
80
 
81
  chain = ConversationalRetrievalChain.from_llm(llm=llm, chain_type='stuff',
 
5
  from langchain.llms import CTransformers
6
  from langchain.llms import GooglePalm
7
  from langchain.llms import Replicate
8
+ from langchain.llms import AzureOpenAI
9
  from langchain.text_splitter import CharacterTextSplitter
10
  from langchain.vectorstores import FAISS
11
  from langchain.memory import ConversationBufferMemory
 
72
  # model = "replicate/llama-2-70b-chat:58d078176e02c219e11eb4da5a02a7830a283b14cf8f94537af893ccff5ee781",
73
  # callbacks=[StreamingStdOutCallbackHandler()],
74
  # input = {"temperature": 0.01, "max_length" :500,"top_p":1})
75
+ # llm = GooglePalm(
76
+ # streaming = True,
77
+ # model = "models/text-bison-001", # "google/flan-t5-large"
78
+ # callbacks=[StreamingStdOutCallbackHandler()],
79
+ # input = {"temperature": 0.07, "max_length" :800,"top_p":0.95})
80
+ llm = AzureOpenAI(
81
  streaming = True,
82
+ deployment_name="HCloudChat",
83
+ model_name="gpt-35-turbo",
84
+ callbacks=[StreamingStdOutCallbackHandler()])
85
  memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
86
 
87
  chain = ConversationalRetrievalChain.from_llm(llm=llm, chain_type='stuff',
requirements.txt CHANGED
@@ -12,4 +12,5 @@ pypdf
12
  python-dotenv
13
  replicate
14
  docx2txt
15
- google-generativeai
 
 
12
  python-dotenv
13
  replicate
14
  docx2txt
15
+ google-generativeai
16
+ openai