Hisab Cloud committed on
Commit
0dd56ba
·
1 Parent(s): 61f2ad2

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -11
app.py CHANGED
@@ -67,22 +67,22 @@ def create_conversational_chain(vector_store):
67
  #streaming=True,
68
  #callbacks=[StreamingStdOutCallbackHandler()],
69
  #model_type="llama", config={'max_new_tokens': 500, 'temperature': 0.01})
70
- # llm = Replicate(
71
- # streaming = True,
72
- # model = "replicate/llama-2-70b-chat:58d078176e02c219e11eb4da5a02a7830a283b14cf8f94537af893ccff5ee781",
73
- # callbacks=[StreamingStdOutCallbackHandler()],
74
- # input = {"temperature": 0.01, "max_length" :500,"top_p":1})
75
  # llm = GooglePalm(
76
  # streaming = True,
77
  # model = "models/text-bison-001", # "google/flan-t5-large"
78
  # callbacks=[StreamingStdOutCallbackHandler()],
79
  # input = {"temperature": 0.7, "max_length" :800,"top_p":1})
80
- llm = AzureOpenAI(
81
- streaming = True,
82
- deployment_name="HCloudChat",
83
- model_name="gpt-35-turbo",
84
- callbacks=[StreamingStdOutCallbackHandler()])
85
- # input = {"temperature": 0.7,"top_p":1})
86
  memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
87
 
88
  chain = ConversationalRetrievalChain.from_llm(llm=llm, chain_type='stuff',
 
67
  #streaming=True,
68
  #callbacks=[StreamingStdOutCallbackHandler()],
69
  #model_type="llama", config={'max_new_tokens': 500, 'temperature': 0.01})
70
+ llm = Replicate(
71
+ streaming = True,
72
+ model = "meta/llama-2-70b-chat:02e509c789964a7ea8736978a43525956ef40397be9033abf9fd2badfe68c9e3",
73
+ callbacks=[StreamingStdOutCallbackHandler()],
74
+ input = {"temperature": 0.5, "max_length" :2000,"top_p":1})
75
  # llm = GooglePalm(
76
  # streaming = True,
77
  # model = "models/text-bison-001", # "google/flan-t5-large"
78
  # callbacks=[StreamingStdOutCallbackHandler()],
79
  # input = {"temperature": 0.7, "max_length" :800,"top_p":1})
80
+ # llm = AzureOpenAI(
81
+ # streaming = True,
82
+ # deployment_name="HCloudChat",
83
+ # model_name="gpt-35-turbo",
84
+ # callbacks=[StreamingStdOutCallbackHandler()],
85
+ # input = {"temperature": 0.5,"top_p":1})
86
  memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
87
 
88
  chain = ConversationalRetrievalChain.from_llm(llm=llm, chain_type='stuff',