Hisab Cloud committed on
Commit
0bae6bf
·
1 Parent(s): 6d28a50

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -8
app.py CHANGED
@@ -67,16 +67,16 @@ def create_conversational_chain(vector_store):
67
  #streaming=True,
68
  #callbacks=[StreamingStdOutCallbackHandler()],
69
  #model_type="llama", config={'max_new_tokens': 500, 'temperature': 0.01})
70
- llm = Replicate(
71
- streaming = True,
72
- model = "meta/llama-2-70b:a52e56fee2269a78c9279800ec88898cecb6c8f1df22a6483132bea266648f00",
73
- callbacks=[StreamingStdOutCallbackHandler()],
74
- input = {"temperature": 0.75, "max_length" :500,"top_p":1})
75
- # llm = GooglePalm(
76
  # streaming = True,
77
- # model = "models/text-bison-001", # "google/flan-t5-large"
78
  # callbacks=[StreamingStdOutCallbackHandler()],
79
- # input = {"temperature": 0.7, "max_length" :800,"top_p":1})
 
 
 
 
 
80
  # llm = AzureOpenAI(
81
  # streaming = True,
82
  # deployment_name="HCloudChat",
 
67
  #streaming=True,
68
  #callbacks=[StreamingStdOutCallbackHandler()],
69
  #model_type="llama", config={'max_new_tokens': 500, 'temperature': 0.01})
70
+ # llm = Replicate(
 
 
 
 
 
71
  # streaming = True,
72
+ # model = "meta/llama-2-70b:a52e56fee2269a78c9279800ec88898cecb6c8f1df22a6483132bea266648f00",
73
  # callbacks=[StreamingStdOutCallbackHandler()],
74
+ # input = {"temperature": 0.75, "max_length" :500,"top_p":1})
75
+ llm = GooglePalm(
76
+ streaming = True,
77
+ model = "models/text-bison-001", # "google/flan-t5-large"
78
+ callbacks=[StreamingStdOutCallbackHandler()],
79
+ input = {"temperature": 0.7, "max_length" :800,"top_p":1})
80
  # llm = AzureOpenAI(
81
  # streaming = True,
82
  # deployment_name="HCloudChat",