Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -9,7 +9,7 @@ import streamlit as st
|
|
| 9 |
#from langchain.llms import OpenAI
|
| 10 |
|
| 11 |
#New import from langchain, which replaces the above
|
| 12 |
-
from langchain_openai import
|
| 13 |
|
| 14 |
#When deployed on Hugging Face Spaces, these values have to be passed using the Variables & Secrets setting, as shown in the video :)
|
| 15 |
#import os
|
|
@@ -18,7 +18,7 @@ os.environ["OPENAI_API_KEY"] = "sk-proj-RjUY8U-Fo9sR3NRiAppGItyftxLUA65zYoPlkGPI
|
|
| 18 |
#Function to return the response
|
| 19 |
def load_answer(question):
|
| 20 |
# "text-davinci-003" model is deprecated, so using the latest one https://platform.openai.com/docs/deprecations
|
| 21 |
-
llm =
|
| 22 |
|
| 23 |
#LangChain now recommends using the invoke function for the call below :)
|
| 24 |
answer=llm.invoke(question)
|
|
|
|
| 9 |
#from langchain.llms import OpenAI
|
| 10 |
|
| 11 |
#New import from langchain, which replaces the above
|
| 12 |
+
from langchain_openai import OpenAI
|
| 13 |
|
| 14 |
#When deployed on Hugging Face Spaces, these values have to be passed using the Variables & Secrets setting, as shown in the video :)
|
| 15 |
#import os
|
|
|
|
| 18 |
#Function to return the response
|
| 19 |
def load_answer(question):
|
| 20 |
# "text-davinci-003" model is deprecated, so using the latest one https://platform.openai.com/docs/deprecations
|
| 21 |
+
llm = OpenAI(model_name="gpt-3.5-turbo-instruct",temperature=0)
|
| 22 |
|
| 23 |
#LangChain now recommends using the invoke function for the call below :)
|
| 24 |
answer=llm.invoke(question)
|