Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,49 +1,38 @@
|
|
| 1 |
-
#Hello! It seems like you want to import the Streamlit library in Python. Streamlit is a powerful open-source framework used for building web applications with interactive data visualizations and machine learning models. To import Streamlit, you'll need to ensure that you have it installed in your Python environment.
|
| 2 |
-
#Once you have Streamlit installed, you can import it into your Python script using the import statement,
|
| 3 |
-
import os
|
| 4 |
-
|
| 5 |
import streamlit as st
|
| 6 |
|
| 7 |
-
#
|
| 8 |
-
|
| 9 |
-
#from langchain.llms import OpenAI
|
| 10 |
-
|
| 11 |
-
#New import from langchain, which replaces the above
|
| 12 |
-
from langchain_openai import OpenAI
|
| 13 |
-
|
| 14 |
-
#When deployed on huggingface spaces, this value has to be passed using the Variables & Secrets setting, as shown in the video :)
|
| 15 |
-
#import os
|
| 16 |
-
os.environ["OPENAI_API_KEY"] = "<REDACTED>"  # SECURITY: a live-looking OpenAI API key was committed here in plain text. Revoke that key immediately and supply it via the Space's Variables & Secrets (read from the environment) instead of hard-coding it.
|
| 17 |
|
| 18 |
-
#
|
|
|
|
|
|
|
| 19 |
def load_answer(question):
|
| 20 |
-
|
| 21 |
-
|
|
|
|
|
|
|
|
|
|
| 22 |
|
| 23 |
-
|
| 24 |
-
answer=llm.invoke(question)
|
| 25 |
return answer
|
| 26 |
|
| 27 |
|
| 28 |
-
#
|
| 29 |
-
|
| 30 |
-
|
|
|
|
|
|
|
| 31 |
|
| 32 |
-
#Gets the user input
|
| 33 |
def get_text():
|
| 34 |
-
input_text = st.text_input("You:
|
| 35 |
return input_text
|
| 36 |
|
| 37 |
|
| 38 |
-
user_input=get_text()
|
| 39 |
-
response = load_answer(user_input)
|
| 40 |
|
| 41 |
-
submit = st.button(
|
| 42 |
-
|
| 43 |
-
#If generate button is clicked
|
| 44 |
-
if submit:
|
| 45 |
|
|
|
|
|
|
|
| 46 |
st.subheader("Answer:")
|
| 47 |
-
|
| 48 |
st.write(response)
|
| 49 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import streamlit as st
|
| 2 |
|
| 3 |
+
# NEW: Hugging Face Endpoint (replacement for OpenAI)
|
| 4 |
+
from langchain_huggingface import HuggingFaceEndpoint
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 5 |
|
| 6 |
+
# =============================
|
| 7 |
+
# Function to return the response
|
| 8 |
+
# =============================
|
| 9 |
def load_answer(question):
    """Return the model's answer to *question* as produced by ``llm.invoke``.

    Queries the Hugging Face Inference Endpoint for ``google/flan-t5-large``
    with ``temperature=0`` and ``max_new_tokens=256`` (the values configured
    in this app).

    Fix: the original rebuilt a ``HuggingFaceEndpoint`` client on every call —
    and Streamlit re-runs the whole script on each interaction, so a new
    client was constructed per click. The client is now built once, lazily,
    and cached on the function object; behavior of the answer itself is
    unchanged.
    """
    llm = getattr(load_answer, "_llm", None)
    if llm is None:
        # One-time lazy construction; reused across Streamlit reruns.
        llm = HuggingFaceEndpoint(
            repo_id="google/flan-t5-large",
            temperature=0,
            max_new_tokens=256,
        )
        load_answer._llm = llm
    answer = llm.invoke(question)
    return answer
|
| 18 |
|
| 19 |
|
| 20 |
+
# --- App UI ------------------------------------------------------------
# Page config must run before any other Streamlit output call.
_TITLE = "LangChain QuestionandAnswerApp"
st.set_page_config(page_title=_TITLE, page_icon="🤖")
st.header(f"{_TITLE} (Free Model)")
|
| 25 |
|
|
|
|
| 26 |
def get_text():
    """Render the question text box and return whatever the user typed."""
    # key="input" keeps the widget's state stable across Streamlit reruns.
    return st.text_input("You:", key="input")
|
| 29 |
|
| 30 |
|
| 31 |
+
# Read the user's question, then answer it only once the button is pressed
# AND the text box is non-empty (empty input skips the model call).
user_input = get_text()
submit = st.button("Generate")

if user_input and submit:
    response = load_answer(user_input)
    st.subheader("Answer:")
    st.write(response)
|
|
|