from langchain import HuggingFaceHub
from dotenv import load_dotenv
import streamlit as st

# Pull API credentials (e.g. HUGGINGFACEHUB_API_TOKEN) from a local .env file
# into the process environment before any model client is constructed.
load_dotenv()

# Hosted google/flan-t5-large reached through the HuggingFace Hub inference API.
# NOTE(review): some HF inference endpoints reject temperature == 0
# ("temperature must be strictly positive") — TODO confirm this value works
# against the deployed endpoint.
llm_huggingface = HuggingFaceHub(
    repo_id="google/flan-t5-large",
    model_kwargs={"temperature": 0.0, "max_length": 64},
)

# Basic Streamlit page chrome.
st.set_page_config(page_title="Chatbot")
st.header('Langchain Application')
def get_huggingface_response(question):
    """Forward *question* to the module-level HuggingFace LLM and return its reply."""
    return llm_huggingface(question)
| |
|
| | |
| | user_input = st.text_input("Input: ", key="input") |
| |
|
| | |
| | submit = st.button('Generate') |
| |
|
| |
|
| | if submit: |
| | |
| | response = get_huggingface_response(user_input) |
| | st.subheader("The response is ") |
| | st.write(response) |
| |
|