# NOTE(review): the lines "Spaces: / Sleeping / Sleeping" were a Hugging Face
# Spaces status banner captured during copy/paste — not part of the program.
# Q&A Chatbot
#
# Environment loading is currently disabled; re-enable if the
# HUGGINGFACEHUB_API_TOKEN should come from a local .env file.
# from dotenv import load_dotenv
# load_dotenv()  # take environment variables from .env

import os

import langchain_community
import streamlit as st
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import HuggingFaceEndpoint
## Function to load the AI model and get responses; the prompt template lives here too.
def get_model_response(
    question: str,
    repo_id: str = "mistralai/Mistral-7B-Instruct-v0.2",
    max_length: int = 128,
    temperature: float = 0.5,
):
    """Ask the hosted LLM a question and return the generated answer text.

    Args:
        question: The user's question, substituted into the prompt template.
        repo_id: Hugging Face model repo to query (default preserves original).
        max_length: Generation length cap passed to the endpoint.
        temperature: Sampling temperature passed to the endpoint.

    Returns:
        The model's answer as a string (the chain's "text" output); if the
        chain result has no "text" key, the raw result is returned unchanged.
    """
    # NOTE(review): the endpoint client is rebuilt on every call; acceptable
    # for a demo, but consider caching it (e.g. st.cache_resource) if hot.
    llm = HuggingFaceEndpoint(
        repo_id=repo_id, max_length=max_length, temperature=temperature)

    template = """Question: {question}
Answer:"""
    prompt = PromptTemplate.from_template(template)
    llm_chain = LLMChain(prompt=prompt, llm=llm)

    # LLMChain.invoke returns a dict echoing the input plus the output under
    # "text"; previously the whole dict (question included) was shown to the
    # user. Extract just the answer, falling back to the raw result.
    response = llm_chain.invoke({"question": question})
    if isinstance(response, dict):
        return response.get("text", response)
    return response
## Initialize our Streamlit app
st.set_page_config(page_title="Simple Chatbot")
st.header("Langchain Application - Simple Chatbot")

# `input` shadowed the builtin; the widget key "input" is kept so any existing
# session state still binds to the same widget.
user_question = st.text_input("Input: ", key="input")
submit = st.button("Ask the question")

## If ask button is clicked
if submit:
    # BUG FIX: the model was previously invoked on every Streamlit rerun —
    # before the button was pressed and even with an empty input — firing a
    # remote inference call each time. Only call the endpoint on submit.
    response = get_model_response(user_question)
    st.subheader("The response is: ")
    st.write(response)