"""Streamlit Q&A demo: answers a question about a user-supplied context
using an EleutherAI GPT-Neo model hosted on the Hugging Face Hub."""

from langchain import HuggingFaceHub
import os
from dotenv import load_dotenv
import streamlit as st

# Load environment variables from .env — HuggingFaceHub reads
# HUGGINGFACEHUB_API_TOKEN from the environment, so this must run
# before the model is instantiated.  (Was commented out: bug.)
load_dotenv()


def get_ai_response(context, question):
    """Query the Hugging Face Hub model with a context + question prompt.

    Args:
        context: Background text the answer should be grounded in.
        question: The user's question about that context.

    Returns:
        The raw string completion returned by the model.
    """
    llm = HuggingFaceHub(
        repo_id='EleutherAI/gpt-neo-2.7B',
        model_kwargs={
            'temperature': 0.6,   # mild randomness for natural answers
            'max_length': 1000,   # cap total generated tokens
        },
    )
    # Single-string prompt: HuggingFaceHub's __call__ expects plain text,
    # not a dict of inputs.
    prompt = f"Context: {context}\nQuestion: {question}\nAnswer:"
    return llm(prompt)


## Initialize our Streamlit app
st.set_page_config(page_title="Q&A Demo")
st.header("Langchain Application")

context = st.text_area("Context: ", key="context")
question = st.text_input("Question: ", key="question")
submit = st.button("Ask the question")

## If ask button is clicked
if submit:
    # Avoid a pointless remote call when either field is blank.
    if not context.strip() or not question.strip():
        st.warning("Please provide both a context and a question.")
    else:
        response = get_ai_response(context, question)
        st.subheader("The Response is")
        st.write(response)