|
|
from langchain import HuggingFaceHub |
|
|
import os |
|
|
from dotenv import load_dotenv |
|
|
|
|
|
|
|
|
|
|
|
import streamlit as st |
|
|
|
|
|
|
|
|
def get_ai_response(context, question):
    """Ask GPT-Neo 2.7B (hosted on the HuggingFace Hub) a question.

    Builds a plain "Context / Question / Answer" prompt from the two
    inputs and returns the raw completion produced by the model.

    Args:
        context: Background text the model should answer from.
        question: The user's question about that context.

    Returns:
        The generated answer string as returned by the LLM wrapper.
    """
    # NOTE(review): a fresh Hub client is constructed on every call;
    # assumes the HuggingFace API token is present in the environment.
    model = HuggingFaceHub(
        repo_id='EleutherAI/gpt-neo-2.7B',
        model_kwargs={'temperature': 0.6, 'max_length': 1000},
    )

    query = f"Context: {context}\nQuestion: {question}\nAnswer:"
    return model(query)
|
|
|
|
|
|
|
|
st.set_page_config(page_title="Q&A Demo")

# Fix: `load_dotenv` was imported at the top of the file but never called,
# so a HUGGINGFACEHUB_API_TOKEN stored in a local .env file was never
# loaded into the environment. Call it before any model access.
load_dotenv()

st.header("Langchain Application")

# Simple Q&A form: free-text context, a question about it, and a submit
# button that triggers the model call.
context = st.text_area("Context: ", key="context")
question = st.text_input("Question: ", key="question")
submit = st.button("Ask the question")

if submit:
    # Guard against empty inputs so we don't spend a remote API call on a
    # prompt with no content.
    if not context.strip() or not question.strip():
        st.warning("Please provide both a context and a question.")
    else:
        response = get_ai_response(context, question)
        st.subheader("The Response is")
        st.write(response)
|
|
|