| | import streamlit as st |
| | from langchain_community.llms import HuggingFaceEndpoint |
| | from huggingface_hub import InferenceApi |
| | from dotenv import load_dotenv |
| | import os |
| |
|
| | |
# Load environment variables from a local .env file (expects
# HUGGINGFACEHUB_API_TOKEN to be defined there or in the process environment).
load_dotenv()

# Hugging Face Inference API token; will be None if the variable is unset,
# in which case the InferenceApi call below runs unauthenticated.
api_token = os.getenv('HUGGINGFACEHUB_API_TOKEN')

# Hosted-inference client for Facebook's BlenderBot 3B conversational model.
api = InferenceApi(repo_id="facebook/blenderbot-3B", token=api_token)

# Basic Streamlit page chrome; must run before any other st.* output calls.
st.set_page_config(page_title="Open AI assistant", page_icon=":robot:")
st.header("Facebook Model")
| |
|
| | if "sessionMessages" not in st.session_state: |
| | st.session_state.sessionMessages = [ |
| | {"role": "system", "content": "You are a helpful assistant."} |
| | ] |
| |
|
def load_answer(question):
    """Append *question* to the chat history, query the model, and return its reply.

    The structured history (system/user/assistant turns) is flattened into a
    plain-text "Role: text" transcript before being sent to the Inference API.
    The assistant's reply (or a fallback message on failure) is appended to
    the session history and returned.
    """
    st.session_state.sessionMessages.append({"role": "user", "content": question})

    # Flatten the message dicts into a newline-separated transcript.
    role_labels = {"system": "System", "user": "User", "assistant": "Assistant"}
    lines = []
    for message in st.session_state.sessionMessages:
        label = role_labels.get(message["role"])
        if label is not None:
            lines.append(f"{label}: {message['content']}")
    conversation_history = "\n".join(lines) + "\n"

    fallback = "Sorry, I couldn't process your request."
    try:
        response = api(conversation_history)
    except Exception:
        # Network/auth/model-loading failures from the Inference API should
        # degrade to the fallback message, not crash the Streamlit app.
        assistant_answer = fallback
    else:
        # The Inference API may return either a list of generation dicts or a
        # single dict (including {"error": ...} payloads). The original
        # `"error" not in response` check raised KeyError(0) on dict
        # responses; handle both shapes explicitly instead.
        if isinstance(response, list) and response and "generated_text" in response[0]:
            assistant_answer = response[0]["generated_text"]
        elif isinstance(response, dict) and "generated_text" in response:
            assistant_answer = response["generated_text"]
        else:
            assistant_answer = fallback

    st.session_state.sessionMessages.append(
        {"role": "assistant", "content": assistant_answer}
    )
    return assistant_answer
| |
|
def get_text():
    """Render the chat input box and return whatever the user typed."""
    return st.text_input("you:", key="input")
| |
|
# --- main interaction flow: read input, then answer on button press ---
user_input = get_text()
submit = st.button('Generate')

if submit:
    if user_input.strip():
        answer = load_answer(user_input)
        st.subheader("Answer:")
        st.write(answer)
    else:
        st.write("Please enter a question.")
| |
|