| | import os |
| | import streamlit as st |
| | from dotenv import load_dotenv |
| | from langchain import HuggingFaceHub |
| |
|
| | |
# Load environment variables from a local .env file (if present) so the
# Hugging Face token can be supplied without exporting it in the shell.
load_dotenv()

# Hub API token read from the environment; will be None if not configured.
HUGGINGFACE_API_TOKEN = os.getenv("HUGGINGFACE_API_TOKEN")
| |
|
| | |
def load_answer(question):
    """Query the Mistral-7B-Instruct model on the Hugging Face Hub.

    Parameters
    ----------
    question : str
        The user's prompt text.

    Returns
    -------
    str
        The model's answer, or a human-readable ``"Error: ..."`` string when
        the token is missing or the API call fails. The caller writes the
        return value straight to the page, so failures must not propagate
        as exceptions.
    """
    # Fail fast with an actionable message instead of letting HuggingFaceHub
    # raise a confusing authentication error from deep inside the request.
    if not HUGGINGFACE_API_TOKEN:
        return "Error: HUGGINGFACE_API_TOKEN is not set. Add it to your .env file."

    try:
        llm = HuggingFaceHub(
            repo_id="mistralai/Mistral-7B-Instruct-v0.3",
            huggingfacehub_api_token=HUGGINGFACE_API_TOKEN,
            # Low temperature -> mostly deterministic, on-topic answers.
            model_kwargs={"temperature": 0.1},
        )
        return llm.predict(question)
    except Exception as e:  # UI boundary: surface any API/network failure as text
        return f"Error: {str(e)}"
| |
|
| | |
# Page chrome: must run before any other Streamlit calls render output.
st.set_page_config(page_title="Hugging Face Demo", page_icon=":robot:")
st.header("Hugging Face Demo")
| |
|
| | |
def get_text():
    """Render the single-line question box and return whatever the user typed."""
    return st.text_input("You: ", key="input")
| |
|
| | |
# Drive the page: collect input, then either render an answer or nudge the
# user to type a question before pressing the button.
user_input = get_text()

submit = st.button('Generate')

if submit:
    if user_input:
        # A question was entered — query the model and display the result.
        response = load_answer(user_input)
        st.subheader("Answer:")
        st.write(response)
    else:
        # Button pressed with an empty input box.
        st.warning("Please enter a question.")