| |
| import os |
| import keyfile |
| import warnings |
| import streamlit as st |
| from pydantic import BaseModel |
| warnings.filterwarnings("ignore") |
|
|
| |
| from langchain_google_genai import ChatGoogleGenerativeAI |
| from langchain.schema import HumanMessage, SystemMessage, AIMessage |
|
|
| |
# Basic Streamlit page chrome for the chat UI.
st.set_page_config(page_title = "Magical Healer")
st.header("Welcome, What help do you need?")
|
|
|
|
|
|
# NOTE(review): a local `class AIMessage(BaseModel)` previously lived here and
# shadowed `langchain.schema.AIMessage` imported above. Appending that pydantic
# model to the chat history made the next `llm.invoke()` fail, because LangChain
# chat models expect BaseMessage instances in the message list. The shadowing
# class is removed so the langchain AIMessage import is used throughout; its
# constructor is call-compatible (`AIMessage(content=...)`).
| |
| |
# Initialize the conversation history once per Streamlit session.
# BUG FIX: the original had two identical `"sessionMessages" not in
# st.session_state` checks; the first seeded an empty list, so the second
# branch — the one that installed the SystemMessage persona (and also
# misspelled the key as `sessionMessage`) — could never run. Seed the persona
# in the single init instead.
if "sessionMessages" not in st.session_state:
    st.session_state["sessionMessages"] = [
        SystemMessage(
            content="You are a medieval magical healer known for your peculiar sarcasm"
        )
    ]
|
|
| |
# Expose the Gemini API key (read from the local `keyfile` module) via the
# environment variable the langchain-google-genai client looks up.
os.environ["GOOGLE_API_KEY"] = keyfile.GOOGLEKEY


# Chat model used for every turn of the conversation.
# NOTE(review): convert_system_message_to_human folds the SystemMessage into
# the first human turn (some Gemini endpoints reject a system role); this flag
# is deprecated in newer langchain-google-genai releases — confirm against the
# pinned version.
llm = ChatGoogleGenerativeAI(
    model="gemini-1.5-pro",
    temperature=0.7,
    convert_system_message_to_human= True
)
|
|
|
|
| |
def load_answer(question):
    """Append *question* to the session history, query the LLM, and return
    the reply text.

    If the response lacks a string ``content`` attribute, an error is shown
    in the UI and a fallback message is returned; in that case the history
    keeps the user turn without a matching assistant turn.
    """
    history = st.session_state.sessionMessages
    history.append(HumanMessage(content=question))

    reply = llm.invoke(history)
    reply_text = getattr(reply, 'content', None)

    if isinstance(reply_text, str):
        # Record the assistant turn so the next invoke() sees full context.
        history.append(AIMessage(content=reply_text))
        return reply_text

    st.error("Invalid response received from AI.")
    return "Sorry, I couldn't process your request."
|
|
| |
| |
| |
| |
| |
|
|
| |
def get_text():
    """Render the chat text input widget and return its current value (str)."""
    # BUG FIX: the original passed `key = input` — the *builtin function*
    # `input` — as the widget key instead of a string. Streamlit widget keys
    # must be strings; "input" gives the intended stable widget identity.
    return st.text_input("You: ", key="input")
|
|
|
|
| |
# --- UI driver: read the user's message and generate a reply on demand ---
user_input = get_text()
submit = st.button("Generate")

if submit:
    # Robustness: don't fire an LLM call on an empty prompt.
    if user_input:
        resp = load_answer(user_input)
        st.subheader("Answer: ")
        # BUG FIX: the original called st.write(resp, key = 1); st.write takes
        # no `key` keyword, so the invalid kwarg is dropped.
        st.write(resp)
    else:
        st.warning("Please enter a question first.")