Spaces:
Sleeping
Sleeping
File size: 3,675 Bytes
8f277d4 8027f5b 8f277d4 8027f5b b9ec415 76126c6 b9ec415 8f277d4 8027f5b 8f277d4 8027f5b 313fa00 2cb3fe5 6572f8b 2cb3fe5 1ada90f ae2c7fe 56902eb 2562c68 2cb3fe5 3cec7a5 2cb3fe5 abf715e 2cb3fe5 00876a6 2cb3fe5 57202e4 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 |
import os
import time

import streamlit as st
from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage, SystemMessage
def get_chatmodel_response(question):
    """Send *question* to the chat model and return the reply text.

    Appends the user's question and the model's answer to the shared
    ``st.session_state['flowmessages']`` history so the conversation
    accumulates across calls.  Retries up to three times on rate-limit
    errors, sleeping 5 s between attempts.

    Returns the answer string, or ``None`` when the call ultimately fails.
    """
    max_retries = 3
    # Record the question exactly once — the original appended it on every
    # retry, duplicating it in the history sent back to the model.
    st.session_state['flowmessages'].append(HumanMessage(content=question))
    for _attempt in range(max_retries):
        try:
            answer = chat(st.session_state['flowmessages'])
        except Exception as e:
            print(f"Error: {e}")
            if "Rate limit" in str(e):
                print("Rate limit exceeded. Waiting and retrying...")
                time.sleep(5)  # Adjust the waiting time as needed.
                continue
            # Non-rate-limit failure: retrying will not help, so bail out
            # here instead of falling through to the "max retries" message.
            print("Unhandled exception. Please try again later.")
            return None
        st.session_state['flowmessages'].append(AIMessage(content=answer.content))
        return answer.content
    print("Exceeded the maximum number of retries. Please try again later.")
    return None
# Streamlit app setup
# Page chrome: browser-tab title/icon, centered layout, sidebar collapsed.
# (A previously commented-out background/input CSS block was removed here;
# the live styling is applied via st.markdown further down.)
st.set_page_config(
    page_title="Doctor AI",
    page_icon="💊",
    layout="centered",
    initial_sidebar_state="collapsed",
)
st.header("Hello, I'm a Doctor AI. How can I help you?")

# Load OPENAI_API_KEY (and any other secrets) from a local .env file
# before the ChatOpenAI client below is constructed, since the client
# reads its key from the environment.
load_dotenv()

# Shared chat model used by get_chatmodel_response(); temperature 0.5
# allows some variety in phrasing while staying mostly focused.
chat = ChatOpenAI(temperature=0.5)

# Seed the conversation history once per browser session with the system
# prompt; subsequent reruns reuse the accumulated messages.
if 'flowmessages' not in st.session_state:
    st.session_state['flowmessages'] = [
        SystemMessage(content=(
            "You are an AI Doctor assistant. A user will give an input of "
            "what he is suffering from or what health problem he has; you "
            "should suggest the user the correct medicine and tell the user "
            "how to recover quickly from it. Give a short and sharp answer. "
            "If the input is not a body, health, or other medical issue, "
            "tell the user who you are and ask the user to provide the "
            "appropriate input."
        ))
    ]
# Streamlit UI
input_question = st.text_input("Type here.", key="input",autocomplete="off")
# Apply custom HTML and CSS for styling
st.markdown(
"""
<style>
.stTextInput {
border-radius: 15px;
padding: 12px;
margin-top: 10px;
margin-bottom: 10px;
box-shadow: 2px 2px 5px #888888;
border: 1px solid #dddddd;
font-size: 16px;
width: 700px;
/* Add any additional styling here */
}
/* Hide the "press enter to apply" message */
.stTextInput::-webkit-input-placeholder {
color: transparent;
}
.stTextInput:-moz-placeholder {
color: transparent;
}
.stTextInput::-moz-placeholder {
color: transparent;
}
.stTextInput:-ms-input-placeholder {
color: transparent;
}
</style>
""",
unsafe_allow_html=True
)
submit = st.button("Submit")

# When the button is clicked, run the model and render its reply.
if submit:
    # Busy indicator while the chat-model call is in flight.
    with st.spinner("Analyzing..."):
        reply = get_chatmodel_response(input_question)
        # Guard first: a None reply means all retries failed.
        if reply is None:
            st.subheader("Error: Unable to get response. Please try again later.")
        else:
            st.write(reply)