Spaces:
Runtime error
Runtime error
File size: 4,758 Bytes
6371421 1b951a9 bbf1bd2 6371421 9fb130e 29105a9 9fb130e 6371421 ad8b994 6371421 ea4cca9 6371421 bbf1bd2 6371421 9fb130e 6371421 bbf1bd2 6371421 bbf1bd2 6371421 7b6d7c0 bbf1bd2 6371421 55b9115 6371421 bbf1bd2 6371421 bbf1bd2 6371421 bbf1bd2 6371421 bbf1bd2 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 |
import gradio as gr
import os
from openai import OpenAI
# Set your OpenAI API key
#openai.api_key = config.OPENAI_API_KEY
# Module-level OpenAI client shared by both Gradio interfaces below.
# NOTE(review): os.environ['OPENAI_API_KEY'] raises KeyError at import time
# if the variable is unset — intentional fail-fast, presumably; confirm.
client = OpenAI(api_key=os.environ['OPENAI_API_KEY'])
def converse(x, y, z):
    """Echo the third argument unchanged.

    Placeholder handler; not referenced elsewhere in this file.
    """
    return z
def reset(z):
    """Ignore *z* and return a pair of fresh empty lists.

    Presumably wired to a clear/reset control to blank chat history
    and state — not referenced elsewhere in this file.
    """
    cleared_history = []
    cleared_state = []
    return cleared_history, cleared_state
# Initial conversation state: a single system prompt defining the advisor's
# behavior. NOTE: module-level mutable state — provide_suggestions() appends
# every turn to this list, so one process holds one shared conversation.
messages = [{
    "role": "system",
    "content": (
        "You are a medical advisor specializing in humanitarian health. "
        "Familiar with all ICRC guidelines, your task is to methodically "
        "gather information about a patient's condition. When presented with "
        "partial or unclear information, ask clarifying questions one at a "
        "time (wait for an answer before asking another question) until you "
        "can make a well-informed suggestion. If, after several attempts, "
        "adequate details are still missing, advise the user on the next "
        "steps they should consider (e.g., lab tests, consultations). Once "
        "you believe you have all necessary details, provide treatment "
        "suggestions based on the information. Conclude each session by "
        "reminding users of the importance of consulting with medical "
        "professionals and offering a summary. Remind the users that you are "
        "an AI chatbot and cannot be held accountable for any mistakes that "
        # Fixed typos in the original prompt: "a patient hard" -> "patient
        # harm", and "the users ... is aware" -> "are aware".
        "could lead to patient harm. Patient safety is your priority so "
        "ensure that the users of the chatbot are aware. Avoid using "
        "quotation marks and always adhere to ICRC and medical guidelines. "
        "Do not reveal your nature as an AI language model."
    ),
}]
def provide_suggestions(user_message, history):
    """Chat handler for gr.ChatInterface.

    Records the user's turn in the shared module-level ``messages`` list,
    asks the model for a reply, records that reply, and returns its text.
    The ``history`` argument supplied by Gradio is ignored — the global
    transcript is the source of truth.
    """
    global messages
    # Persist the user's turn in the shared transcript.
    messages.append({"role": "user", "content": user_message})
    # Ask the model for the next assistant turn.
    completion = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=messages,
    )
    reply = completion.choices[0].message.content
    # Persist the assistant's turn as well, then hand the text to Gradio.
    messages.append({"role": "assistant", "content": reply})
    return reply
# Define and launch the Gradio Chat Interface
# Primary "chatbot" tab: wires provide_suggestions(user_message, history)
# into a Gradio chat UI. NOTE(review): the backslash-continuations run
# *inside* the description string literal, so any leading whitespace on the
# continued lines becomes part of the displayed text — confirm rendering.
iface = gr.ChatInterface(fn=provide_suggestions, title="MedGuide+", \
description="Introducing our AI medical advisor, an innovative and knowledgeable \
resource designed to provide prompt and reliable support in the field of healthcare \
and medicine, with extensive training on a wide range of medical literature and \
guidelines, offering valuable insights and recommendations while emphasizing the \
importance of consulting with qualified healthcare professionals for personalized medical advice and care. \
For more info, check out: https://github.com/jmesplana/MedGuide_Plus")
# Rest of your Gradio Interface setup
def get_medical_summary_from_chat(messages):
    """Summarize a consultation transcript in IPS format.

    Parameters
    ----------
    messages : list[dict]
        Chat transcript as {"role", "content"} dicts. A transcript holding
        only the system prompt (length <= 1) means no consultation occurred.

    Returns
    -------
    str
        The model-generated summary, or a fixed notice when there is no
        consultation data.
    """
    if len(messages) <= 1:
        return "No consultation data available."
    extraction_prompt = {
        "role": "user",
        "content": (
            "Based on the chat details, please provide a detailed summary "
            "following the international patient summary (IPS) format. "
            "break down the summary in categories following the IPS "
            "category. Put the content of each category in bullet points "
            "(if needed)."
        ),
    }
    # Build a fresh list rather than appending to the caller's list: the
    # original mutated the shared global transcript, so every summary
    # request permanently injected the extraction prompt into the chat.
    prompt_messages = messages + [extraction_prompt]
    # Get a response from the model
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=prompt_messages,
    )
    # Extract and return the model's message text.
    return response.choices[0].message.content
# Rest of your Gradio summary interface and main execution block
def show_summary():
    """Gradio handler for the summary tab.

    Summarizes the current shared conversation (the module-level
    ``messages`` transcript) and returns the summary text.
    """
    return get_medical_summary_from_chat(messages)
# Create the secondary Gradio interface
# "summary" tab: no inputs, single text output produced by show_summary().
# NOTE(review): live=True with no inputs — presumably triggers on load;
# each render costs one model call. Confirm intended refresh behavior.
summary_layout = gr.Interface(fn=show_summary,
inputs=[],
outputs="text", # Single text output
live=True,
title="Chat Summary",
description="Summary of the consultation based on the chat data."
)
#summary_layout.launch()
# Combine the chat and summary interfaces as two tabs of a single app.
demo = gr.TabbedInterface([iface, summary_layout],tab_names=['chatbot','summary'])
if __name__ == "__main__":
    demo.launch()