import gradio as gr
from PIL import Image
import os
from together import Together
import base64
import io
# Module-level Together API client; created lazily by initialize_client()
# on the first chat request so the app can start without credentials set.
client = None
def initialize_client(api_key=None):
    """Create the module-level Together client.

    Args:
        api_key: Optional Together API key. If omitted, falls back to the
            TOGETHER_API_KEY environment variable.

    Note: the original implementation ignored the ``api_key`` argument
    (it was unconditionally overwritten by the env var), printed the
    secret to stdout, and never passed it to ``Together()``. All three
    are fixed here.
    """
    global client
    if api_key is None:
        api_key = os.getenv("TOGETHER_API_KEY")
    # Pass the key explicitly; never print it (avoids leaking secrets to logs).
    client = Together(api_key=api_key)
def encode_image(image_path):
    """Load the image at *image_path* and return it as a base64 PNG string."""
    with Image.open(image_path) as img:
        buf = io.BytesIO()
        img.save(buf, format="PNG")
        png_bytes = buf.getvalue()
    return base64.b64encode(png_bytes).decode("utf-8")
def bot_streaming(message, history):
    """Stream a chat completion for *message* into the Gradio chat *history*.

    Args:
        message: Either a plain string, or a ``gr.MultimodalTextbox`` payload
            dict with optional ``"text"`` and ``"files"`` keys (the first
            file, if any, is sent to the model as an image).
        history: Gradio ``Chatbot`` history — a list of
            ``(user_text, assistant_text)`` pairs.

    Yields:
        Updated copies of *history* as response tokens arrive, so the UI
        renders the assistant reply incrementally.
    """
    max_new_tokens = 250
    temperature = 0.7

    # Text shown for the user's turn in the Chatbot widget. Using .get()
    # instead of message["text"] avoids a KeyError on text-less payloads,
    # and precomputing it lets the error path below show readable text.
    user_display_message = (
        message.get("text", "") if isinstance(message, dict) else message
    )

    # Lazily create the Together client on first use.
    if client is None:
        try:
            initialize_client()
        except Exception as e:
            # BUG FIX: append the display text, not the raw payload dict,
            # so the Chatbot renders the failed turn correctly.
            history.append(
                (user_display_message, f"Error initializing client: {str(e)}")
            )
            yield history
            return

    prompt = "You are a helpful AI assistant. Analyze the image provided (if any) and respond to the user's query or comment."
    messages = [{"role": "system", "content": prompt}]

    # Replay prior turns so the model has conversation context.
    for user_msg, assistant_msg in history:
        if isinstance(user_msg, str):  # plain text turn
            messages.append(
                {"role": "user", "content": [{"type": "text", "text": user_msg}]}
            )
        elif isinstance(user_msg, dict):  # turn that included an image
            image_base64 = encode_image(user_msg["image_path"])
            messages.append(
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": user_msg.get("text", "")},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": f"data:image/png;base64,{image_base64}"
                            },
                        },
                    ],
                }
            )
        messages.append(
            {"role": "assistant", "content": [{"type": "text", "text": assistant_msg}]}
        )

    # Build the current user turn: optional text plus the first attached image.
    user_message_content = []
    if isinstance(message, dict):
        if message.get("text"):
            user_message_content.append({"type": "text", "text": message["text"]})
        if message.get("files"):
            image_base64 = encode_image(message["files"][0])
            user_message_content.append(
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/png;base64,{image_base64}"},
                }
            )
    elif isinstance(message, str):
        user_message_content.append({"type": "text", "text": message})
    messages.append({"role": "user", "content": user_message_content})

    # Append a placeholder assistant slot that the stream fills in below.
    # (A new list is built so the caller's history object is not mutated.)
    history = history + [(user_display_message, "")]

    try:
        stream = client.chat.completions.create(
            model="meta-llama/Llama-Vision-Free",
            messages=messages,
            max_tokens=max_new_tokens,
            temperature=temperature,
            stream=True,
        )
        response = ""
        for chunk in stream:
            if (
                chunk.choices
                and chunk.choices[0].delta
                and chunk.choices[0].delta.content is not None
            ):
                response += chunk.choices[0].delta.content
                # Update the assistant's response in place and re-render.
                history[-1] = (user_display_message, response)
                yield history
        if not response:
            history[-1] = (
                user_display_message,
                "No response generated. Please try again.",
            )
            yield history
    except Exception as e:
        # The Together API rejects oversized payloads with a 413; give a
        # friendlier hint in that case, otherwise surface the raw error.
        error_message = (
            "The image is too large. Please try with a smaller image or compress the existing one."
            if "Request Entity Too Large" in str(e)
            else f"An error occurred: {str(e)}"
        )
        history[-1] = (user_display_message, error_message)
        yield history
# Gradio UI: a chatbot with a multimodal input box (text + image upload).
with gr.Blocks() as demo:
    gr.Markdown("# LodhranGPT Medical Chatbot")
    # BUG FIX: the original description text was garbled
    # ("...about it.or enter the text to chat about medical").
    gr.Markdown(
        "Upload a medical image and start chatting about it, "
        "or simply type a question to chat about medical topics."
    )
    chatbot = gr.Chatbot()
    msg = gr.MultimodalTextbox(label="")
    clear = gr.Button("Clear")

    # Stream bot replies into the chatbot when the user submits a message.
    msg.submit(
        bot_streaming,
        [msg, chatbot],
        chatbot,
    )
    # Reset the conversation.
    clear.click(lambda: None, None, chatbot, queue=False)

if __name__ == "__main__":
    demo.launch(share=True)