arshadrana committed on
Commit
9ff0c01
·
verified ·
1 Parent(s): b685155

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +152 -0
app.py ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from PIL import Image
3
+ import os
4
+ from together import Together
5
+ import base64
6
+ import io
7
+
8
+ # Initialize Together client
9
+ client = None
10
+
11
+ def initialize_client(api_key=None):
12
+ global client
13
+ # Fetch the API key from the environment if it's not provided directly
14
+ api_key = os.getenv("TOGETHER_API_KEY")
15
+ print(api_key)
16
+
17
+ client = Together()
18
+
19
+
20
def encode_image(image_path):
    """Load the image at *image_path* and return it as base64 PNG text."""
    png_buffer = io.BytesIO()
    with Image.open(image_path) as img:
        # Re-encode as PNG regardless of the source format so the data URL
        # built by callers is always consistent.
        img.save(png_buffer, format="PNG")
    raw_png = png_buffer.getvalue()
    return base64.b64encode(raw_png).decode("utf-8")
25
+
26
+
27
def bot_streaming(message, history):
    """Stream a Together chat completion into the Gradio chatbot history.

    Args:
        message: Either a plain string, or a MultimodalTextbox dict with
            optional ``"text"`` and ``"files"`` keys.
        history: List of ``(user, assistant)`` tuples currently shown.

    Yields:
        The updated history after each streamed token (and on errors).
    """
    max_new_tokens = 250
    temperature = 0.7

    # Lazily create the global client on first call.
    if client is None:
        try:
            initialize_client()
        except Exception as e:
            history.append((message, f"Error initializing client: {str(e)}"))
            yield history
            return

    # NOTE(review): this system prompt is about radiator accident damage,
    # while the UI below advertises a medical chatbot — confirm which
    # domain is actually intended.
    prompt = """
    Determine if the radiator in the image shows signs of being involved in an accident or not
    """

    messages = [{"role": "system", "content": prompt}]

    # Replay prior turns so the model sees the whole conversation.
    for user_msg, assistant_msg in history:
        if isinstance(user_msg, str):  # text-only turn
            messages.append(
                {"role": "user", "content": [{"type": "text", "text": user_msg}]}
            )
        elif isinstance(user_msg, dict):  # turn that carried an image
            image_base64 = encode_image(user_msg["image_path"])
            messages.append(
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": user_msg.get("text", "")},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": f"data:image/png;base64,{image_base64}"
                            },
                        },
                    ],
                }
            )
        messages.append(
            {"role": "assistant", "content": [{"type": "text", "text": assistant_msg}]}
        )

    # Build the current user turn (text and/or the first attached file).
    user_message_content = []
    if isinstance(message, dict):
        if message.get("text"):
            user_message_content.append({"type": "text", "text": message["text"]})
        if message.get("files"):
            image_path = message["files"][0]
            image_base64 = encode_image(image_path)
            user_message_content.append(
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/png;base64,{image_base64}"},
                }
            )
    elif isinstance(message, str):
        user_message_content.append({"type": "text", "text": message})

    messages.append({"role": "user", "content": user_message_content})

    # BUGFIX: the original used message["text"] here, which raises KeyError
    # when a MultimodalTextbox submission contains only files; use .get with
    # a default, matching the .get("text") used a few lines above.
    user_display_message = (
        message.get("text", "") if isinstance(message, dict) else message
    )
    history = history + [(user_display_message, "")]

    try:
        stream = client.chat.completions.create(
            model="meta-llama/Llama-Vision-Free",
            messages=messages,
            max_tokens=max_new_tokens,
            temperature=temperature,
            stream=True,
        )

        response = ""
        for chunk in stream:
            if (
                chunk.choices
                and chunk.choices[0].delta
                and chunk.choices[0].delta.content is not None
            ):
                response += chunk.choices[0].delta.content
                # Update the assistant's in-progress reply in place.
                history[-1] = (user_display_message, response)
                yield history

        if not response:
            history[-1] = (
                user_display_message,
                "No response generated. Please try again.",
            )
            yield history

    except Exception as e:
        # Map the one known payload-size failure to a friendly message;
        # everything else surfaces as a generic error in the chat.
        error_message = (
            "The image is too large. Please try with a smaller image or compress the existing one."
            if "Request Entity Too Large" in str(e)
            else f"An error occurred: {str(e)}"
        )
        history[-1] = (user_display_message, error_message)
        yield history
131
+
132
+
133
# --- Gradio UI ---------------------------------------------------------------
# NOTE(review): the copy here says "medical chatbot" while bot_streaming's
# system prompt asks about radiator accident damage — confirm the intended
# domain before shipping.
with gr.Blocks() as demo:
    gr.Markdown("# LodhranGPT Medical Chatbot")
    # BUGFIX: the original string was broken English
    # ("...about it.or enter the text to chat about medical").
    gr.Markdown(
        "Upload a medical image and start chatting about it, "
        "or enter text to chat about medical topics."
    )

    chatbot = gr.Chatbot()
    msg = gr.MultimodalTextbox(label="")
    clear = gr.Button("Clear")

    # bot_streaming is a generator, so Gradio streams each yielded history
    # update into the chatbot as it arrives.
    msg.submit(
        bot_streaming,
        [msg, chatbot],
        chatbot,
    )
    clear.click(lambda: None, None, chatbot, queue=False)

if __name__ == "__main__":
    demo.launch(debug=True)