odaly committed on
Commit
e2109be
Β·
verified Β·
1 Parent(s): f348b55

update pages/02_πŸŒ‹_Multimodal.py

Browse files
Files changed (1) hide show
  1. pages/02_πŸŒ‹_Multimodal.py +197 -198
pages/02_πŸŒ‹_Multimodal.py CHANGED
@@ -1,198 +1,197 @@
1
- import streamlit as st
2
- import requests
3
- import base64
4
- from PIL import Image
5
- from io import BytesIO
6
- import json
7
- import ollama
8
-
9
- st.set_page_config(
10
- page_title="LLaVA Playground",
11
- page_icon="πŸŒ‹",
12
- layout="wide",
13
- initial_sidebar_state="expanded",
14
- )
15
-
16
-
17
- def img_to_base64(image):
18
- """
19
- Convert an image to base64 format.
20
-
21
- Args:
22
- image: PIL.Image - The image to be converted.
23
- Returns:
24
- str: The base64 encoded image.
25
- """
26
- buffered = BytesIO()
27
- image.save(buffered, format="PNG")
28
- return base64.b64encode(buffered.getvalue()).decode()
29
-
30
-
31
- def get_allowed_model_names(models_info: dict) -> tuple:
32
- """
33
- Returns a tuple containing the names of the allowed models.
34
- """
35
- allowed_models = ["bakllava:latest", "llava:latest"]
36
- return tuple(
37
- model
38
- for model in allowed_models
39
- if model in [m["name"] for m in models_info["models"]]
40
- )
41
-
42
-
43
- def main():
44
- st.subheader("LLaVA 1.6 Playground", divider="red", anchor=False)
45
-
46
- models_info = ollama.list()
47
- available_models = get_allowed_model_names(models_info)
48
- missing_models = set(["bakllava:latest", "llava:latest"]) - set(available_models)
49
-
50
- col_1, col_2 = st.columns(2)
51
- with col_1.popover("βš™οΈ Model Management", help="Manage models here"):
52
- if not available_models:
53
- st.error("No allowed models are available.", icon="😳")
54
- model_to_download = st.selectbox(
55
- "Select a model to download", ["bakllava:latest", "llava:latest"]
56
- )
57
- if st.button(f"Download {model_to_download}"):
58
- try:
59
- ollama.pull(model_to_download)
60
- st.toast(
61
- f"""Downloaded model: {
62
- model_to_download}""",
63
- icon="βœ…",
64
- )
65
- st.rerun()
66
- except Exception as e:
67
- st.error(
68
- f"""Failed to download model: {
69
- model_to_download}. Error: {str(e)}""",
70
- icon="😳",
71
- )
72
- else:
73
- if missing_models:
74
- model_to_download = st.selectbox(
75
- ":green[**πŸ“₯ DOWNLOAD MODEL**]", list(missing_models)
76
- )
77
- if st.button(f":green[Download **_{model_to_download}_**]"):
78
- try:
79
- ollama.pull(model_to_download)
80
- st.toast(
81
- f"""Downloaded model: {
82
- model_to_download}""",
83
- icon="βœ…",
84
- )
85
- st.rerun()
86
- except Exception as e:
87
- st.error(
88
- f"""Failed to download model: {
89
- model_to_download}. Error: {str(e)}""",
90
- icon="😳",
91
- )
92
-
93
- selected_model = st.selectbox(":red[**⛔️ DELETE MODEL**]", available_models)
94
- if st.button(f"Delete **_{selected_model}_**", type="primary"):
95
- try:
96
- ollama.delete(selected_model)
97
- st.toast(f"Deleted model: {selected_model}", icon="βœ…")
98
- st.rerun()
99
- except Exception as e:
100
- st.error(
101
- f"""Failed to delete model: {
102
- selected_model}. Error: {str(e)}""",
103
- icon="😳",
104
- )
105
-
106
- if not available_models:
107
- return
108
-
109
- selected_model = col_2.selectbox(
110
- "Pick a model available locally on your system ↓", available_models, key=1
111
- )
112
-
113
- if "chats" not in st.session_state:
114
- st.session_state.chats = []
115
-
116
- if "uploaded_file_state" not in st.session_state:
117
- st.session_state.uploaded_file_state = None
118
-
119
- uploaded_file = st.file_uploader(
120
- "Upload an image for analysis", type=["png", "jpg", "jpeg"]
121
- )
122
-
123
- col1, col2 = st.columns(2)
124
-
125
- with col2:
126
- container1 = st.container(height=500, border=True)
127
- with container1:
128
- if uploaded_file is not None:
129
- st.session_state.uploaded_file_state = uploaded_file.getvalue()
130
- image = Image.open(BytesIO(st.session_state.uploaded_file_state))
131
- st.image(image, caption="Uploaded image")
132
-
133
- with col1:
134
- container2 = st.container(height=500, border=True)
135
-
136
- if uploaded_file is not None:
137
- for message in st.session_state.chats:
138
- avatar = "πŸŒ‹" if message["role"] == "assistant" else "🫠"
139
- with container2.chat_message(message["role"], avatar=avatar):
140
- if message["role"] == "user":
141
- st.markdown(message["content"])
142
- else:
143
- st.markdown(message["content"])
144
-
145
- if user_input := st.chat_input(
146
- "Question about the image...", key="chat_input"
147
- ):
148
- st.session_state.chats.append({"role": "user", "content": user_input})
149
- container2.chat_message("user", avatar="🫠").markdown(user_input)
150
-
151
- image_base64 = img_to_base64(image)
152
- API_URL = "http://localhost:11434/api/generate"
153
- headers = {
154
- "Content-Type": "application/json",
155
- "Accept": "application/json",
156
- }
157
- data = {
158
- "model": selected_model,
159
- "prompt": user_input,
160
- "images": [image_base64],
161
- }
162
-
163
- with container2.chat_message("assistant", avatar="πŸŒ‹"):
164
- with st.spinner(":blue[processing...]"):
165
- response = requests.post(API_URL, json=data, headers=headers)
166
- if response.status_code == 200:
167
- response_lines = response.text.split("\n")
168
- llava_response = ""
169
- for line in response_lines:
170
- if line.strip(): # Skip empty lines
171
- try:
172
- response_data = json.loads(line)
173
- if "response" in response_data:
174
- llava_response += response_data["response"]
175
- except json.JSONDecodeError:
176
- pass # Skip invalid JSON lines
177
- if llava_response:
178
- st.markdown(llava_response)
179
- else:
180
- st.error(
181
- f"""No response received from {
182
- selected_model}.""",
183
- icon="😳",
184
- )
185
- else:
186
- st.error(
187
- f"""Failed to get a response from {
188
- selected_model}.""",
189
- icon="😳",
190
- )
191
-
192
- st.session_state.chats.append(
193
- {"role": "assistant", "content": llava_response}
194
- )
195
-
196
-
197
- if __name__ == "__main__":
198
- main()
 
1
import streamlit as st
import requests
import base64
from PIL import Image
from io import BytesIO
import json
import ollama  # BUG FIX: restored — main() calls ollama.list/pull/delete; without this the page crashes with NameError

# Page chrome; st.set_page_config must be the first Streamlit call on the page.
st.set_page_config(
    page_title="LLaVA Playground",
    page_icon="πŸŒ‹",
    layout="wide",
    initial_sidebar_state="expanded",
)
14
+
15
+
16
def img_to_base64(image):
    """
    Encode an image as a base64 string of its PNG bytes.

    Args:
        image: PIL.Image - The image to be converted.

    Returns:
        str: The base64-encoded PNG representation.
    """
    png_buffer = BytesIO()
    image.save(png_buffer, format="PNG")
    raw_bytes = png_buffer.getvalue()
    return base64.b64encode(raw_bytes).decode()
28
+
29
+
30
def get_allowed_model_names(models_info: dict) -> tuple:
    """
    Return the supported LLaVA model names that are installed locally.

    Args:
        models_info: dict - ollama.list() response; expects a "models" key
            whose entries each carry a "name" field.

    Returns:
        tuple: Allowed model names found locally, in fixed preference order.
    """
    installed = {entry["name"] for entry in models_info["models"]}
    permitted = ["bakllava:latest", "llava:latest"]
    return tuple(name for name in permitted if name in installed)
40
+
41
+
42
def main():
    """
    Render the LLaVA multimodal playground page.

    Offers download/delete management for the two supported vision models,
    accepts an uploaded image, and runs a chat about it against the local
    Ollama HTTP API (/api/generate), streaming the newline-delimited JSON
    response into a single assistant message.
    """
    st.subheader("LLaVA 1.6 Playground", divider="red", anchor=False)

    models_info = ollama.list()
    available_models = get_allowed_model_names(models_info)
    missing_models = set(["bakllava:latest", "llava:latest"]) - set(available_models)

    col_1, col_2 = st.columns(2)
    with col_1.popover("βš™οΈ Model Management", help="Manage models here"):
        if not available_models:
            st.error("No allowed models are available.", icon="😳")
            model_to_download = st.selectbox(
                "Select a model to download", ["bakllava:latest", "llava:latest"]
            )
            if st.button(f"Download {model_to_download}"):
                try:
                    ollama.pull(model_to_download)
                    st.toast(f"Downloaded model: {model_to_download}", icon="βœ…")
                    st.rerun()
                except Exception as e:
                    st.error(
                        f"Failed to download model: {model_to_download}. Error: {str(e)}",
                        icon="😳",
                    )
        else:
            if missing_models:
                model_to_download = st.selectbox(
                    ":green[**πŸ“₯ DOWNLOAD MODEL**]", list(missing_models)
                )
                if st.button(f":green[Download **_{model_to_download}_**]"):
                    try:
                        ollama.pull(model_to_download)
                        st.toast(f"Downloaded model: {model_to_download}", icon="βœ…")
                        st.rerun()
                    except Exception as e:
                        st.error(
                            f"Failed to download model: {model_to_download}. Error: {str(e)}",
                            icon="😳",
                        )

            selected_model = st.selectbox(":red[**⛔️ DELETE MODEL**]", available_models)
            if st.button(f"Delete **_{selected_model}_**", type="primary"):
                try:
                    ollama.delete(selected_model)
                    st.toast(f"Deleted model: {selected_model}", icon="βœ…")
                    st.rerun()
                except Exception as e:
                    st.error(
                        f"Failed to delete model: {selected_model}. Error: {str(e)}",
                        icon="😳",
                    )

    # Nothing usable locally: the management popover above already showed the error.
    if not available_models:
        return

    selected_model = col_2.selectbox(
        "Pick a model available locally on your system ↓", available_models, key=1
    )

    if "chats" not in st.session_state:
        st.session_state.chats = []

    if "uploaded_file_state" not in st.session_state:
        st.session_state.uploaded_file_state = None

    uploaded_file = st.file_uploader(
        "Upload an image for analysis", type=["png", "jpg", "jpeg"]
    )

    col1, col2 = st.columns(2)

    with col2:
        container1 = st.container(height=500, border=True)
        with container1:
            if uploaded_file is not None:
                st.session_state.uploaded_file_state = uploaded_file.getvalue()
                image = Image.open(BytesIO(st.session_state.uploaded_file_state))
                st.image(image, caption="Uploaded image")

    with col1:
        container2 = st.container(height=500, border=True)

    if uploaded_file is not None:
        # Replay the conversation so far (user/assistant messages render the same way;
        # the original had an identical if/else here, collapsed into one call).
        for message in st.session_state.chats:
            avatar = "πŸŒ‹" if message["role"] == "assistant" else "🫠"
            with container2.chat_message(message["role"], avatar=avatar):
                st.markdown(message["content"])

        if user_input := st.chat_input("Question about the image...", key="chat_input"):
            st.session_state.chats.append({"role": "user", "content": user_input})
            container2.chat_message("user", avatar="🫠").markdown(user_input)

            image_base64 = img_to_base64(image)
            API_URL = "http://localhost:11434/api/generate"
            headers = {
                "Content-Type": "application/json",
                "Accept": "application/json",
            }
            data = {
                "model": selected_model,
                "prompt": user_input,
                "images": [image_base64],
            }

            # BUG FIX: bind before the request — the original only assigned this in
            # the 200 branch, so any non-200 response raised NameError at the append.
            llava_response = ""
            with container2.chat_message("assistant", avatar="πŸŒ‹"):
                with st.spinner(":blue[processing...]"):
                    response = requests.post(API_URL, json=data, headers=headers)
                    if response.status_code == 200:
                        # Ollama streams newline-delimited JSON objects; concatenate
                        # every "response" fragment into one answer.
                        for line in response.text.split("\n"):
                            if not line.strip():
                                continue  # skip blank lines
                            try:
                                payload = json.loads(line)
                            except json.JSONDecodeError:
                                continue  # skip invalid JSON lines
                            if "response" in payload:
                                llava_response += payload["response"]
                        if llava_response:
                            st.markdown(llava_response)
                        else:
                            st.error(f"No response received from {selected_model}.", icon="😳")
                    else:
                        st.error(f"Failed to get a response from {selected_model}.", icon="😳")

            # Only record a real answer; avoids storing an empty assistant bubble
            # (or crashing) on failure.
            if llava_response:
                st.session_state.chats.append(
                    {"role": "assistant", "content": llava_response}
                )
194
+
195
+
196
# Script entry point: render the page when executed directly.
if __name__ == "__main__":
    main()