Janish7 committed on
Commit
22f0b5c
Β·
verified Β·
1 Parent(s): 60f7554

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +244 -34
src/streamlit_app.py CHANGED
@@ -1,40 +1,250 @@
1
- import altair as alt
2
- import numpy as np
3
- import pandas as pd
4
  import streamlit as st
 
 
 
 
 
 
 
5
 
6
- """
7
- # Welcome to Streamlit!
8
-
9
- Edit `/streamlit_app.py` to customize this app to your heart's desire :heart:.
10
- If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
11
- forums](https://discuss.streamlit.io).
12
-
13
- In the meantime, below is an example of what you can do with just a few lines of code:
14
- """
15
-
16
- num_points = st.slider("Number of points in spiral", 1, 10000, 1100)
17
- num_turns = st.slider("Number of turns in spiral", 1, 300, 31)
18
 
19
- indices = np.linspace(0, 1, num_points)
20
- theta = 2 * np.pi * num_turns * indices
21
- radius = indices
 
 
 
 
 
 
 
22
 
23
- x = radius * np.cos(theta)
24
- y = radius * np.sin(theta)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
 
26
- df = pd.DataFrame({
27
- "x": x,
28
- "y": y,
29
- "idx": indices,
30
- "rand": np.random.randn(num_points),
31
- })
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
32
 
33
- st.altair_chart(alt.Chart(df, height=700, width=700)
34
- .mark_point(filled=True)
35
- .encode(
36
- x=alt.X("x", axis=None),
37
- y=alt.Y("y", axis=None),
38
- color=alt.Color("idx", legend=None, scale=alt.Scale()),
39
- size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
40
- ))
 
 
 
 
1
  import streamlit as st
2
+ import torch
3
+ import clip
4
+ from PIL import Image
5
+ import numpy as np
6
+ import io
7
+ import requests
8
+ from typing import List, Tuple
9
 
10
+ # Configure page
11
+ st.set_page_config(
12
+ page_title="CLIP Classifier",
13
+ page_icon="πŸ”",
14
+ layout="wide"
15
+ )
 
 
 
 
 
 
16
 
17
+ @st.cache_resource
18
+ def load_clip_model():
19
+ """Load CLIP model and preprocessing function"""
20
+ try:
21
+ device = "cuda" if torch.cuda.is_available() else "cpu"
22
+ model, preprocess = clip.load("ViT-B/32", device=device)
23
+ return model, preprocess, device
24
+ except Exception as e:
25
+ st.error(f"Error loading CLIP model: {e}")
26
+ return None, None, None
27
 
28
+ def classify_input(model, preprocess, device, input_data, positive_prompts, negative_prompts, input_type="image"):
29
+ """
30
+ Classify input based on positive and negative prompts using CLIP
31
+ """
32
+ try:
33
+ # Prepare text prompts
34
+ all_prompts = positive_prompts + negative_prompts
35
+ text_inputs = clip.tokenize(all_prompts).to(device)
36
+
37
+ if input_type == "image":
38
+ # Process image
39
+ if isinstance(input_data, str): # URL
40
+ response = requests.get(input_data)
41
+ image = Image.open(io.BytesIO(response.content))
42
+ else: # Uploaded file
43
+ image = Image.open(input_data)
44
+
45
+ image_input = preprocess(image).unsqueeze(0).to(device)
46
+
47
+ # Get features
48
+ with torch.no_grad():
49
+ image_features = model.encode_image(image_input)
50
+ text_features = model.encode_text(text_inputs)
51
+
52
+ # Calculate similarities
53
+ similarities = (100.0 * image_features @ text_features.T).softmax(dim=-1)
54
+ similarities = similarities[0].cpu().numpy()
55
+
56
+ elif input_type == "text":
57
+ # Process text input
58
+ input_text = clip.tokenize([input_data]).to(device)
59
+
60
+ with torch.no_grad():
61
+ input_features = model.encode_text(input_text)
62
+ text_features = model.encode_text(text_inputs)
63
+
64
+ # Calculate similarities
65
+ similarities = (100.0 * input_features @ text_features.T).softmax(dim=-1)
66
+ similarities = similarities[0].cpu().numpy()
67
+
68
+ # Calculate scores for positive and negative categories
69
+ positive_scores = similarities[:len(positive_prompts)]
70
+ negative_scores = similarities[len(positive_prompts):]
71
+
72
+ positive_total = np.sum(positive_scores)
73
+ negative_total = np.sum(negative_scores)
74
+
75
+ # Determine classification
76
+ is_positive = positive_total > negative_total
77
+ confidence = max(positive_total, negative_total)
78
+
79
+ return {
80
+ 'classification': 'Positive' if is_positive else 'Negative',
81
+ 'confidence': float(confidence),
82
+ 'positive_score': float(positive_total),
83
+ 'negative_score': float(negative_total),
84
+ 'detailed_scores': {
85
+ 'positive_prompts': [(prompt, float(score)) for prompt, score in zip(positive_prompts, positive_scores)],
86
+ 'negative_prompts': [(prompt, float(score)) for prompt, score in zip(negative_prompts, negative_scores)]
87
+ }
88
+ }
89
+
90
+ except Exception as e:
91
+ st.error(f"Error during classification: {e}")
92
+ return None
93
 
94
+ def main():
95
+ st.title("πŸ” CLIP-Based Custom Classifier")
96
+ st.markdown("### Define your own positive and negative prompts to classify images or text!")
97
+
98
+ # Load model
99
+ model, preprocess, device = load_clip_model()
100
+
101
+ if model is None:
102
+ st.error("Failed to load CLIP model. Please check your installation.")
103
+ st.stop()
104
+
105
+ st.success(f"CLIP model loaded successfully on {device}")
106
+
107
+ # Sidebar for configuration
108
+ with st.sidebar:
109
+ st.header("βš™οΈ Configuration")
110
+
111
+ # Input type selection
112
+ input_type = st.radio("Select input type:", ["Image", "Text"])
113
+
114
+ st.header("πŸ“ Define Prompts")
115
+
116
+ # Positive prompts
117
+ st.subheader("βœ… Positive Prompts")
118
+ positive_prompts_text = st.text_area(
119
+ "Enter positive prompts (one per line):",
120
+ value="happy face\nsmiling person\njoyful expression\npositive emotion",
121
+ height=100,
122
+ help="These prompts define what should be classified as 'Positive'"
123
+ )
124
+
125
+ # Negative prompts
126
+ st.subheader("❌ Negative Prompts")
127
+ negative_prompts_text = st.text_area(
128
+ "Enter negative prompts (one per line):",
129
+ value="sad face\nangry person\nfrowning expression\nnegative emotion",
130
+ height=100,
131
+ help="These prompts define what should be classified as 'Negative'"
132
+ )
133
+
134
+ # Process prompts
135
+ positive_prompts = [p.strip() for p in positive_prompts_text.split('\n') if p.strip()]
136
+ negative_prompts = [p.strip() for p in negative_prompts_text.split('\n') if p.strip()]
137
+
138
+ st.info(f"Positive prompts: {len(positive_prompts)}")
139
+ st.info(f"Negative prompts: {len(negative_prompts)}")
140
+
141
+ # Main content area
142
+ col1, col2 = st.columns([1, 1])
143
+
144
+ with col1:
145
+ st.header("πŸ“₯ Input")
146
+
147
+ input_data = None
148
+
149
+ if input_type == "Image":
150
+ # Image input options
151
+ image_option = st.radio("Choose image source:", ["Upload", "URL"])
152
+
153
+ if image_option == "Upload":
154
+ uploaded_file = st.file_uploader(
155
+ "Choose an image file",
156
+ type=['png', 'jpg', 'jpeg', 'gif', 'bmp']
157
+ )
158
+ if uploaded_file:
159
+ input_data = uploaded_file
160
+ st.image(uploaded_file, caption="Uploaded Image", use_column_width=True)
161
+
162
+ else: # URL
163
+ image_url = st.text_input("Enter image URL:")
164
+ if image_url:
165
+ try:
166
+ response = requests.get(image_url)
167
+ image = Image.open(io.BytesIO(response.content))
168
+ input_data = image_url
169
+ st.image(image, caption="Image from URL", use_column_width=True)
170
+ except Exception as e:
171
+ st.error(f"Error loading image from URL: {e}")
172
+
173
+ else: # Text input
174
+ text_input = st.text_area(
175
+ "Enter text to classify:",
176
+ height=150,
177
+ placeholder="Type your text here..."
178
+ )
179
+ if text_input.strip():
180
+ input_data = text_input.strip()
181
+ st.text_area("Text to classify:", value=text_input, height=100, disabled=True)
182
+
183
+ with col2:
184
+ st.header("πŸ“Š Results")
185
+
186
+ if input_data and positive_prompts and negative_prompts:
187
+ if st.button("πŸš€ Classify", type="primary", use_container_width=True):
188
+ with st.spinner("Classifying..."):
189
+ result = classify_input(
190
+ model, preprocess, device, input_data,
191
+ positive_prompts, negative_prompts,
192
+ input_type.lower()
193
+ )
194
+
195
+ if result:
196
+ # Main classification result
197
+ classification = result['classification']
198
+ confidence = result['confidence']
199
+
200
+ # Display result with color coding
201
+ color = "green" if classification == "Positive" else "red"
202
+ st.markdown(f"### Classification: <span style='color: {color}'>{classification}</span>",
203
+ unsafe_allow_html=True)
204
+
205
+ # Confidence and scores
206
+ st.metric("Confidence", f"{confidence:.3f}")
207
+
208
+ col_pos, col_neg = st.columns(2)
209
+ with col_pos:
210
+ st.metric("Positive Score", f"{result['positive_score']:.3f}")
211
+ with col_neg:
212
+ st.metric("Negative Score", f"{result['negative_score']:.3f}")
213
+
214
+ # Detailed breakdown
215
+ st.subheader("πŸ“ˆ Detailed Scores")
216
+
217
+ # Positive prompts scores
218
+ st.write("**Positive Prompts:**")
219
+ for prompt, score in result['detailed_scores']['positive_prompts']:
220
+ st.progress(float(score), text=f"{prompt}: {score:.3f}")
221
+
222
+ # Negative prompts scores
223
+ st.write("**Negative Prompts:**")
224
+ for prompt, score in result['detailed_scores']['negative_prompts']:
225
+ st.progress(float(score), text=f"{prompt}: {score:.3f}")
226
+
227
+ elif not positive_prompts or not negative_prompts:
228
+ st.warning("⚠️ Please define both positive and negative prompts in the sidebar.")
229
+
230
+ elif not input_data:
231
+ st.info("πŸ“ Please provide input data to classify.")
232
+
233
+ # Instructions
234
+ with st.expander("ℹ️ How to use this app"):
235
+ st.markdown("""
236
+ 1. **Define Prompts**: In the sidebar, enter your positive and negative prompts (one per line)
237
+ 2. **Choose Input Type**: Select whether you want to classify images or text
238
+ 3. **Provide Input**:
239
+ - For images: Upload a file or provide a URL
240
+ - For text: Type or paste your text
241
+ 4. **Classify**: Click the "Classify" button to see results
242
+
243
+ **Examples of prompts:**
244
+ - **Image classification**: "happy dog, playful pet" vs "aggressive dog, angry animal"
245
+ - **Text sentiment**: "positive review, good experience" vs "negative review, bad experience"
246
+ - **Content moderation**: "safe content, family friendly" vs "inappropriate content, offensive material"
247
+ """)
248
 
249
+ if __name__ == "__main__":
250
+ main()