thuanan commited on
Commit
01b145c
·
verified ·
1 Parent(s): 38b10b7

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +266 -0
app.py ADDED
@@ -0,0 +1,266 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from typing import Optional, Tuple, Any
3
+ import numpy as np
4
+ from PIL import Image, ImageOps
5
+
6
+ def _to_pil(img: Any) -> Optional[Any]:
7
+ if img is None:
8
+ return None
9
+ if Image is None:
10
+ raise RuntimeError("Pillow not available. Please install 'pillow'.")
11
+ if isinstance(img, Image.Image):
12
+ return img
13
+ arr = np.asarray(img)
14
+ if not (arr.ndim == 2 or (arr.ndim == 3 and arr.shape[2] in (3, 4))):
15
+ raise ValueError("Unsupported image array shape")
16
+ return Image.fromarray(arr.astype(np.uint8))
17
+
18
+
19
def preprocess_image(img: Any, max_side: int = 512, progress: Optional[gr.Progress] = None) -> Optional[Any]:
    """Step 1: load *img*, downscale so its longest side is <= *max_side*,
    then apply auto-contrast.

    Returns the processed PIL image, or None when no input was given
    (a gr.Warning is shown in that case).
    """
    if img is None:
        gr.Warning("Please upload an image first.")
        return None
    if progress:
        progress(0, desc="Loading image…")
    pil = _to_pil(img)
    if pil is None:
        return None
    if progress:
        progress(0.3, desc="Resizing…")
    # Keep aspect ratio, cap the longest side.
    w, h = pil.size
    scale = min(1.0, max_side / max(w, h))
    if scale < 1.0:
        # Clamp each dimension to >= 1 px: for extreme aspect ratios the
        # truncation int(w * scale) could yield 0, and Image.resize raises
        # on a zero-sized dimension.
        pil = pil.resize((max(1, int(w * scale)), max(1, int(h * scale))))
    if progress:
        progress(0.7, desc="Auto-contrast…")
    pil = ImageOps.autocontrast(pil)
    if progress:
        progress(1.0, desc="Done")
    return pil
41
+
42
+
43
def detect_edges(img: Any, strength: float = 1.0, progress: Optional[gr.Progress] = None) -> Optional[Any]:
    """Step 2: produce a gradient-magnitude edge map of *img*, scaled by *strength*."""
    if img is None:
        gr.Warning("Please run Preprocess first or upload an image.")
        return None
    gray = _to_pil(img).convert("L")  # work in grayscale
    if progress:
        progress(0.2, desc="Computing gradients…")
    pixels = np.asarray(gray, dtype=np.float32)
    # numpy.gradient gives a fast, dependency-free edge detector.
    grad_y, grad_x = np.gradient(pixels)
    magnitude = np.hypot(grad_x, grad_y)
    # Normalize to the 0-255 range (epsilon avoids divide-by-zero on flat images).
    magnitude *= (255.0 / (magnitude.max() + 1e-6))
    if progress:
        progress(0.7, desc="Applying strength…")
    factor = float(max(0.1, strength))
    edges = np.clip(magnitude * factor, 0, 255).astype(np.uint8)
    if progress:
        progress(1.0, desc="Done")
    return Image.fromarray(edges)
62
+
63
+
64
def enhance_image(img: Any, progress: Optional[gr.Progress] = None) -> Optional[Any]:
    """Step 3: lightweight enhancement via a second auto-contrast pass."""
    if img is None:
        gr.Warning("Please run Detect Edges first.")
        return None
    picture = _to_pil(img)
    if progress:
        progress(0.5, desc="Enhancing…")
    # Auto-contrast once more; this is a hook for richer enhancement later.
    picture = ImageOps.autocontrast(picture)
    if progress:
        progress(1.0, desc="Done")
    return picture
76
+
77
+
78
def run_all_image(image: Any, strength: float = 1.0, progress: Optional[gr.Progress] = None):
    """Run the full image pipeline; returns (preprocessed, edges, enhanced)."""
    if image is None:
        gr.Warning("Please upload an image.")
        return None, None, None
    # Thread the single progress object through all three stages.
    preprocessed = preprocess_image(image, progress=progress)
    edges = detect_edges(preprocessed, strength=strength, progress=progress)
    enhanced = enhance_image(edges, progress=progress)
    return preprocessed, edges, enhanced
87
+
88
+
89
+ # -----------------------
90
+ # Text pipeline helpers
91
+ # -----------------------
92
def clean_text(text: str) -> str:
    """Step 1: collapse every run of whitespace into a single space."""
    if not text:
        gr.Warning("Please enter text.")
        return ""
    # str.split() with no argument already drops leading/trailing whitespace.
    return " ".join(text.split())
99
+
100
+
101
def summarize_text(text: str, max_sentences: int = 2) -> str:
    """Step 2: naive extractive summary — keep the first *max_sentences* sentences."""
    if not text:
        gr.Warning("Please clean the text first.")
        return ""
    import re

    # Split after sentence-ending punctuation followed by whitespace.
    pieces = re.split(r"(?<=[.!?])\s+", text)
    keep = max(1, int(max_sentences))
    return " ".join(pieces[:keep])
111
+
112
+
113
def sentiment(text: str) -> Tuple[str, float]:
    """Step 3: tiny lexicon-based sentiment -> (label, score in [-1, 1])."""
    if not text:
        gr.Warning("Please provide text.")
        return ("neutral", 0.0)
    # Minimal hand-rolled lexicons.
    positive_words = {"good", "great", "excellent", "amazing", "love", "like", "happy", "awesome", "fantastic"}
    negative_words = {"bad", "terrible", "awful", "hate", "dislike", "sad", "poor", "horrible", "worse"}
    tokens = [token.strip(".,!?;:").lower() for token in text.split()]
    raw = sum(1 for t in tokens if t in positive_words) - sum(1 for t in tokens if t in negative_words)
    if raw > 0:
        label = "positive"
    elif raw < 0:
        label = "negative"
    else:
        label = "neutral"
    # Squash the raw hit count into [-1, 1], scaling by text length.
    divisor = max(1.0, len(tokens) / 10.0)
    value = float(raw / divisor)
    value = max(-1.0, min(1.0, value))
    return (label, value)
129
+
130
+
131
# ----------------------------------------------------------------------
# UI wiring: two tabs, each a 3-step pipeline. gr.State components carry
# intermediate results between the step buttons.
# ----------------------------------------------------------------------
with gr.Blocks(title="Complex Multi-step Workflows", theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
# Complex Apps with Gradio Blocks
Multi-step workflows across image and text pipelines. Each step updates state and UI.
""")

    with gr.Tabs():
        # ---------------- Image pipeline tab ----------------
        with gr.TabItem("Image Pipeline"):
            with gr.Row():
                with gr.Column(scale=1):
                    image_in = gr.Image(label="Upload Image", type="pil")
                    strength = gr.Slider(0.1, 3.0, value=1.0, step=0.1, label="Edge Strength")
                    with gr.Row():
                        btn_pre = gr.Button("Step 1: Preprocess")
                        btn_edge = gr.Button("Step 2: Detect Edges")
                        btn_enh = gr.Button("Step 3: Enhance")
                    with gr.Row():
                        btn_run_all = gr.Button("Run All", variant="primary")
                        btn_reset_img = gr.Button("Reset")

                    # Internal states pass results between steps.
                    st_pre = gr.State()
                    st_edge = gr.State()

                with gr.Column(scale=1):
                    out_pre = gr.Image(label="Preprocessed", interactive=False)
                    out_edge = gr.Image(label="Edges", interactive=False)
                    out_enh = gr.Image(label="Enhanced", interactive=False)

            # Step 1: preprocess, display it and cache it for step 2.
            def _preprocess_and_store(img, progress=gr.Progress(track_tqdm=True)):
                p = preprocess_image(img, progress=progress)
                return p, p

            btn_pre.click(_preprocess_and_store, inputs=[image_in], outputs=[out_pre, st_pre])

            # Step 2: edge-detect the cached preprocessed image.
            def _edge_and_store(img_pre, k, progress=gr.Progress(track_tqdm=True)):
                if img_pre is None:
                    gr.Warning("Run Step 1 first.")
                    return None, None
                e = detect_edges(img_pre, strength=k, progress=progress)
                return e, e

            btn_edge.click(_edge_and_store, inputs=[st_pre, strength], outputs=[out_edge, st_edge])

            # Step 3: enhance the cached edge map.
            def _enhance(img_edge, progress=gr.Progress(track_tqdm=True)):
                if img_edge is None:
                    gr.Warning("Run Step 2 first.")
                    return None
                return enhance_image(img_edge, progress=progress)

            btn_enh.click(_enhance, inputs=[st_edge], outputs=out_enh)

            # Run all three steps at once and refresh both state caches.
            def _run_all(img, k, progress=gr.Progress(track_tqdm=True)):
                p, e, h = run_all_image(img, k, progress=progress)
                return p, e, h, p, e

            btn_run_all.click(_run_all, inputs=[image_in, strength], outputs=[out_pre, out_edge, out_enh, st_pre, st_edge])

            # Reset the input, all three outputs and BOTH states.
            # BUGFIX: the original omitted st_edge from the outputs, so a
            # stale edge map survived the reset and Step 3 kept working on it.
            def _reset_img():
                return None, None, None, None, None, None

            btn_reset_img.click(_reset_img, outputs=[image_in, out_pre, out_edge, out_enh, st_pre, st_edge])

        # ---------------- Text pipeline tab ----------------
        with gr.TabItem("Text Pipeline"):
            with gr.Row():
                with gr.Column(scale=1):
                    text_in = gr.Textbox(label="Input Text", lines=8, placeholder="Paste or type some text…")
                    with gr.Accordion("Options", open=False):
                        max_sents = gr.Slider(1, 5, value=2, step=1, label="Summary Sentences")
                    with gr.Row():
                        btn_clean = gr.Button("Step 1: Clean")
                        btn_sum = gr.Button("Step 2: Summarize")
                        btn_sent = gr.Button("Step 3: Sentiment")
                    with gr.Row():
                        btn_run_all_txt = gr.Button("Run All", variant="primary")
                        btn_reset_txt = gr.Button("Reset")

                    st_clean = gr.State()
                    st_sum = gr.State()

                with gr.Column(scale=1):
                    out_clean = gr.Textbox(label="Cleaned Text", lines=8)
                    out_sum = gr.Textbox(label="Summary", lines=6)
                    out_sent = gr.Label(label="Sentiment")

            # Step 1: clean, display and cache.
            def _clean_and_store(t):
                c = clean_text(t)
                return c, c

            btn_clean.click(_clean_and_store, inputs=text_in, outputs=[out_clean, st_clean])

            # Step 2: summarize the cached cleaned text.
            def _summarize_and_store(c, n):
                if not c:
                    gr.Warning("Run Step 1 first.")
                    return "", ""
                s = summarize_text(c, int(n))
                return s, s

            btn_sum.click(_summarize_and_store, inputs=[st_clean, max_sents], outputs=[out_sum, st_sum])

            # Step 3: score the cached summary; map the scalar score in
            # [-1, 1] onto a 3-class distribution for the Label widget.
            def _sentiment(s):
                if not s:
                    gr.Warning("Run Step 2 first.")
                    return {"positive": 0.0, "neutral": 1.0, "negative": 0.0}
                label, score = sentiment(s)
                p_pos = max(0.0, score)
                p_neg = max(0.0, -score)
                p_neu = 1.0 - abs(score)
                return {"positive": round(p_pos, 3), "neutral": round(p_neu, 3), "negative": round(p_neg, 3)}

            btn_sent.click(_sentiment, inputs=st_sum, outputs=out_sent)

            # Run the whole text pipeline and refresh both states.
            def _run_all_txt(t, n):
                c = clean_text(t)
                s = summarize_text(c, int(n))
                label, score = sentiment(s)
                p_pos = max(0.0, score)
                p_neg = max(0.0, -score)
                p_neu = 1.0 - abs(score)
                return c, s, {"positive": round(p_pos, 3), "neutral": round(p_neu, 3), "negative": round(p_neg, 3)}, c, s

            btn_run_all_txt.click(_run_all_txt, inputs=[text_in, max_sents], outputs=[out_clean, out_sum, out_sent, st_clean, st_sum])

            # Reset the input, all three outputs and both states.
            # BUGFIX: the original omitted out_clean from the outputs, so
            # the cleaned-text box kept its stale contents after reset.
            def _reset_txt():
                return "", "", "", None, "", ""

            btn_reset_txt.click(_reset_txt, outputs=[text_in, out_clean, out_sum, out_sent, st_clean, st_sum])

demo.queue().launch()