00Boobs00 committed on
Commit
162518d
·
verified ·
1 Parent(s): aad7337

Update app.py from anycoder

Browse files
Files changed (1) hide show
  1. app.py +280 -0
app.py ADDED
@@ -0,0 +1,280 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import torch
3
+ import random
4
+ import time
5
+ from datetime import datetime
6
+ from diffusers import StableDiffusionPipeline, EulerAncestralDiscreteScheduler
7
+ import json
8
+
9
# --- Configuration & Model Loading ---
# Mirrors the ComfyUI graph this app replicates:
#   CheckpointLoaderSimple -> runwayml/stable-diffusion-v1-5
#   KSampler               -> EulerAncestralDiscreteScheduler
#   EmptyLatentImage       -> 512x512

MODEL_ID = "runwayml/stable-diffusion-v1-5"
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# Load the pipeline (simulating the backend compute)
print(f"Loading model {MODEL_ID} on {DEVICE}...")


def _load_pipeline():
    """Build and configure the Stable Diffusion pipeline for DEVICE."""
    dtype = torch.float16 if DEVICE == "cuda" else torch.float32
    sd_pipe = StableDiffusionPipeline.from_pretrained(
        MODEL_ID,
        torch_dtype=dtype,
        safety_checker=None,  # Disable for speed/demo purposes
    )
    # Swap in the Euler-Ancestral sampler to match the KSampler node.
    sd_pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(
        sd_pipe.scheduler.config
    )
    if DEVICE == "cuda":
        sd_pipe.to(DEVICE)
    return sd_pipe


try:
    pipe = _load_pipeline()
    print("Model loaded successfully.")
except Exception as e:
    # Keep the app importable even when the model can't load; generation
    # paths check `pipe is None` and report the failure to the user.
    print(f"Error loading model: {e}")
    pipe = None
33
+
34
+ # --- Core Logic Functions ---
35
+
36
def generate_image_with_memory(
    prompt,
    negative_prompt,
    steps,
    cfg,
    seed,
    memory_db
):
    """Generate a 512x512 image and record the request in session memory.

    Args:
        prompt: Positive conditioning text.
        negative_prompt: Negative conditioning text.
        steps: Sampler step count (int-like; KSampler equivalent).
        cfg: Classifier-free guidance scale (float-like).
        seed: RNG seed; 0 is the "pick a random seed" sentinel.
        memory_db: List of prior history dicts (newest first), or None.

    Returns:
        (image, updated_memory, status): PIL image or None on failure,
        the history list with the new entry prepended, and a status string.
    """
    # Guard: gr.State can hand us None on a fresh/reset session.
    memory_db = memory_db or []

    if pipe is None:
        return None, memory_db, "Error: Model not loaded. Check GPU availability."

    start_time = time.time()

    # Normalize widget values once, so the stored history entry carries the
    # same types recall_prompt later feeds back into the widgets.
    steps = int(steps)
    cfg = float(cfg)
    seed = int(seed)

    # Handle Seed (0 = random)
    if seed == 0:
        seed = random.randint(0, 2147483647)

    generator = torch.Generator(device=DEVICE).manual_seed(seed)

    # Generate Image (Replicating KSampler + VAE Decode)
    try:
        result = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            num_inference_steps=steps,
            guidance_scale=cfg,
            height=512,
            width=512,
            generator=generator
        )
        image = result.images[0]
    except Exception as e:
        return None, memory_db, f"Generation Failed: {str(e)}"

    # Update Memory Database — prepend so the newest entry shows first.
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    new_entry = {
        "timestamp": timestamp,
        "prompt": prompt,
        "negative": negative_prompt,
        "seed": seed,
        "steps": steps,
        "cfg": cfg
    }
    updated_memory = [new_entry] + memory_db

    processing_time = f"{time.time() - start_time:.2f}s"

    return image, updated_memory, f"Generated in {processing_time} | Seed: {seed}"
90
+
91
def recall_prompt(memory_db, evt: gr.SelectData):
    """Restore generation settings from a clicked history-table row.

    Args:
        memory_db: Session history (list of dicts, newest first), or None.
        evt: Gradio table-selection event; evt.index is (row, col).

    Returns:
        (prompt, negative, seed, steps, cfg) — widget defaults when there
        is no usable entry.
    """
    defaults = ("", "", 0, 20, 7.0)

    if not memory_db:
        return defaults

    index = evt.index[0]
    # Guard: the rendered table and the state list can briefly disagree
    # (e.g. a click racing a refresh) — avoid an IndexError.
    if not 0 <= index < len(memory_db):
        return defaults

    selected = memory_db[index]
    return (
        selected.get("prompt", ""),
        selected.get("negative", ""),
        int(selected.get("seed", 0)),
        int(selected.get("steps", 20)),
        float(selected.get("cfg", 7.0))
    )
108
+
109
def export_memory(memory_db):
    """Serialize the memory database to a JSON file for download.

    Args:
        memory_db: Session history (list of dicts), possibly empty or None.

    Returns:
        Path of a temporary .json file, or None when there is nothing to
        export. A gr.File output expects a file PATH — the previous version
        returned the raw JSON string, which Gradio would treat as a
        (nonexistent) path and fail to serve.
    """
    import tempfile  # local import: only needed on explicit export

    if not memory_db:
        return None

    with tempfile.NamedTemporaryFile(
        mode="w", suffix=".json", delete=False, encoding="utf-8"
    ) as fh:
        json.dump(memory_db, fh, indent=4, ensure_ascii=False)
        return fh.name
117
+
118
# --- Gradio 6 UI Construction ---

with gr.Blocks() as demo:

    # Header
    gr.HTML("""
    <div style="text-align: center; margin-bottom: 20px;">
        <h1>🧠 AI-Native Memory Core</h1>
        <p>Automated Long Term Super Intelligent Human Recall | Persistent Database</p>
        <a href="https://huggingface.co/spaces/akhaliq/anycoder" target="_blank" style="color: #007bff; text-decoration: none;">Built with anycoder</a>
    </div>
    """)

    with gr.Row():
        # --- Sidebar: Controls & Memory Database ---
        with gr.Sidebar(width=350):
            gr.Markdown("### ⚙️ Generation Parameters")

            with gr.Accordion("Advanced Settings", open=True):
                steps = gr.Slider(minimum=1, maximum=150, value=20, step=1, label="Sampling Steps", info="Higher = more detail")
                cfg = gr.Slider(minimum=1, maximum=30, value=7.0, step=0.5, label="CFG Scale", info="Prompt adherence")
                seed = gr.Number(value=0, label="Seed (0 = Random)", precision=0)

            gr.Markdown("---")
            gr.Markdown("### 💾 Persistent Memory Database")
            gr.Markdown("Click a row to recall prompt settings.")

            memory_display = gr.Dataframe(
                label="Prompt History",
                headers=["Time", "Prompt", "Seed"],
                datatype=["str", "str", "number"],
                interactive=False,
                wrap=True,
                height=300
            )

            with gr.Row():
                export_btn = gr.Button("💾 Export Memory (JSON)", size="sm")
                download_file = gr.File(visible=False, label="Download Database")

            gr.Markdown("---")
            gr.Markdown("### 📱 App Access")
            # PWA-style install instructions stand in for a native APK.
            gr.Markdown("""
            <small>
            To install on Android:<br>
            1. Open this in Chrome<br>
            2. Tap Menu (⋮)<br>
            3. Tap "Add to Home Screen"
            </small>
            """)
            app_link_btn = gr.Button("📲 Open App View", link="/?view=standalone", variant="primary")

        # --- Main Area: Prompting & Generation ---
        with gr.Column(scale=2):
            with gr.Group():
                prompt_input = gr.Textbox(
                    label="Positive Prompt",
                    placeholder="Describe your vision...",
                    lines=3,
                    show_copy_button=True
                )
                negative_input = gr.Textbox(
                    label="Negative Prompt",
                    value="low quality, blurry, distorted, bad anatomy",
                    lines=2,
                    visible=False  # Collapsed for clean UI initially
                )

            with gr.Row():
                toggle_neg = gr.Button("Show/Hide Negative", size="sm")
                generate_btn = gr.Button("✨ Generate & Remember", variant="primary", scale=2)

            # Output Area
            with gr.Row():
                with gr.Column():
                    result_image = gr.Image(label="Generated Artifact", type="pil", height=512)
                    status_text = gr.Textbox(label="System Status", interactive=False)

    # --- State Management ---
    # Session-scoped "Memory Database": a list of history dicts, newest first.
    memory_state = gr.State(value=[])

    # --- Event Listeners ---

    # 1. Toggle Negative Prompt
    # NOTE(review): despite the button label, this only ever SHOWS the
    # negative-prompt box; hiding again would need a visibility state.
    toggle_neg.click(lambda: gr.Textbox(visible=True), None, negative_input)

    def format_memory_for_display(db):
        """Flatten history dicts into [Time, Prompt, Seed] dataframe rows."""
        if not db:
            return []
        rows = []
        for entry in db:
            text = entry["prompt"]
            # Only ellipsize prompts that were actually truncated.
            shown = text[:50] + "..." if len(text) > 50 else text
            rows.append([entry["timestamp"], shown, entry["seed"]])
        return rows

    def generate_with_ui_update(prompt, neg, steps, cfg, seed, db):
        """Generate, update memory state, and refresh the history table."""
        img, new_db, status = generate_image_with_memory(prompt, neg, steps, cfg, seed, db)
        formatted_table = format_memory_for_display(new_db)
        return img, new_db, status, formatted_table

    # 2. Generate Image & Update Memory
    # BUG FIX: the button was previously wired to BOTH
    # generate_image_with_memory and generate_with_ui_update, so every click
    # ran two full generations. Register the combined handler exactly once.
    generate_btn.click(
        fn=generate_with_ui_update,
        inputs=[prompt_input, negative_input, steps, cfg, seed, memory_state],
        outputs=[result_image, memory_state, status_text, memory_display],
        api_visibility="public"
    )

    # 3. Recall from Memory (Click on Table)
    memory_display.select(
        fn=recall_prompt,
        inputs=[memory_state],
        outputs=[prompt_input, negative_input, seed, steps, cfg],
        api_visibility="private"
    )

    # 4. Export Memory — reveal the (initially hidden) download component
    # only when the export actually produced something.
    def _export_and_show(db):
        payload = export_memory(db)
        return gr.File(value=payload, visible=payload is not None)

    export_btn.click(
        fn=_export_and_show,
        inputs=[memory_state],
        outputs=[download_file],
        api_visibility="private"
    )
255
+
256
# --- Launch Configuration ---
# Gradio 6 Syntax: Theme and parameters go in launch(), not Blocks()
# NOTE(review): `theme=` and `footer_links=` as launch() arguments are
# presented here as Gradio 6 API; on Gradio <6 the theme is passed to
# gr.Blocks(theme=...) and `footer_links` does not exist — confirm the
# installed Gradio version before deploying.

demo.launch(
    # Soft theme with indigo/blue palette; .set() overrides individual
    # CSS-variable-style theme tokens.
    theme=gr.themes.Soft(
        primary_hue="indigo",
        secondary_hue="blue",
        neutral_hue="slate",
        font=gr.themes.GoogleFont("Inter"),
        text_size="lg",
        spacing_size="lg",
        radius_size="md"
    ).set(
        button_primary_background_fill="*primary_600",
        button_primary_background_fill_hover="*primary_700",
        block_title_text_weight="600",
        block_border_width="1px",
        block_border_color="*neutral_200"
    ),
    footer_links=[
        {"label": "Built with anycoder", "url": "https://huggingface.co/spaces/akhaliq/anycoder"},
        {"label": "Hugging Face", "url": "https://huggingface.co"}
    ],
    share=False  # Set to True if you want a public link immediately
)