import gradio as gr
import os, math, tempfile
import numpy as np
from PIL import Image, UnidentifiedImageError

# ==========================================
# FLC v1.3 Logic Engine (The "Secret Sauce")
# ==========================================

# Golden ratio; each frequency band's quantization step is scaled by PHI**band.
PHI = (1.0 + 5.0**0.5) / 2.0


def fibonacci_sequence(n):
    """Return the first *n* Fibonacci terms of the 1, 2, 3, 5, ... variant.

    Args:
        n: Number of terms wanted (n <= 2 returns a truncated seed list).

    Returns:
        np.ndarray of int64 with exactly ``n`` entries (for n >= 1).
    """
    fibs = [1, 2]
    while len(fibs) < n:
        fibs.append(fibs[-1] + fibs[-2])
    return np.array(fibs[:n], dtype=np.int64)


def fibonacci_frequency_boundaries(n_coeffs, n_bands):
    """Split ``n_coeffs`` DCT coefficients into ``n_bands`` Fibonacci-weighted bands.

    Band widths are proportional to successive Fibonacci numbers, so low
    frequencies get narrow bands (fine quantization) and high frequencies wide
    ones. Returns a list of ``n_bands + 1`` strictly increasing boundary
    indices starting at 0; the last entry may exceed ``n_coeffs`` only in the
    degenerate case where rounding collapses adjacent boundaries.
    """
    if n_bands < 2:
        return [0, n_coeffs]
    fibs = fibonacci_sequence(n_bands).astype(np.float64)
    w = fibs / (fibs.sum() + 1e-12)
    cum = np.cumsum(w)
    b = [0]
    for i in range(n_bands - 1):
        b.append(int(round(n_coeffs * cum[i])))
    b.append(n_coeffs)
    # Enforce strictly increasing boundaries so no band is empty.
    for i in range(1, len(b)):
        if b[i] <= b[i - 1]:
            b[i] = b[i - 1] + 1
    return b


def dct_ortho_1d(x):
    """Orthonormal DCT-II of a 1-D array, computed via a length-2N FFT."""
    N = x.shape[0]
    # Mirror the signal so the FFT of the even extension yields the DCT.
    v = np.concatenate([x, x[::-1]])
    V = np.fft.fft(v)
    k = np.arange(N)
    X = np.real(V[:N] * np.exp(-1j * np.pi * k / (2 * N)))
    X *= 2.0
    # Orthonormal scaling (DC term scaled differently from AC terms).
    X[0] *= (1.0 / math.sqrt(4 * N))
    X[1:] *= (1.0 / math.sqrt(2 * N))
    return X


def idct_ortho_1d(X):
    """Inverse of :func:`dct_ortho_1d` (orthonormal DCT-III) via a 2N IFFT."""
    N = X.shape[0]
    # Undo the orthonormal scaling applied in the forward transform.
    x0, xr = X[0] * math.sqrt(4 * N), X[1:] * math.sqrt(2 * N)
    c = np.empty(N, dtype=np.complex128)
    c[0], c[1:] = x0 / 2.0, xr / 2.0
    k = np.arange(N)
    c = c * np.exp(1j * np.pi * k / (2 * N))
    # Rebuild the conjugate-symmetric length-2N spectrum and invert it.
    V = np.zeros(2 * N, dtype=np.complex128)
    V[:N] = c
    V[N + 1:] = np.conj(c[1:][::-1])
    return np.fft.ifft(V).real[:N]


# --- Visualization Helpers ---

def hologram_spectrum_image(zints):
    """Render quantized coefficients as a 2-D 'hologram' spectrum image.

    The integer coefficients are mapped onto a golden-angle phase spiral,
    packed into the smallest power-of-two square, and the log-magnitude of
    its 2-D FFT is returned as a uint8 grayscale array.
    """
    z = zints[:262144]  # cap work for demo responsiveness
    v = np.tanh(z / 32.0)
    theta = (2 * math.pi / (PHI**2)) * np.arange(v.size) + 2.0 * math.pi * (v * 0.25)
    r = 1.0 + 0.35 * np.abs(v)
    syms = r * np.cos(theta) + 1j * r * np.sin(theta)
    # Smallest power-of-two side length whose square holds every symbol.
    N = int(2**math.ceil(math.log2(math.sqrt(syms.size or 1))))
    U = np.pad(syms, (0, N * N - syms.size)).reshape(N, N)
    mag = np.log1p(np.abs(np.fft.fftshift(np.fft.fft2(U))))
    mag = (mag - mag.min()) / (mag.max() - mag.min() + 1e-12)
    return (mag * 255).astype(np.uint8)


def bytes_to_fib_spiral_image(data):
    """Visualize linear bytes arranged on a Fibonacci spiral square tiling.

    Successive Fibonacci-sized squares are laid out in the classic spiral
    tiling; each square is filled with the next run of input bytes. Returns a
    uint8 grayscale array covering the tiling's bounding box.
    """
    arr = np.frombuffer(data, dtype=np.uint8)[:262144]
    # Grow the square list until the tiling can hold every byte.
    fibs = [1, 1]
    while sum(s * s for s in fibs) < arr.size:
        fibs.append(fibs[-1] + fibs[-2])
    tiles, minx, miny, maxx, maxy, curr_x, curr_y = [], 0, 0, 0, 0, 0, 0
    for i, s in enumerate(fibs):
        d = (i - 1) % 4  # spiral direction cycles right, up, left, down
        if i > 0:
            if d == 0:
                curr_x = maxx; curr_y = miny
            elif d == 1:
                curr_x = maxx - s; curr_y = maxy
            elif d == 2:
                curr_x = minx - s; curr_y = maxy - s
            else:
                curr_x = minx; curr_y = miny - s
        tiles.append((curr_x, curr_y, s))
        minx, miny = min(minx, curr_x), min(miny, curr_y)
        maxx, maxy = max(maxx, curr_x + s), max(maxy, curr_y + s)
    img, idx = np.zeros((maxy - miny, maxx - minx), dtype=np.uint8), 0
    for x, y, s in tiles:
        take = min(s * s, arr.size - idx)
        if take <= 0:
            break
        block = np.pad(arr[idx:idx + take], (0, s * s - take)).reshape(s, s)
        # Flip the y axis so mathematical coordinates map to image rows.
        img[img.shape[0] - (y - miny + s):img.shape[0] - (y - miny),
            x - minx:x - minx + s] = block
        idx += take
    return img


# ==========================================
# Main Processing Logic
# ==========================================

def run_demo(input_file_wrapper, fidelity):
    """Run the full FLC compress/reconstruct pipeline on an uploaded file.

    Args:
        input_file_wrapper: Gradio file wrapper exposing a ``.name`` path,
            or ``None`` when nothing was uploaded.
        fidelity: One of the three tier labels offered by the UI radio.

    Returns:
        Tuple ``(gif_path, stats_text, original_pil_or_None,
        reconstructed_pil_or_None)`` matching the four UI outputs.

    Raises:
        gr.Error: If no file was uploaded or the file is empty.
    """
    # FIX: previously crashed with AttributeError when the user clicked Run
    # without uploading anything.
    if input_file_wrapper is None:
        raise gr.Error("Please upload a file first.")

    # Determine input type and prepare data
    is_image = False
    orig_pil = None
    img_dims = None
    try:
        # Try opening as an image; convert to grayscale for the core engine.
        orig_pil = Image.open(input_file_wrapper.name).convert('L')
        # Resize large images for demo performance constraint.
        orig_pil.thumbnail((512, 512))
        img_dims = orig_pil.size  # (width, height)
        raw_data = np.array(orig_pil).tobytes()
        is_image = True
    except (UnidentifiedImageError, OSError):
        # Fallback for non-image binary data.
        with open(input_file_wrapper.name, "rb") as f:
            raw_data = f.read()

    orig_size = len(raw_data)
    # FIX: an empty payload previously caused ZeroDivisionError on the ratio
    # and an IndexError in the band slicing (zero DCT blocks).
    if orig_size == 0:
        raise gr.Error("The uploaded file is empty — nothing to compress.")

    # FLC parameters based on user selection.
    q_settings = {"High Compression (Lossy)": 6, "Balanced": 12, "Near-Lossless": 24}
    n_bands = q_settings[fidelity]
    # Aggressive steps for lower tiers to show a visible quality difference.
    step = 0.15 if fidelity == "High Compression (Lossy)" else (
        0.01 if fidelity == "Balanced" else 0.0001)

    # --- Step 1: Transform & Quantize (Compression Simulation) ---
    # Normalize bytes to the range [-1, 1].
    x = (np.frombuffer(raw_data, dtype=np.uint8).astype(float) - 127.5) / 127.5
    block_len = 1024
    pad_len = (-x.size) % block_len
    X = np.pad(x, (0, pad_len)).reshape(-1, block_len)
    # Forward DCT, one block per row.
    C = np.array([dct_ortho_1d(b) for b in X])
    # Fibonacci frequency bands over the coefficient axis.
    bnds = fibonacci_frequency_boundaries(block_len, n_bands)
    # Quantize using Phi-scaling: higher bands get coarser steps.
    Q = np.zeros_like(C, dtype=np.int32)
    for bi in range(len(bnds) - 1):
        Q[:, bnds[bi]:bnds[bi + 1]] = np.round(
            C[:, bnds[bi]:bnds[bi + 1]] / (step * (PHI**bi)))
    # Simulated compressed size estimate (entropy estimate, plus base overhead).
    compressed_size_est = int(np.count_nonzero(Q) * 1.5) + 512
    ratio = compressed_size_est / orig_size

    # --- Step 2: Progressive Reconstruction (Animation) ---
    frames = []
    final_recon_data = None
    # Add one band per frame so the GIF shows quality building up.
    for t in range(1, n_bands + 1):
        # Partial quantization buffer containing only the first t bands.
        Q_p = np.zeros_like(Q)
        for bi in range(t):
            Q_p[:, bnds[bi]:bnds[bi + 1]] = Q[:, bnds[bi]:bnds[bi + 1]]
        # Dequantize back to coefficients.
        C_p = np.zeros_like(Q_p, dtype=float)
        for bi in range(len(bnds) - 1):
            C_p[:, bnds[bi]:bnds[bi + 1]] = Q_p[:, bnds[bi]:bnds[bi + 1]] * (step * (PHI**bi))
        # Inverse DCT, denormalize, and clamp back to byte range.
        recon_1d = np.clip(
            (np.array([idct_ortho_1d(B) for B in C_p]).flatten()[:orig_size] * 127.5) + 127.5,
            0, 255).astype(np.uint8)
        if t == n_bands:
            final_recon_data = recon_1d
        # Create visualization frames: spectrum on the left, spiral on the right.
        h_img = Image.fromarray(hologram_spectrum_image(Q_p.flatten())).resize((256, 256)).convert("RGB")
        s_img = Image.fromarray(bytes_to_fib_spiral_image(recon_1d.tobytes())).resize((256, 256)).convert("RGB")
        frame = Image.new("RGB", (512, 280), (15, 15, 25))
        frame.paste(h_img, (0, 12))
        frame.paste(s_img, (256, 12))
        frames.append(frame)

    # Save animation. FIX: tempfile.mktemp is deprecated and race-prone;
    # NamedTemporaryFile(delete=False) safely reserves the path.
    with tempfile.NamedTemporaryFile(suffix=".gif", delete=False) as tmp:
        gif_path = tmp.name
    frames[0].save(gif_path, save_all=True, append_images=frames[1:],
                   duration=120, loop=0)

    # FIX: "compression Ratio" typo in the user-facing metrics text.
    stats = (f"Original Size: {orig_size:,} bytes\n"
             f"Simulated Compressed Size: ~{compressed_size_est:,} bytes\n"
             f"Compression Ratio: {ratio:.2%}")

    # --- Step 3: Prepare Final Comparison Images ---
    recon_pil = None
    if is_image and final_recon_data is not None:
        # Reshape 1-D reconstructed data back to the 2-D image dimensions.
        recon_pil = Image.fromarray(final_recon_data.reshape((img_dims[1], img_dims[0])))

    if is_image:
        return gif_path, stats, orig_pil, recon_pil
    # Non-image input: return None for the image components so the comparison
    # panels stay blank instead of rendering garbage.
    return gif_path, stats, None, None


# ==========================================
# Gradio UI Layout
# ==========================================

with gr.Blocks(title="FLC v1.3 | Unified Fibonacci Demo",
               theme=gr.themes.Soft(primary_hue="amber", neutral_hue="slate")) as demo:
    gr.Markdown("# 🌀 Fibonacci Lattice Compression (FLC)")
    gr.Markdown("Upload an image to see the **Golden Ratio** compress data and reconstruct it progressively.")
    with gr.Row():
        with gr.Column(scale=1):
            with gr.Group():
                file_input = gr.File(label="1. Upload Input (Image recommended)",
                                     file_count="single")
                radio_input = gr.Radio(
                    ["High Compression (Lossy)", "Balanced", "Near-Lossless"],
                    value="Balanced", label="2. Select Fidelity Tier")
            run_btn = gr.Button("🚀 Run Holographic Compression", variant="primary")
            stats_output = gr.Textbox(label="Compression Metrics",
                                      interactive=False, lines=4)
        with gr.Column(scale=2):
            gr.Markdown("### 🎞️ Progressive Reconstruction Animation")
            gr.Markdown("_Left: Frequency Hologram filling up. Right: Data organizing into Fibonacci Spiral._")
            gif_output = gr.Image(label="Animation Sequence", show_label=False)
    gr.Markdown("---")
    gr.Markdown("### 🔍 Visual Verification: Original vs. Reconstructed")
    gr.Markdown("_Determine if the 'Secret Sauce' maintained enough quality at the chosen compression tier._")
    with gr.Row():
        orig_image_output = gr.Image(label="Original Input (Grayscale)",
                                     type="pil", interactive=False)
        recon_image_output = gr.Image(label="Final Decompressed Result",
                                      type="pil", interactive=False)

    # Wire the button to the processing pipeline.
    run_btn.click(
        fn=run_demo,
        inputs=[file_input, radio_input],
        outputs=[gif_output, stats_output, orig_image_output, recon_image_output]
    )

if __name__ == "__main__":
    demo.launch()