Spaces:
Sleeping
Sleeping
| import sys | |
| import os | |
| from datetime import datetime | |
| import gradio as gr | |
| from PIL import Image | |
| import io | |
| import requests | |
| import base64 | |
| from openai import OpenAI | |
# --- LOGGING ---
def log_message(message: str) -> None:
    """Print *message* to stdout, prefixed with the current HH:MM:SS time."""
    print(f"[{datetime.now():%H:%M:%S}] {message}")
# --- CONFIGURATION ---
# Default demo images looked up by load_default_images().
RGB_IMAGE_PATH = "rgb.png"
THERMAL_IMAGE_PATH = "thermal.png"

# SECURITY: API keys were previously hard-coded (and therefore leaked) in this
# file — those keys must be revoked. The key is now read from the environment
# so the secret never lives in source control. Set OPENAI_API_KEY before
# launching; an empty key will fail at request time with a clear auth error.
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "")

# Shared OpenAI client used by generate_with_trace().
client = OpenAI(api_key=OPENAI_API_KEY)
# --- UTILS ---
def encode_image(image: Image.Image) -> str:
    """Serialize a PIL image to PNG and return it as a base64 string."""
    with io.BytesIO() as png_buffer:
        image.save(png_buffer, format="PNG")
        raw_bytes = png_buffer.getvalue()
    return base64.b64encode(raw_bytes).decode("utf-8")
# --- GENERATION WITH TRACE ---
def generate_with_trace(
    rgb_image: "Image.Image",
    thermal_image: "Image.Image",
    model: str = "gpt-4.1",  # or "gpt-5" if you have access
) -> "Image.Image":
    """Reconstruct the past scene from an RGB frame and a thermal frame.

    Both images are base64-encoded and sent to the OpenAI Responses API as
    data URLs, together with a fixed Spanish prompt instructing the model to
    move the person in the RGB image to the position indicated by the residual
    heat trace in the thermal image. The result of the first
    ``image_generation_call`` output is decoded back into a PIL image.

    Args:
        rgb_image: Current RGB scene.
        thermal_image: Thermal image with the residual heat trace.
        model: Responses-API model name (default ``"gpt-4.1"``); parameterized
            for flexibility, callers relying on the old behavior need not pass it.

    Returns:
        The generated, photorealistic reconstruction as a PIL image.

    Raises:
        ValueError: If the API response contains no generated image.
        Exception: Any client/API error is logged and re-raised unchanged.
    """
    try:
        log_message("🎨 Generating with TRACE (via responses.create)...")
        fixed_prompt = (
            "Usa la imagen RGB como base y conserva todo su fondo, iluminación y detalles intactos. "
            "Identifica a la persona en la imagen RGB en su posición actual. "
            "Ahora, ignora la persona actual que aparece también en la térmica. "
            "En su lugar, localiza la zona brillante residual de la térmica (la huella de calor que quedó), "
            "y reubica a la persona de la RGB exactamente en esa posición anterior. "
            "Genera una sola imagen final, fotorrealista, con la persona movida a esa posición, "
            "manteniendo el fondo original sin alteraciones ni elementos adicionales. Dame la imagen. Dame la imagen."
        )
        # Encode both inputs as base64 PNG for the data-URL payload.
        rgb_b64 = encode_image(rgb_image)
        thermal_b64 = encode_image(thermal_image)
        response = client.responses.create(
            model=model,
            input=[
                {
                    "role": "user",
                    "content": [
                        {"type": "input_text", "text": fixed_prompt},
                        {"type": "input_image", "image_url": f"data:image/png;base64,{rgb_b64}"},
                        {"type": "input_image", "image_url": f"data:image/png;base64,{thermal_b64}"}
                    ],
                }
            ],
            tools=[{"type": "image_generation", "input_fidelity": "high", "quality": "high", "size": "1024x1024"}],
        )
        # Collect the base64 payloads of every image-generation tool call.
        image_data = [
            output.result
            for output in response.output
            if output.type == "image_generation_call"
        ]
        if not image_data:
            raise ValueError("❌ No image generated")
        # Decode the first generated image back into a PIL image.
        image_bytes = base64.b64decode(image_data[0])
        generated_image = Image.open(io.BytesIO(image_bytes))
        log_message("✅ Image generated successfully")
        return generated_image
    except Exception as e:
        log_message(f"❌ Error with TRACE: {e}")
        raise  # bare raise preserves the original traceback (was `raise e`)
# --- MAIN PIPELINE ---
def generate_reconstruction(rgb_image: "Image.Image", thermal_image: "Image.Image", password: str):
    """Validate the access password and run the TRACE reconstruction.

    Args:
        rgb_image: RGB scene uploaded by the user (or None).
        thermal_image: Thermal image uploaded by the user (or None).
        password: Access password typed by the user.

    Returns:
        Tuple ``(image_or_None, status_message)`` suitable for the Gradio
        outputs; errors are reported in the status string, never raised.
    """
    try:
        # The expected password used to be hard-coded as "2025". It is now
        # configurable via the TRACE_PASSWORD environment variable; the old
        # value remains the fallback so existing deployments keep working.
        expected_password = os.environ.get("TRACE_PASSWORD", "2025")
        if password != expected_password:
            return None, "❌ Incorrect password. Access denied."
        log_message("🎬 Starting reconstruction...")
        if rgb_image is None or thermal_image is None:
            return None, "❌ Please upload both images"
        generated_image = generate_with_trace(rgb_image, thermal_image)
        return generated_image, "✅ Image generated with TRACE!"
    except Exception as e:
        # Top-level UI boundary: surface the error as a status message.
        return None, f"❌ Error: {str(e)} (Check logs)"
def load_default_images():
    """Load the bundled demo RGB/thermal images if they exist on disk.

    Returns:
        Tuple ``(rgb_or_None, thermal_or_None, status_message)``.
    """
    try:
        def _open_if_present(path):
            # Only open the file when it actually exists; otherwise None.
            return Image.open(path) if os.path.exists(path) else None

        rgb_img = _open_if_present(RGB_IMAGE_PATH)
        thermal_img = _open_if_present(THERMAL_IMAGE_PATH)
        if rgb_img and thermal_img:
            return rgb_img, thermal_img, "✅ Images loaded."
        return rgb_img, thermal_img, "⚠️ Default images not found."
    except Exception as e:
        return None, None, f"❌ Error: {e}"
# --- USER INTERFACE (GRADIO) ---
def build_ui():
    """Build and return the Gradio Blocks app for the TRACE demo.

    Layout: title/abstract markdown header, an input column (RGB image,
    thermal image, password) beside an output column (reconstructed image,
    status), and a predict button wired to generate_reconstruction().
    """
    with gr.Blocks(title="TRACE: Spectral Imaging to Reveal Invisible Traces") as demo:
        gr.Markdown("""
        <div style="text-align: center;">

        # Physics-Based Time-Reversal in Spectral Imaging: Reconstruction of Past Scenarios

        **Kebin Contreras***, **Luis Toscano-Palomino†** **Mauro Dalla Mura‡**, **Jorge Bacca†**

        ***Physics School, Universidad Industrial de Santander**

        **†Department of Computer Science, Universidad Industrial de Santander**

        **‡CNRS, Grenoble INP, GIPSA-Lab, Université Grenoble Alpes, 38000 Grenoble, France**

        </div>

        **Abstract:** This study introduces a framework for reconstructing recent past events by analyzing residual spectral traces captured through a multimodal imaging platform that integrates thermal and RGB modalities. A physics-informed inverse problem approach is employed, incorporating heat diffusion models and deep learning techniques based on physics-informed neural networks (PINNs). The methodology is evaluated on simulated sequences from the Thermal-IM dataset, utilizing pose estimation metrics such as Percentage of Correct Keypoints (PCK) and Mean Per Joint Position Error (MPJPE), alongside object detection metrics including mean Average Precision (mAP) and Intersection-over-Union (IoU). Results demonstrate the framework's capability to infer past scene configurations, with implications for forensic analysis and environmental monitoring.
        """)
        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown("### 📸 Input Images")
                rgb_input = gr.Image(label="RGB Current Scene", type="pil", height=300)
                thermal_input = gr.Image(label="Thermal Residual Traces", type="pil", height=300)
                password_input = gr.Textbox(label="Access Password", type="password", placeholder="Enter password")
            with gr.Column(scale=2):
                gr.Markdown("### 🎯 Past Reconstruction")
                output_image = gr.Image(label="Reconstructed Past Scene", height=400)
                status_text = gr.Textbox(label="Status", interactive=False)
        # The label previously contained a U+FFFD mojibake character ("� Predict
        # the Past"); restored to a proper emoji.
        predict_btn = gr.Button("🔮 Predict the Past", variant="primary")
        predict_btn.click(
            generate_reconstruction,
            inputs=[rgb_input, thermal_input, password_input],
            outputs=[output_image, status_text]
        )
    return demo
# --- MAIN ---
def main() -> None:
    """Launch the TRACE Gradio app on all interfaces, port 7860."""
    log_message("🚀 Starting TRACE System...")
    demo = build_ui()
    demo.launch(server_name="0.0.0.0", server_port=7860, show_error=True)


if __name__ == "__main__":
    main()