""" Meridian - Historical Image Generator Takes coordinates and dates, finds historical events, and generates images of those moments. Built for the Hugging Face MCP Hackathon. """ import os import warnings from datetime import datetime from typing import Dict, List, Optional, Tuple # Suppress FutureWarnings for cleaner logs warnings.filterwarnings("ignore", category=FutureWarning) import gradio as gr from PIL import Image from huggingface_hub import InferenceClient import google.generativeai as genai from services.history_service import ( build_event_context, format_event_digest, get_artifacts_for_year, get_event_by_name, get_event_by_slug, get_event_by_qid, get_events_by_coordinates, get_events_response, get_era_vocabulary, get_region_context, initialize_history, search_events_globally, ) # Import Wikidata service for MCP tools try: from services.wikidata_service import ( mcp_search_historical_events, mcp_get_event_by_qid, ) WIKIDATA_MCP_AVAILABLE = True except ImportError: WIKIDATA_MCP_AVAILABLE = False from services.prompt_parser import parse_prompt_context, ParsedPrompt def join_list(items: List[str], conjunction: str = "and") -> str: cleaned = [item for item in items if item] if not cleaned: return "" if len(cleaned) == 1: return cleaned[0] if len(cleaned) == 2: return f"{cleaned[0]} {conjunction} {cleaned[1]}" return ", ".join(cleaned[:-1]) + f", {conjunction} {cleaned[-1]}" def describe_time_of_day(hour: Optional[int]) -> str: if hour is None: return "ambient daylight with balanced, neutral lighting" hour = int(hour) % 24 if 0 <= hour < 4: return "pre-dawn darkness punctuated by sparse artificial lights" if 4 <= hour < 6: return "blue-hour twilight with faint glow along the horizon" if 6 <= hour < 10: return "soft morning light casting long, gentle shadows" if 10 <= hour < 15: return "bright midday sunlight with crisp contrast and vivid color" if 15 <= hour < 18: return "warm late-afternoon sun gilding the scene with golden highlights" if 18 <= hour < 20: return "dusk with saturated skies and streetlights beginning to glow" if 20 <= hour < 23: return "nighttime illumination from electric signage and floodlights" return "deep night with high-contrast spotlights and reflective surfaces" def describe_season(latitude: float, month: Optional[int]) -> str: if month is None or month < 1 or month > 12: return "seasonal atmosphere typical for the region" northern = latitude >= 0 if northern: if month in [12, 1, 2]: return "winter air with visible breath, bundled clothing, and muted vegetation" if month in [3, 4, 5]: return "spring freshness with blooming foliage and newly awakened colors" if month in [6, 7, 8]: return "summer warmth with lush greenery and vibrant skylines" return "autumn chill with turning leaves and cool breezes" else: if month in [12, 1, 2]: return "southern hemisphere summer with bright skies and humid air" if month in [3, 4, 5]: return "autumn hues with cooling evenings and amber light" if month in [6, 7, 8]: return "winter clarity with crisp air and softer daylight" return "spring renewal with emerging blossoms and mild winds" def add_composition_cues(mood: str, event_type: str = "") -> str: """Returns composition cues for the selected style.""" composition_map = { "Documentary": "wide angle view, documentary photography style, centered composition", "Dramatic": "dramatic perspective, cinematic framing, rule of thirds, leading lines", "Artistic": "artistic composition, balanced framing, visual depth", "Cinematic": "cinematic wide shot, dramatic shadows, foreground 
def add_composition_cues(mood: str, event_type: str = "") -> str:
    """Returns composition cues for the selected style."""
    composition_map = {
        "Documentary": "wide angle view, documentary photography style, centered composition",
        "Dramatic": "dramatic perspective, cinematic framing, rule of thirds, leading lines",
        "Artistic": "artistic composition, balanced framing, visual depth",
        "Cinematic": "cinematic wide shot, dramatic shadows, foreground action, background architecture",
        "Photojournalistic": "photojournalistic style, candid framing, natural lighting",
        "Cartoon": "animated style, vibrant colors, cartoon illustration, bold outlines",
        "Minecraft": "blocky pixelated style, minecraft aesthetic, cubic forms, voxel art",
        "Retro": "vintage photography style, retro color grading, film grain, nostalgic atmosphere",
        "Glitch": "digital glitch art, motion blur effects, halftone dot patterns, RGB channel separation, pixelated distortion, cyberpunk aesthetic, data corruption visual effects",
    }
    base_cue = composition_map.get(mood, "wide angle view, cinematic framing")

    # Only add depth cues for photorealistic styles
    if mood in ["Documentary", "Cinematic", "Dramatic", "Photojournalistic", "Artistic"]:
        depth_cues = "foreground action, middle ground figures, background architecture, atmospheric depth"
        return f"{base_cue}, {depth_cues}"
    return base_cue


def build_event_sections(
    event: dict,
    time_desc: str,
    season_desc: str,
    mood: str,
    lat: float,
    lon: float,
    year: int,
) -> Dict[str, str]:
    """Build VISUAL-FIRST event description for image generation.

    Returns concrete visual elements that image models understand.
    Target: 35-50 words for base prompt (style adds 15-20 more).
    """
    event_name = event.get("name", "Historical scene")
    event_year = event.get("year", year)
    location = get_location_name(event, lat, lon)

    # VISUAL SUBJECT (what the image shows)
    subject_type = "historical scene"
    if "battle" in event_name.lower() or "war" in event_name.lower():
        subject_type = "battlefield"
    elif "signing" in event_name.lower() or "declaration" in event_name.lower():
        subject_type = "formal ceremony"
    elif "speech" in event_name.lower() or "address" in event_name.lower():
        subject_type = "public gathering"
    elif "fall" in event_name.lower() or "liberation" in event_name.lower():
        subject_type = "crowd scene"

    # PARTICIPANTS (who's in the image with period clothing)
    actors = event.get("actors") or []
    participants_desc = ""
    if actors:
        # Add period-specific clothing descriptors
        clothing = get_period_clothing(event_year)
        if len(actors) == 1:
            participants_desc = f"{actors[0]} in {clothing}"
        elif len(actors) == 2:
            participants_desc = f"{actors[0]} and {actors[1]} in {clothing}"
        else:
            participants_desc = f"{actors[0]}, {actors[1]}, and others in {clothing}"

    # ENVIRONMENT (where the scene takes place)
    location_desc = get_environment_description(location, event_name, event_year)

    # VISUAL ELEMENTS (period artifacts, architecture)
    artifacts = event.get("artifacts") or []
    visual_elements = join_list(artifacts[:3], "and") if artifacts else ""

    # LIGHTING (time-based atmospheric description)
    lighting = time_desc  # Use full description

    return {
        "subject": subject_type,
        "participants": participants_desc,
        "location": location_desc,
        "elements": visual_elements,
        "lighting": lighting,
        "event_name": event_name,  # Keep for fallback
    }


def get_period_clothing(year: int) -> str:
    """Get period-appropriate clothing description."""
    if year < 1500:
        return "medieval robes and tunics"
    elif year < 1700:
        return "Renaissance doublets and robes"
    elif year < 1800:
        return "18th century coats and breeches"
    elif year < 1850:
        return "early 19th century military uniforms"
    elif year < 1900:
        return "Victorian formal attire"
    elif year < 1920:
        return "Edwardian formal dress"
    elif year < 1950:
        return "1940s military uniforms"
    elif year < 1980:
        return "mid-century formal wear"
    else:
        return "modern formal attire"
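
# Illustrative behaviour (hypothetical event, not necessarily in the dataset):
# an event named "Battle of Waterloo" dated 1815 is classified as subject_type
# "battlefield", and get_period_clothing(1815) returns
# "early 19th century military uniforms". Note that build_event_sections()
# relies on a get_location_name() helper that is not defined in this part of the file.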
def get_environment_description(location: str, event_name: str, year: int) -> str:
    """Get concrete environmental description."""
    if "°" in location:
        env_name = ""
    else:
        env_name = location

    if "battle" in event_name.lower() or "war" in event_name.lower():
        if year < 1900:
            return (
                f"{env_name} countryside with period cannons, military encampments, and smoke from musket fire"
                if env_name
                else "muddy battlefield with period cannons, military encampments, and gunpowder smoke"
            )
        else:
            return (
                f"{env_name} terrain with military vehicles, fortifications, and artillery"
                if env_name
                else "war-torn terrain with military vehicles, fortifications, and artillery smoke"
            )
    elif "palace" in location.lower() or "hall" in event_name.lower():
        return "grand ornate interior with period chandeliers, decorative architecture, and formal furnishings"
    elif "street" in event_name.lower() or "crowd" in event_name.lower() or "fall" in event_name.lower():
        return (
            f"{env_name} streets with period buildings, gathered crowds, and urban architecture"
            if env_name
            else "city streets with period buildings, gathered crowds, and architectural details"
        )
    else:
        return (
            f"{env_name} with historically accurate period architecture and setting"
            if env_name
            else "period-accurate setting with appropriate historical architecture"
        )


def assemble_prompt_from_sections(sections: Dict[str, str], quality: str) -> str:
    """Assemble VISUAL-FIRST prompt for image generation.

    Format: "[Subject] showing [participants]. [Environment with elements]. [Lighting]."
    NO metadata - only visual descriptions that models understand.
    """
    subject = sections.get("subject", "historical scene")
    participants = sections.get("participants", "")
    location = sections.get("location", "")
    elements = sections.get("elements", "")
    lighting = sections.get("lighting", "")

    # Build prompt sentence by sentence
    parts = []

    # Sentence 1: Subject + Participants
    if participants:
        parts.append(f"{subject.capitalize()} showing {participants}")
    else:
        event_name = sections.get("event_name", "historical event")
        parts.append(f"{subject.capitalize()} depicting {event_name}")

    # Sentence 2: Environment WITH Elements (combined)
    if location and elements:
        parts.append(f"{location} with {elements}")
    elif location:
        parts.append(location)
    elif elements:
        parts.append(f"Scene with {elements}")

    # Sentence 3: Lighting/Atmosphere
    if lighting:
        parts.append(f"{lighting}")

    # Join all parts with periods for clear structure
    final = ". ".join(parts)
    return final.strip()
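
# Illustrative assembly (hypothetical sections dict; note the quality argument is
# accepted by the helper but not currently used inside it):
#   assemble_prompt_from_sections(
#       {
#           "subject": "battlefield",
#           "participants": "two generals in early 19th century military uniforms",
#           "location": "muddy battlefield with period cannons and gunpowder smoke",
#           "elements": "muskets and cavalry standards",
#           "lighting": "warm late-afternoon sun gilding the scene with golden highlights",
#       },
#       "Documentary style, historically accurate, photorealistic, 8K",
#   )
# -> "Battlefield showing two generals in early 19th century military uniforms.
#     muddy battlefield with period cannons and gunpowder smoke with muskets and
#     cavalry standards. warm late-afternoon sun gilding the scene with golden highlights"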
def build_fallback_prompt(
    lat: float,
    lon: float,
    year: int,
    month: int,
    day: int,
    hour: int,
    mood: str,
) -> Tuple[str, str]:
    time_desc = describe_time_of_day(hour)
    season_desc = describe_season(lat, month)
    vocab = get_era_vocabulary(year)
    artifact_titles = [artifact["title"] for artifact in get_artifacts_for_year(year, limit=3)]

    # Keys mirror the fields read by assemble_prompt_from_sections()
    sections = {
        "subject": f"{mood} depiction of daily life near {lat:.2f}, {lon:.2f} in {year}",
        "participants": "residents, traders, and travelers in their everyday routines",
        "location": f"{vocab.get('architecture')}",
        "elements": (
            join_list(artifact_titles)
            if artifact_titles
            else f"{vocab.get('technology')} and {vocab.get('transport')}"
        ),
        "lighting": f"{time_desc} and {season_desc}",
    }

    # Style-appropriate quality tags
    if mood in ["Cartoon", "Minecraft", "Retro", "Glitch"]:
        quality_map = {
            "Cartoon": "cartoon illustration style, vibrant colors, bold outlines, historically inspired",
            "Minecraft": "minecraft blocky style, pixelated, cubic forms, historically themed",
            "Retro": "retro vintage style, film grain, nostalgic color grading, historically accurate",
            "Glitch": "digital glitch art style, cyberpunk aesthetic, data corruption effects, historically themed",
        }
        quality = quality_map.get(mood, f"{mood} style, historically inspired")
    else:
        quality = f"{mood} style, historically accurate, photorealistic, 8K"

    prompt = assemble_prompt_from_sections(sections, quality)
    hint = (
        f"📍 {lat:.4f}, {lon:.4f} | 📅 {year}-{month:02d}-{day:02d} {hour:02d}:00\n"
        "ℹ️ No curated event match found; using era vocabulary fallback."
    )
    return prompt, hint


# Load environment variables
try:
    from dotenv import load_dotenv
    load_dotenv()
except ImportError:
    pass

# ==========================
# SSL Certificate Fix
# ==========================
# Fix SSL certificate issues on macOS/Anaconda
try:
    import certifi
    cert_path = certifi.where()
    if cert_path and os.path.exists(cert_path):
        os.environ.setdefault("SSL_CERT_FILE", cert_path)
        os.environ.setdefault("REQUESTS_CA_BUNDLE", cert_path)
        os.environ.setdefault("CURL_CA_BUNDLE", cert_path)
except ImportError:
    pass

# ==========================
# Configuration
# ==========================
# Try both common environment variable names for HF token
HF_TOKEN = (os.getenv("HUGGINGFACE_API_TOKEN") or os.getenv("HF_TOKEN", "")).strip()
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY", "").strip()

# Display token status on startup
print("\n" + "=" * 60)
print("🔑 API Token Status:")
print("=" * 60)
if HF_TOKEN:
    print(f"✅ HF_TOKEN: Found ({len(HF_TOKEN)} chars) - {HF_TOKEN[:10]}...")
else:
    print("❌ HF_TOKEN: Not found!")
    print("   Set HUGGINGFACE_API_TOKEN or HF_TOKEN environment variable")
if GEMINI_API_KEY:
    print(f"✅ GEMINI_API_KEY: Found ({len(GEMINI_API_KEY)} chars) - {GEMINI_API_KEY[:10]}...")
    genai.configure(api_key=GEMINI_API_KEY)
else:
    print("⚠️ GEMINI_API_KEY: Not found (prompts will use fallback)")
print("=" * 60 + "\n")
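
# Example .env file for local development (placeholder values, supply your own):
#   HUGGINGFACE_API_TOKEN=hf_xxxxxxxxxxxxxxxx
#   GEMINI_API_KEY=your-gemini-key
# Either HUGGINGFACE_API_TOKEN or HF_TOKEN is accepted; GEMINI_API_KEY is
# optional, and without it the app prints a warning and uses its fallback prompt path.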
# Image generation models
IMAGE_MODELS = {
    "FLUX.1-dev (High Quality)": "black-forest-labs/FLUX.1-dev",
    "FLUX.1-schnell (Fast)": "black-forest-labs/FLUX.1-schnell",
}

# ==========================
# Negative Prompts by Era (Prevent Anachronisms)
# ==========================
NEGATIVE_PROMPTS_BY_ERA = {
    (-5000, 500): "modern clothing, eyeglasses, wristwatches, cars, trains, airplanes, electric lights, smartphones, cameras, plastic, metal zippers, sneakers, jeans, t-shirts, hoodies, sunglasses, baseball caps, medieval armor, Renaissance art style",
    (500, 1500): "Renaissance art style, modern clothing, eyeglasses, wristwatches, cars, trains, airplanes, electric lights, smartphones, cameras, plastic, metal zippers, sneakers, jeans, t-shirts, hoodies, sunglasses, baseball caps, printing press, gunpowder weapons",
    (1500, 1800): "cars, motorcycles, airplanes, electric lights, smartphones, modern electronics, plastic materials, synthetic fabrics, LED screens, digital devices, neon signs, concrete buildings, steel skyscrapers, industrial machinery",
    (1800, 1900): "cars, motorcycles, airplanes, electric lights, smartphones, modern electronics, plastic materials, synthetic fabrics, LED screens, digital devices, neon signs, concrete buildings, steel skyscrapers, modern vehicles",
    (1900, 1940): "smartphones, laptops, computers, LED lights, jet aircraft, modern cars (post-1940), plastic bottles, digital screens, television, contemporary architecture, synthetic clothing, polyester, atomic age technology",
    (1940, 1970): "smartphones, tablets, computers, personal electronics, LED billboards, 1980s fashion, digital displays, modern vehicles (post-1970), synthesizers, disco fashion, platform shoes, personal computers",
    (1970, 2000): "smartphones, tablets, laptops, modern electronics (post-2000), contemporary fashion, LED screens, modern architecture (post-2000), flat screen monitors, wireless devices, social media",
    (2000, 2100): "futuristic technology, holograms, flying cars, sci-fi elements, unrealistic technology, augmented reality interfaces, cyberpunk aesthetics, AI robots",
}

# FLUX-specific negative prompts (always included)
FLUX_NEGATIVE_BASE = "cartoon, illustration, painting, drawing, sketch, low quality, blurry, distorted, watermark, text, signature, jpeg artifacts, pixelated, bad anatomy, deformed, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck"


def get_negative_prompt(year: int) -> str:
    """Returns negative prompt to prevent anachronisms for the given year."""
    era_negatives = ""
    for (start, end), negatives in NEGATIVE_PROMPTS_BY_ERA.items():
        if start <= year < end:
            era_negatives = negatives
            break
    if not era_negatives:
        era_negatives = NEGATIVE_PROMPTS_BY_ERA[(1970, 2000)]  # Default

    # Combine era-specific and FLUX base negatives
    return f"{FLUX_NEGATIVE_BASE}, {era_negatives}"
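
# Illustrative lookup: get_negative_prompt(1850) combines FLUX_NEGATIVE_BASE with
# the (1800, 1900) era entry, so the returned string excludes cars, airplanes,
# smartphones, and other post-period artifacts. The era ranges are half-open
# [start, end), so the boundary year 1900 falls into the (1900, 1940) bucket.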
# ==========================
# AI Prompt Generation
# ==========================
def generate_historical_prompt(
    lat: float,
    lon: float,
    year: int,
    month: int,
    day: int,
    hour: int,
    mood: str = "Documentary",
) -> Tuple[str, str]:
    """Finds historical events and builds a prompt for image generation."""
    # Find events near the coordinates, prioritizing exact year matches
    events = get_events_by_coordinates(
        lat,
        lon,
        year,
        radius_km=400,
        limit=4,
        include_wikidata=True,
        year_weight=1.5,
    )

    time_desc = describe_time_of_day(hour)
    season_desc = describe_season(lat, month)

    if events:
        focus_event = events[0]
        sections = build_event_sections(focus_event, time_desc, season_desc, mood, lat, lon, year)

        # Style-appropriate quality tags
        if mood in ["Cartoon", "Minecraft", "Retro", "Glitch"]:
            quality_map = {
                "Cartoon": "cartoon illustration style, vibrant colors, bold outlines, historically inspired",
                "Minecraft": "minecraft blocky style, pixelated, cubic forms, historically themed",
                "Retro": "retro vintage style, film grain, nostalgic color grading, historically accurate",
                "Glitch": "digital glitch art style, cyberpunk aesthetic, data corruption effects, historically themed",
            }
            quality = quality_map.get(mood, f"{mood} style, historically inspired")
        else:
            quality = f"{mood} style, historically accurate, photorealistic, 8K"

        prompt = assemble_prompt_from_sections(sections, quality)

        # Show where the event came from
        source = focus_event.get("source", "curated")
        source_badge = "📚 Curated" if source == "curated" else "🌐 Wikidata"
        qid = focus_event.get("qid", "")
        source_info = f"{source_badge}"
        if qid:
            source_info += f" ({qid})"

        # Show how close the year match is
        year_delta = focus_event.get("year_delta", 0)
        if year_delta == 0:
            year_match = "🎯 Exact year match!"
        elif year_delta <= 2:
            year_match = f"📅 ±{year_delta} year(s)"
        elif year_delta <= 5:
            year_match = f"📅 ~{year_delta} years"
        elif year_delta <= 10:
            year_match = f"⏳ ~{year_delta} years apart"
        else:
            year_match = f"⚠️ {year_delta} years apart"

        hint_lines = [
            f"📍 {lat:.4f}, {lon:.4f} | 📅 {year}-{month:02d}-{day:02d} {hour:02d}:00",
            (
                f"🎯 Focus event: {focus_event['name']} ({focus_event.get('year')}) · "
                f"{focus_event.get('distance_km')}km · {year_match} · {source_info}"
            ),
        ]
        summary = focus_event.get("summary") or focus_event.get("narrative") or focus_event.get("description")
        if summary:
            hint_lines.append(f"📖 {summary[:200]}{'...' if len(summary) > 200 else ''}")

        # Show participants if available
        participants = focus_event.get("actors") or focus_event.get("participants") or []
        if participants:
            hint_lines.append(f"👥 Participants: {', '.join(participants[:5])}")

        if len(events) > 1:
            hint_lines.append("🔗 Related possibilities:")
            for related in events[1:]:
                rel_source = "📚" if related.get("source") == "curated" else "🌐"
                hint_lines.append(
                    f"- {rel_source} {related['name']} ({related.get('year')}) · {related.get('distance_km')}km · "
                    f"conf {related.get('match_confidence', 0):.2f}"
                )

        hint = "\n".join(hint_lines)
        return prompt, hint

    # Fallback when no curated event is close enough
    return build_fallback_prompt(lat, lon, year, month, day, hour, mood)
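
# Illustrative call (arbitrary sample coordinates and date, not a curated entry):
#   prompt, hint = generate_historical_prompt(52.5163, 13.3777, 1989, 11, 9, 20, "Documentary")
# `prompt` is the visual-first description sent to the image model; `hint` is the
# human-readable summary shown in the UI (location, date, focus event, year match,
# and source badge), or the fallback note when no nearby event is found.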
# ==========================
# Image Generation
# ==========================
def generate_image(prompt: str, model_key: str, year: Optional[int] = None, negative_prompt: Optional[str] = None) -> Tuple[Optional[Image.Image], str]:
    """Generates image using FLUX via Hugging Face API.

    Auto-generates negative prompts based on year if not provided.
    """
    if not HF_TOKEN:
        error_msg = """❌ Hugging Face API token not configured!

Please set one of these environment variables:
• HUGGINGFACE_API_TOKEN='your_token_here'
• HF_TOKEN='your_token_here'

Get your token at: https://huggingface.co/settings/tokens"""
        return None, error_msg

    model_id = IMAGE_MODELS.get(model_key, IMAGE_MODELS["FLUX.1-dev (High Quality)"])

    # Generate negative prompt if not provided and year is available
    if negative_prompt is None and year is not None:
        negative_prompt = get_negative_prompt(year)

    try:
        client = InferenceClient(token=HF_TOKEN)
        start_time = datetime.now()

        # Build parameters
        params = {
            "prompt": prompt,
            "model": model_id,
            "width": 1024,
            "height": 768,
        }
        # Add negative prompt if available (FLUX supports it)
        if negative_prompt:
            params["negative_prompt"] = negative_prompt

        image = client.text_to_image(**params)
        elapsed = (datetime.now() - start_time).total_seconds()

        status = f"✅ Generated in {elapsed:.1f}s using {model_key}"
        if negative_prompt:
            status += " (with era-appropriate exclusions)"
        return image, status
    except Exception as e:
        error_msg = f"❌ Generation failed: {str(e)}"
        # Add helpful context for common errors
        if "rate limit" in str(e).lower():
            error_msg += "\n\n💡 Tip: Try FLUX.1-schnell (faster) or wait a few minutes."
        elif "timeout" in str(e).lower():
            error_msg += "\n\n💡 Tip: Model loading. Try again in 30 seconds."
        return None, error_msg


# ==========================
# Main Workflow
# ==========================
def process_coordinates(lat: float, lon: float, year: int, month: int, day: int, hour: int, mood: str, model_key: str, custom_prompt: Optional[str] = None):
    """Main workflow: takes coordinates and date, generates prompt and image."""
    # Generate prompt or use custom one
    status_parts = []
    if custom_prompt and custom_prompt.strip():
        prompt = custom_prompt.strip()
        hint = f"📍 {lat:.4f}, {lon:.4f} | 📅 {year}-{month:02d}-{day:02d} {hour}:00 (Custom prompt)"
        status_parts.append("✅ Using custom prompt")
    else:
        status_parts.append("🔍 Searching historical events...")
        prompt, hint = generate_historical_prompt(lat, lon, year, month, day, hour, mood)
        status_parts.append("✅ Prompt generated")

    # Generate the image
    status_parts.append(f"🎨 Generating image with {model_key}...")
    image, gen_status = generate_image(prompt, model_key, year=year)
    status_parts.append(gen_status)

    # Build timeline of nearby events
    status_parts.append("📜 Building historical timeline...")
    events = get_events_by_coordinates(
        lat,
        lon,
        year,
        radius_km=500,
        limit=5,
        include_wikidata=True,
        year_weight=1.5,
    )
    status_parts.append(f"✅ Found {len(events)} nearby events")

    status = " | ".join(status_parts)

    timeline_md = "### Nearby Historical Events\n\n"
    if events:
        for event in events:
            source = event.get("source", "curated")
            source_icon = "📚" if source == "curated" else "🌐"
            qid = event.get("qid", "")
            qid_link = f" [[{qid}](https://www.wikidata.org/wiki/{qid})]" if qid else ""

            # Year match indicator
            year_delta = event.get("year_delta", 0)
            if year_delta == 0:
                year_badge = "🎯"
            elif year_delta <= 5:
                year_badge = "📅"
            else:
                year_badge = "⏳"

            timeline_md += (
                f"**{event.get('year')}** {year_badge} – {source_icon} {event.get('name')}{qid_link} "
                f"({event.get('distance_km')}km"
            )
            if year_delta > 0:
                timeline_md += f" · ±{year_delta}yr"
            timeline_md += f")\n\n"

            summary = event.get("summary") or event.get("narrative") or event.get("description")
            if summary:
                timeline_md += f"> {summary[:180]}{'...' if len(summary) > 180 else ''}\n\n"

            # Show participants for Wikidata events
            participants = event.get("actors") or event.get("participants") or []
            if participants and source == "wikidata":
                timeline_md += f"_Participants: {', '.join(participants[:4])}_\n\n"
    else:
        timeline_md += "_No specific events found in database or Wikidata. Scene generated from era-appropriate context._"

    return image, prompt, hint, status, timeline_md
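
# End-to-end sketch (hypothetical inputs; requires a valid Hugging Face token):
#   image, prompt, hint, status, timeline = process_coordinates(
#       48.8584, 2.2945, 1889, 5, 6, 15, "Documentary", "FLUX.1-schnell (Fast)"
#   )
# Returns a PIL.Image (or None on failure), the prompt, the location/date hint,
# a pipeline status string, and a markdown timeline of nearby events.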
# ==========================
# Gradio UI
# ==========================
def create_app():
    """Sets up the Gradio UI."""
    # Initialize history dataset and store
    initialize_history()

    # Hybrid Theme: Warm Hero + Clean Readable Content
    # Hero uses Peach/Lavender/Sky Blue gradient
    # Content areas use high-contrast, professional styling
    custom_css = """
    :root {
        /* Hero colors (warm palette) */
        --peach: #ffad7a; --peach-dark: #e8935c; --lavender: #b8a9d9; --sky-blue: #7ACCFF;
        /* Content colors (high contrast, professional) */
        --bg-light: #f9fafb; --surface: #ffffff; --text-primary: #1f2937; --text-secondary: #4b5563; --text-muted: #6b7280; --border-default: #e5e7eb; --border-subtle: #f3f4f6;
        /* Accent (peach for interactive elements) */
        --accent: #ffad7a; --accent-hover: #e8935c; --accent-subtle: rgba(255, 173, 122, 0.1);
        /* Shadows */
        --shadow-sm: 0 1px 2px rgba(0, 0, 0, 0.05); --shadow-md: 0 4px 12px rgba(0, 0, 0, 0.08); --shadow-lg: 0 8px 24px rgba(0, 0, 0, 0.12);
    }
    /* Base - Clean, readable */
    body { background: var(--bg-light) !important; color: var(--text-primary) !important; font-family: -apple-system, BlinkMacSystemFont, 'SF Pro Display', 'Segoe UI', Roboto, sans-serif !important; -webkit-font-smoothing: antialiased; }
    .gradio-container { max-width: 100% !important; background: var(--bg-light) !important; }
    /* ========== HERO - Warm gradient (kept from new design) ========== */
    .hero { text-align: center; padding: 2.5rem 2rem; background: linear-gradient(135deg, var(--peach) 0%, var(--lavender) 50%, var(--sky-blue) 100%); border-radius: 20px; margin: 1rem; box-shadow: var(--shadow-lg), 0 0 30px rgba(255, 173, 122, 0.2); position: relative; overflow: hidden; }
    .hero::before { content: ''; position: absolute; top: 0; left: 0; right: 0; bottom: 0; background: radial-gradient(ellipse at 30% 20%, rgba(255,255,255,0.35) 0%, transparent 50%); pointer-events: none; }
    .hero h1 { font-size: 2.75rem; font-weight: 600; color: #ffffff !important; margin-bottom: 0.5rem; text-shadow: 0 2px 8px rgba(0,0,0,0.15); letter-spacing: -0.03em; position: relative; font-family: 'Inter', 'Helvetica Neue', system-ui, sans-serif; }
    .hero p { color: rgba(255, 255, 255, 0.95); font-size: 1.1rem; font-weight: 450; position: relative; }
    /* ========== CONTENT AREAS - Clean, high contrast ========== */
    /* Layout */
    .main-grid { display: grid; grid-template-columns: 320px 1fr; gap: 1.5rem; padding: 1rem; }
    /* Sidebar - Clean white */
    .sidebar { background: var(--surface); border-radius: 12px; padding: 1.5rem; border: 1px solid var(--border-default); box-shadow: var(--shadow-md); height: fit-content; position: sticky; top: 1rem; }
    /* Main Panel - Clean white */
    .main-panel { background: var(--surface); border-radius: 12px; padding: 1.5rem; border: 1px solid var(--border-default); box-shadow: var(--shadow-md); }
    /* Inputs - Clean with peach focus */
    input, select, textarea { background: var(--bg-light) !important; border: 1px solid var(--border-default) !important; color: var(--text-primary) !important; border-radius: 8px !important; transition: all 0.15s ease !important; }
    input:focus, select:focus, textarea:focus { border-color: var(--accent) !important; box-shadow: 0 0 0 3px var(--accent-subtle) !important; outline: none !important; }
    /* Buttons - Peach gradient (elegant touch) */
    button { background: linear-gradient(135deg, var(--accent) 0%, var(--accent-hover) 100%) !important; color: #ffffff !important; font-weight: 600 !important; border: none !important; border-radius: 10px !important; padding: 0.75rem 1.5rem !important; transition: all 0.2s ease !important; box-shadow: 0 2px 8px rgba(255, 173, 122, 0.3) !important; }
    button:hover { background: linear-gradient(135deg, var(--accent-hover) 0%, #d67d45 100%) !important; transform: translateY(-1px) !important; box-shadow: 0 4px 16px rgba(255, 173, 122, 0.4) !important; }
    /* Labels - High contrast */
    label { color: var(--text-secondary) !important; font-weight: 500 !important; font-size: 0.875rem !important; }
    /* Section headers - Clear hierarchy */
    .markdown-text h3, h3 { color: var(--text-primary) !important; font-weight: 600 !important; font-size: 1rem !important; margin-bottom: 0.5rem !important; }
    /* All markdown text - Ensure readability */
    .markdown-text, .markdown-text p, .markdown-text span { color: var(--text-primary) !important; }
    .markdown-text strong { color: var(--text-primary) !important; font-weight: 600 !important; }
    /* Image container - Enhanced with animations */
    .gr-image { border-radius: 12px !important; border: 1px solid var(--border-default) !important; box-shadow: var(--shadow-md) !important; transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1) !important; opacity: 0; animation: fadeInImage 0.6s ease-out forwards !important; }
    @keyframes fadeInImage { from { opacity: 0; transform: translateY(10px) scale(0.98); } to { opacity: 1; transform: translateY(0) scale(1); } }
    .gr-image:hover { border-color: var(--accent) !important; box-shadow: 0 8px 24px rgba(255, 173, 122, 0.3) !important; transform: translateY(-2px) scale(1.01) !important; }
    /* Hint/status text - Subtle background, readable text with smooth transitions */
    .hint-text { font-size: 0.9rem; color: var(--text-secondary); padding: 0.875rem 1rem; background: var(--bg-light); border-radius: 8px; border: 1px solid var(--border-default); margin-top: 0.5rem; line-height: 1.6; transition: all 0.3s ease !important; animation: slideInHint 0.4s ease-out !important; }
    @keyframes slideInHint { from { opacity: 0; transform: translateX(-10px); } to { opacity: 1; transform: translateX(0); } }
    /* Accordion */
    .gr-accordion { background: var(--surface) !important; border: 1px solid var(--border-default) !important; border-radius: 8px !important; }
    /* Blockquotes - Subtle lavender touch */
    blockquote, .markdown-text blockquote { border-left: 3px solid var(--lavender) !important; background: #faf9fc !important; padding: 0.75rem 1rem !important; margin: 0.5rem 0 !important; border-radius: 0 6px 6px 0 !important; color: var(--text-secondary) !important; }
    /* Links - Sky blue */
    a { color: #2563eb !important; text-decoration: none !important; }
    a:hover { color: var(--accent-hover) !important; text-decoration: underline !important; }
    /* Slider */
    input[type="range"] { accent-color: var(--accent) !important; }
    /* Loading states - Enhanced */
    .generating { position: relative; overflow: hidden; }
    .generating::after { content: ''; position: absolute; top: 0; left: -100%; width: 100%; height: 100%; background: linear-gradient(90deg, transparent, rgba(255,173,122,0.2), transparent); animation: loading 1.5s infinite; }
    @keyframes loading { 0% { left: -100%; } 100% { left: 100%; } }
    /* Skeleton loader for image placeholder */
    .image-skeleton { width: 100%; height: 600px; background: linear-gradient(90deg, var(--bg-light) 0%, var(--border-subtle) 50%, var(--bg-light) 100%); background-size: 200% 100%; border-radius: 12px; animation: skeleton-loading 1.5s ease-in-out infinite; }
    @keyframes skeleton-loading { 0% { background-position: 200% 0; } 100% { background-position: -200% 0; } }
    /* Progress indicator */
    .progress-bar { height: 4px; background: linear-gradient(90deg, var(--accent), var(--lavender)); border-radius: 2px; animation: progress 2s ease-in-out infinite; margin: 1rem 0; }
    @keyframes progress { 0%, 100% { transform: scaleX(0.3); transform-origin: left; } 50% { transform: scaleX(1); transform-origin: left; } }
    /* Status text with pulse animation */
    .status-generating { color: var(--accent) !important; font-weight: 500 !important; animation: pulse 2s ease-in-out infinite !important; }
    @keyframes pulse { 0%, 100% { opacity: 1; } 50% { opacity: 0.7; } }
    /* Button active state */
    button:active { transform: translateY(0) scale(0.98) !important; transition: transform 0.1s ease !important; }
    /* Panel entrance animation */
    .main-panel, .sidebar { animation: fadeInPanel 0.5s ease-out !important; }
    @keyframes fadeInPanel { from { opacity: 0; transform: translateY(20px); } to { opacity: 1; transform: translateY(0); } }
    /* Smooth text transitions */
    .markdown-text { transition: opacity 0.3s ease !important; }
    /* Footer */
    footer { display: none !important; }
    /* Responsive */
    @media (max-width: 768px) { .main-grid { grid-template-columns: 1fr; } .sidebar { position: static; } .hero h1 { font-size: 2rem; color: #ffffff !important; } }
    """

    with gr.Blocks(title="Meridian – Historical Image Generator") as demo:
        # Inject custom CSS (assumed <style> wrapper around custom_css)
        gr.HTML(f"<style>{custom_css}</style>")

        # Hero
        gr.HTML("""
Generate historically accurate scenes from geospatial coordinates and date/time inputs