# (Hugging Face Spaces listing header left over from a page scrape: "Spaces: Running")
| #!/usr/bin/env python | |
| import os | |
| import re | |
| import json | |
| import requests | |
| from collections.abc import Iterator | |
| from threading import Thread | |
| import tempfile | |
| import random | |
| from typing import Dict, List, Tuple, Optional | |
| import shutil | |
| import concurrent.futures | |
| import gradio as gr | |
| from loguru import logger | |
| import pandas as pd | |
| import PyPDF2 | |
| from PIL import Image | |
| from gradio_client import Client | |
| import time | |
# ------------------ Korean font setup (matplotlib / Graphviz) ------------------
import matplotlib as mpl
import matplotlib.font_manager as fm
import subprocess
NANUM_PATH = "/usr/share/fonts/truetype/nanum/NanumGothic.ttf"  # system install path
# Register the font with matplotlib so Korean labels render correctly.
# Guard against a missing font file so importing this module does not crash
# on hosts without the nanum font package.
if os.path.exists(NANUM_PATH):
    fm.fontManager.addfont(NANUM_PATH)
    mpl.rcParams["font.family"] = "NanumGothic"
else:
    logger.warning(f"NanumGothic font not found at {NANUM_PATH}; Korean text may not render.")
# Point Graphviz at the font directory.
os.environ["GDFONTPATH"] = "/usr/share/fonts/truetype/nanum"
# Refresh the system font cache once at startup. Best-effort: previously an
# unguarded subprocess.run() raised FileNotFoundError when fc-cache was not
# installed and dumped verbose output to stdout.
try:
    subprocess.run(
        ["fc-cache", "-fv"],
        check=False,
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
except OSError:
    logger.warning("fc-cache not available; skipping font cache refresh.")
# ------------------ PPTX & diagram libraries ------------------
# python-pptx is optional: the flag below lets the rest of the app degrade
# gracefully (PPTX export disabled) instead of crashing at import time.
try:
    from pptx import Presentation
    from pptx.util import Inches, Pt
    from pptx.enum.text import PP_ALIGN, MSO_ANCHOR
    from pptx.dml.color import RGBColor
    from pptx.enum.shapes import MSO_SHAPE
    from pptx.chart.data import CategoryChartData
    from pptx.enum.chart import XL_CHART_TYPE, XL_LEGEND_POSITION
    PPTX_AVAILABLE = True
except ImportError:
    # Feature flag checked before any PPTX generation.
    PPTX_AVAILABLE = False
    logger.warning("python-pptx ๋ผ์ด๋ธ๋ฌ๋ฆฌ๊ฐ ์ค์น๋์ง ์์์ต๋๋ค. pip install python-pptx")
# ------------------ Diagram generator modules (optional) ------------------
# Local renderers that turn a JSON spec into a diagram image file. The flag
# lets diagram features be skipped when these modules are not deployed.
try:
    from concept_map_generator import generate_concept_map
    from synoptic_chart_generator import generate_synoptic_chart
    from radial_diagram_generator import generate_radial_diagram
    from process_flow_generator import generate_process_flow_diagram
    from wbs_diagram_generator import generate_wbs_diagram
    DIAGRAM_GENERATORS_AVAILABLE = True
    logger.info("๋ค์ด์ด๊ทธ๋จ ์์ฑ๊ธฐ ๋ชจ๋ ๋ก๋ ์ฑ๊ณต")
except ImportError as e:
    DIAGRAM_GENERATORS_AVAILABLE = False
    logger.warning(f"๋ค์ด์ด๊ทธ๋จ ์์ฑ๊ธฐ ๋ชจ๋์ ์ฐพ์ ์ ์์ต๋๋ค: {e}")
# ------------------ Korean font path (for diagram generators) ------------------
# Prefer a font bundled next to this file; fall back to the system install.
KOREAN_FONT_PATH = os.path.join(os.path.dirname(__file__), "NanumGothic-Regular.ttf")
if not os.path.exists(KOREAN_FONT_PATH):
    # No bundled font -> use the system-wide NanumGothic path.
    KOREAN_FONT_PATH = NANUM_PATH
# Export via environment so separately-imported generator modules can pick
# up the same font without a direct code dependency.
os.environ["KOREAN_FONT_PATH"] = KOREAN_FONT_PATH
##############################################################################
# API Configuration
##############################################################################
# Friendli chat-completions credentials. Fail fast at import time: nothing
# in this app works without the LLM token.
FRIENDLI_TOKEN = os.environ.get("FRIENDLI_TOKEN")
if not FRIENDLI_TOKEN:
    raise ValueError("Please set FRIENDLI_TOKEN environment variable")
# Dedicated deployment id and endpoint for the Friendli inference API.
FRIENDLI_MODEL_ID = "dep89a2fld32mcm"
FRIENDLI_API_URL = "https://api.friendli.ai/dedicated/v1/chat/completions"
# SERPHouse API key (web search). Optional: search degrades to a stub message.
SERPHOUSE_API_KEY = os.getenv("SERPHOUSE_API_KEY", "")
if not SERPHOUSE_API_KEY:
    logger.warning("SERPHOUSE_API_KEY not set. Web search functionality will be limited.")
##############################################################################
# AI Image Generation API Configuration - 3D Style
##############################################################################
AI_IMAGE_API_URL = "http://211.233.58.201:7971/"
AI_IMAGE_ENABLED = False
ai_image_client = None

def initialize_ai_image_api():
    """Connect to the 3D-style AI image generation endpoint.

    Updates the module-level client handle and enabled flag. Returns True
    on success, False when the connection attempt fails.
    """
    global AI_IMAGE_ENABLED, ai_image_client
    logger.info("Connecting to AI image generation API (3D style)...")
    try:
        client = Client(AI_IMAGE_API_URL)
    except Exception as err:
        logger.error(f"Failed to connect to AI image API: {err}")
        AI_IMAGE_ENABLED = False
        return False
    ai_image_client = client
    AI_IMAGE_ENABLED = True
    logger.info("AI image generation API (3D style) connected successfully")
    return True
##############################################################################
# AI Image Generation API Configuration - FLUX API
##############################################################################
FLUX_API_URL = "http://211.233.58.201:7896"
FLUX_API_ENABLED = False
flux_api_client = None

def initialize_flux_api():
    """Connect to the FLUX image generation endpoint.

    Sets the module-level FLUX client and enabled flag. Returns True when
    the connection succeeds and False otherwise.
    """
    global FLUX_API_ENABLED, flux_api_client
    logger.info("Connecting to FLUX API...")
    try:
        client = Client(FLUX_API_URL)
    except Exception as err:
        logger.error(f"Failed to connect to FLUX API: {err}")
        FLUX_API_ENABLED = False
        return False
    flux_api_client = client
    FLUX_API_ENABLED = True
    logger.info("FLUX API connected successfully")
    return True
##############################################################################
# Design Themes and Color Schemes
##############################################################################
# Each theme bundles a localized display name, a five-color palette
# (python-pptx RGBColor values) and per-role font choices. Palette roles:
#   primary/secondary/accent - brand colors for shapes and highlights,
#   background - slide fill, text - body color picked for contrast.
DESIGN_THEMES = {
    "professional": {
        "name": "ํ๋กํ์ ๋",
        "colors": {
            "primary": RGBColor(46, 134, 171),      # #2E86AB
            "secondary": RGBColor(162, 59, 114),    # #A23B72
            "accent": RGBColor(241, 143, 1),        # #F18F01
            "background": RGBColor(250, 250, 250),  # #FAFAFA - lighter background
            "text": RGBColor(44, 44, 44),           # #2C2C2C - darker text for better contrast
        },
        "fonts": {
            "title": "Arial",
            "subtitle": "Arial",
            "body": "Calibri"
        }
    },
    "modern": {
        "name": "๋ชจ๋",
        "colors": {
            "primary": RGBColor(114, 9, 183),       # #7209B7
            "secondary": RGBColor(247, 37, 133),    # #F72585
            "accent": RGBColor(76, 201, 240),       # #4CC9F0
            "background": RGBColor(252, 252, 252),  # #FCFCFC - very light background
            "text": RGBColor(40, 40, 40),           # #282828 - dark text
        },
        "fonts": {
            "title": "Arial",
            "subtitle": "Arial",
            "body": "Helvetica"
        }
    },
    "nature": {
        "name": "์์ฐ",
        "colors": {
            "primary": RGBColor(45, 106, 79),       # #2D6A4F
            "secondary": RGBColor(82, 183, 136),    # #52B788
            "accent": RGBColor(181, 233, 185),      # #B5E9B9 - softer accent
            "background": RGBColor(248, 252, 248),  # #F8FCF8 - light green tint
            "text": RGBColor(27, 38, 44),           # #1B262C
        },
        "fonts": {
            "title": "Georgia",
            "subtitle": "Verdana",
            "body": "Calibri"
        }
    },
    "creative": {
        "name": "ํฌ๋ฆฌ์์ดํฐ๋ธ",
        "colors": {
            "primary": RGBColor(255, 0, 110),       # #FF006E
            "secondary": RGBColor(251, 86, 7),      # #FB5607
            "accent": RGBColor(255, 190, 11),       # #FFBE0B
            "background": RGBColor(255, 248, 240),  # #FFF8F0 - light warm background
            "text": RGBColor(33, 33, 33),           # #212121 - dark text on light bg
        },
        "fonts": {
            "title": "Impact",
            "subtitle": "Arial",
            "body": "Segoe UI"
        }
    },
    "minimal": {
        "name": "๋ฏธ๋๋ฉ",
        "colors": {
            "primary": RGBColor(55, 55, 55),        # #373737 - softer than pure black
            "secondary": RGBColor(120, 120, 120),   # #787878
            "accent": RGBColor(0, 122, 255),        # #007AFF - blue accent
            "background": RGBColor(252, 252, 252),  # #FCFCFC
            "text": RGBColor(33, 33, 33),           # #212121
        },
        "fonts": {
            "title": "Helvetica",
            "subtitle": "Helvetica",
            "body": "Arial"
        }
    }
}
##############################################################################
# Slide Layout Types
##############################################################################
# Indexes into the default python-pptx template's slide-layout collection.
SLIDE_LAYOUTS = {
    "title": 0,           # title slide
    "title_content": 1,   # title + content
    "section_header": 2,  # section header
    "two_content": 3,     # two-column layout
    "comparison": 4,      # comparison layout
    "title_only": 5,      # title only
    "blank": 6            # blank slide
}
##############################################################################
# Emoji Bullet Points Mapping
##############################################################################
def has_emoji(text: str) -> bool:
    """Return True if *text* already starts with an emoji.

    Only the first three characters are inspected, since bullet emoji are
    expected at the start of a line. Returns False for the empty string.
    """
    for char in text[:3]:
        code = ord(char)
        # Common emoji code-point ranges. The 0x2B00-0x2BFF block is needed
        # so U+2B50 (star) - which get_emoji_for_content itself emits - is
        # recognized; 0x1FA00-0x1FAFF covers newer symbol/pictograph emoji.
        if (0x1F300 <= code <= 0x1F9FF) or \
           (0x1FA00 <= code <= 0x1FAFF) or \
           (0x2600 <= code <= 0x26FF) or \
           (0x2700 <= code <= 0x27BF) or \
           (0x2B00 <= code <= 0x2BFF) or \
           (0x1F000 <= code <= 0x1F02F) or \
           (0x1F0A0 <= code <= 0x1F0FF) or \
           (0x1F100 <= code <= 0x1F1FF):
            return True
    return False
# Ordered keyword -> emoji rules for get_emoji_for_content(). Matching is a
# case-insensitive substring search; the FIRST matching rule wins, so the
# order below is significant.
_EMOJI_CONTAINS_RULES = (
    # Technology
    (('ai', '์ธ๊ณต์ง๋ฅ', 'ml', '๋จธ์ ๋ฌ๋', '๋ฅ๋ฌ๋', 'deep learning'), '๐ค'),
    (('๋ฐ์ดํฐ', 'data', '๋ถ์', 'analysis', 'ํต๊ณ'), '๐'),
    (('์ฝ๋', 'code', 'ํ๋ก๊ทธ๋๋ฐ', 'programming', '๊ฐ๋ฐ'), '๐ป'),
    (('ํด๋ผ์ฐ๋', 'cloud', '์๋ฒ', 'server'), 'โ๏ธ'),
    (('๋ณด์', 'security', '์์ ', 'safety'), '๐'),
    (('๋คํธ์ํฌ', 'network', '์ฐ๊ฒฐ', 'connection', '์ธํฐ๋ท'), '๐'),
    (('๋ชจ๋ฐ์ผ', 'mobile', '์ค๋งํธํฐ', 'smartphone', '์ฑ'), '๐ฑ'),
    # Business
    (('์ฑ์ฅ', 'growth', '์ฆ๊ฐ', 'increase', '์์น'), '๐'),
    (('๋ชฉํ', 'goal', 'target', 'ํ๊ฒ', '๋ชฉ์ '), '๐ฏ'),
    (('๋', 'money', '๋น์ฉ', 'cost', '์์ฐ', 'budget', '์์ต'), '๐ฐ'),
    (('ํ', 'team', 'ํ์ ', 'collaboration', 'ํ๋ ฅ'), '๐ฅ'),
    (('์๊ฐ', 'time', '์ผ์ ', 'schedule', '๊ธฐํ'), 'โฐ'),
    (('์์ด๋์ด', 'idea', '์ฐฝ์', 'creative', 'ํ์ '), '๐ก'),
    (('์ ๋ต', 'strategy', '๊ณํ', 'plan'), '๐'),
    (('์ฑ๊ณต', 'success', '๋ฌ์ฑ', 'achieve'), '๐'),
    # Education
    (('ํ์ต', 'learning', '๊ต์ก', 'education', '๊ณต๋ถ'), '๐'),
    (('์ฐ๊ตฌ', 'research', '์กฐ์ฌ', 'study', '์คํ'), '๐ฌ'),
    (('๋ฌธ์', 'document', '๋ณด๊ณ ์', 'report'), '๐'),
    (('์ ๋ณด', 'information', '์ง์', 'knowledge'), '๐'),
    # Communication
    (('์ํต', 'communication', '๋ํ', 'conversation'), '๐ฌ'),
    (('์ด๋ฉ์ผ', 'email', '๋ฉ์ผ', 'mail'), '๐ง'),
    (('์ ํ', 'phone', 'call', 'ํตํ'), '๐'),
    (('ํ์', 'meeting', '๋ฏธํ ', '์ปจํผ๋ฐ์ค'), '๐'),
    # Nature / Environment
    (('ํ๊ฒฝ', 'environment', '์์ฐ', 'nature'), '๐ฑ'),
    (('์ง์๊ฐ๋ฅ', 'sustainable', '์นํ๊ฒฝ', 'eco'), 'โป๏ธ'),
    (('์๋์ง', 'energy', '์ ๋ ฅ', 'power'), 'โก'),
    (('์ง๊ตฌ', 'earth', '์ธ๊ณ', 'world'), '๐'),
    # Process / Steps
    (('ํ๋ก์ธ์ค', 'process', '์ ์ฐจ', 'procedure', '๋จ๊ณ'), '๐'),
    (('์ฒดํฌ', 'check', 'ํ์ธ', 'verify', '๊ฒ์ฆ'), 'โ '),
    (('์ฃผ์', 'warning', '๊ฒฝ๊ณ ', 'caution'), 'โ ๏ธ'),
    (('์ค์', 'important', 'ํต์ฌ', 'key', 'ํ์'), 'โญ'),
    (('์ง๋ฌธ', 'question', '๋ฌธ์', 'ask'), 'โ'),
    (('ํด๊ฒฐ', 'solution', '๋ต', 'answer'), '๐ฏ'),
    # Actions
    (('์์', 'start', '์ถ๋ฐ', 'begin'), '๐'),
    (('์๋ฃ', 'complete', '์ข ๋ฃ', 'finish'), '๐'),
    (('๊ฐ์ ', 'improve', 'ํฅ์', 'enhance'), '๐ง'),
    (('๋ณํ', 'change', '๋ณ๊ฒฝ', 'modify'), '๐'),
    # Industries
    (('์๋ฃ', 'medical', '๋ณ์', 'hospital', '๊ฑด๊ฐ'), '๐ฅ'),
    (('๊ธ์ต', 'finance', '์ํ', 'bank'), '๐ฆ'),
    (('์ ์กฐ', 'manufacturing', '๊ณต์ฅ', 'factory'), '๐ญ'),
    (('๋์ ', 'agriculture', '๋์ฅ', 'farm'), '๐พ'),
    # Emotion / Status
    (('ํ๋ณต', 'happy', '๊ธฐ์จ', 'joy'), '๐'),
    (('์ํ', 'danger', 'risk', '๋ฆฌ์คํฌ'), 'โก'),
    (('์๋ก์ด', 'new', '์ ๊ท', 'novel'), 'โจ'),
)

# Ordinal prefixes, checked with str.startswith AFTER the substring rules.
_EMOJI_PREFIX_RULES = (
    (('์ฒซ์งธ', 'first', '1.', '์ฒซ๋ฒ์งธ', '์ฒซ ๋ฒ์งธ'), '1๏ธโฃ'),
    (('๋์งธ', 'second', '2.', '๋๋ฒ์งธ', '๋ ๋ฒ์งธ'), '2๏ธโฃ'),
    (('์ ์งธ', 'third', '3.', '์ธ๋ฒ์งธ', '์ธ ๋ฒ์งธ'), '3๏ธโฃ'),
    (('๋ท์งธ', 'fourth', '4.', '๋ค๋ฒ์งธ', '๋ค ๋ฒ์งธ'), '4๏ธโฃ'),
    (('๋ค์ฏ์งธ', 'fifth', '5.', '๋ค์ฏ๋ฒ์งธ', '๋ค์ฏ ๋ฒ์งธ'), '5๏ธโฃ'),
)

def get_emoji_for_content(text: str) -> str:
    """Pick a bullet emoji that matches the topic of *text*.

    Scans the ordered substring rules first, then the ordinal prefixes,
    and falls back to a generic arrow bullet when nothing matches.
    """
    text_lower = text.lower()
    for keywords, emoji in _EMOJI_CONTAINS_RULES:
        if any(word in text_lower for word in keywords):
            return emoji
    for prefixes, emoji in _EMOJI_PREFIX_RULES:
        if text_lower.startswith(prefixes):
            return emoji
    # Default bullet
    return 'โถ๏ธ'
##############################################################################
# Diagram Type Detection with Priority Score
##############################################################################
# Weighted keyword rules: (diagram type, keywords, weight). A rule fires at
# most once when ANY of its keywords is a substring of the slide text.
_DIAGRAM_KEYWORD_RULES = (
    ("Process Flow", ('ํ๋ก์ธ์ค', 'process', '์ ์ฐจ', 'procedure'), 3),
    ("Process Flow", ('๋จ๊ณ', 'step', 'flow', 'ํ๋ฆ'), 2),
    ("Process Flow", ('์ํฌํ๋ก์ฐ', 'workflow', '์์', 'sequence'), 2),
    ("WBS Diagram", ('wbs', '์์ ๋ถํด', 'ํ๋ก์ ํธ', 'project'), 3),
    ("WBS Diagram", ('์ ๋ฌด๋ถํด', 'breakdown', '๊ตฌ์กฐ๋', '์์ ๊ตฌ์กฐ'), 2),
    ("Concept Map", ('๊ฐ๋ ', 'concept', '๊ด๊ณ', 'relationship'), 3),
    ("Concept Map", ('์ฐ๊ด', 'connection', '๋ง์ธ๋๋งต', 'mindmap'), 2),
    ("Concept Map", ('๊ตฌ์กฐ', 'structure', '์ฒด๊ณ', 'system'), 1),
    ("Radial Diagram", ('์ค์ฌ', 'central', '๋ฐฉ์ฌํ', 'radial'), 3),
    ("Radial Diagram", ('ํต์ฌ', 'core', '์ฃผ์', 'main'), 2),
    ("Synoptic Chart", ('๊ฐ์', 'overview', '์ ์ฒด', 'overall'), 3),
    ("Synoptic Chart", ('์์ฝ', 'summary', '์๋ํฑ', 'synoptic'), 2),
)

def detect_diagram_type_with_score(title: str, content: str) -> Tuple[Optional[str], float]:
    """Choose the best-fitting diagram type for a slide.

    Returns (diagram_type, necessity_score): the score is the accumulated
    keyword score normalized to 0..1 (5+ points == 1.0). When no keyword
    matched at all, returns (None, 0.0).
    """
    combined_text = f"{title} {content}".lower()
    # Insertion order doubles as the tie-breaker between equal scores.
    scores = {
        "Process Flow": 0,
        "WBS Diagram": 0,
        "Concept Map": 0,
        "Radial Diagram": 0,
        "Synoptic Chart": 0,
    }
    for diagram_type, keywords, weight in _DIAGRAM_KEYWORD_RULES:
        if any(word in combined_text for word in keywords):
            scores[diagram_type] += weight
    # Structural bonuses: bullet-heavy content suggests a concept map,
    # numbered items suggest a process flow.
    if content.count('\n-') > 3 or content.count('\nโข') > 3:
        scores["Concept Map"] += 1
    if any(marker in content for marker in ['1.', '2.', '3.', 'โ ', 'โก', 'โข']):
        scores["Process Flow"] += 1
    top_score = max(scores.values())
    if top_score <= 0:
        return None, 0.0
    best_type = max(scores, key=scores.get)
    # Normalize: five or more points maps to full necessity (1.0).
    return best_type, min(top_score / 5.0, 1.0)
##############################################################################
# Generate Diagram JSON using LLM
##############################################################################
def strip_code_fences(text: str) -> str:
    """Remove a surrounding markdown code fence (``` or ```json) from *text*."""
    text = text.strip()
    if text.startswith("```json"):
        text = text[7:]
    elif text.startswith("```"):
        text = text[3:]
    if text.endswith("```"):
        text = text[:-3]
    return text.strip()

def generate_diagram_json(title: str, content: str, diagram_type: str) -> Optional[str]:
    """Ask the LLM for a diagram-spec JSON string for *diagram_type*.

    Args:
        title: Slide title used as context.
        content: Slide body text used as context.
        diagram_type: One of the keys of the structure guides below.

    Returns:
        A validated JSON string, or None when the token is missing, the
        request fails, or the model output is not valid JSON.
    """
    if not FRIENDLI_TOKEN:
        return None
    # Per-diagram-type JSON structure guides injected into the system prompt.
    json_guides = {
        "Concept Map": """Generate a JSON for a concept map with the EXACT following structure:
{
    "central_node": "Main Topic",
    "nodes": [
        {
            "id": "node1",
            "label": "First Concept",
            "relationship": "is part of",
            "subnodes": [
                {
                    "id": "node1_1",
                    "label": "Sub Concept 1",
                    "relationship": "includes",
                    "subnodes": []
                }
            ]
        }
    ]
}""",
        "Process Flow": """Generate a JSON for a process flow diagram with the EXACT following structure:
{
    "start_node": "Start Process",
    "nodes": [
        {"id": "step1", "label": "First Step", "type": "process"},
        {"id": "step2", "label": "Decision Point", "type": "decision"},
        {"id": "end", "label": "End Process", "type": "end"}
    ],
    "connections": [
        {"from": "start_node", "to": "step1", "label": "Begin"},
        {"from": "step1", "to": "step2", "label": "Next"},
        {"from": "step2", "to": "end", "label": "Complete"}
    ]
}""",
        "WBS Diagram": """Generate a JSON for a WBS diagram with the EXACT following structure:
{
    "project_title": "Project Name",
    "phases": [
        {
            "id": "phase1",
            "label": "Phase 1",
            "tasks": [
                {
                    "id": "task1_1",
                    "label": "Task 1.1",
                    "subtasks": []
                }
            ]
        }
    ]
}""",
        "Radial Diagram": """Generate a JSON for a radial diagram with the EXACT following structure:
{
    "central_node": "Central Concept",
    "nodes": [
        {
            "id": "branch1",
            "label": "Branch 1",
            "relationship": "connected to",
            "subnodes": []
        }
    ]
}""",
        "Synoptic Chart": """Generate a JSON for a synoptic chart with the EXACT following structure:
{
    "central_node": "Chart Title",
    "nodes": [
        {
            "id": "phase1",
            "label": "Phase 1 Name",
            "relationship": "starts with",
            "subnodes": []
        }
    ]
}"""
    }
    system_prompt = f"""You are a helpful assistant that generates JSON structures for diagrams.
{json_guides.get(diagram_type, '')}
Important rules:
1. Generate ONLY valid JSON without any explanation or markdown formatting
2. The JSON must follow the EXACT structure shown above
3. Create content based on the provided title and content
4. Use the user's language (Korean or English) for the content values
5. Keep it simple with 3-5 main nodes/steps
6. For Process Flow: 'type' can be "process", "decision", "start", "end"
7. Ensure all connections reference existing node IDs"""
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": f"Create a {diagram_type} JSON for:\nTitle: {title}\nContent: {content}"}
    ]
    headers = {
        "Authorization": f"Bearer {FRIENDLI_TOKEN}",
        "Content-Type": "application/json"
    }
    payload = {
        "model": FRIENDLI_MODEL_ID,
        "messages": messages,
        "max_tokens": 1000,
        "temperature": 0.7,
        "stream": False
    }
    try:
        response = requests.post(FRIENDLI_API_URL, headers=headers, json=payload, timeout=30)
        if response.status_code != 200:
            # Previously a non-200 response fell through silently; log it.
            logger.error(f"Diagram JSON request failed with status {response.status_code}")
            return None
        response_data = response.json()
        choices = response_data.get('choices') or []
        if choices:
            # Keep the LLM output in its own variable instead of shadowing
            # the `content` parameter as the old code did.
            raw_output = choices[0]['message']['content']
            json_text = strip_code_fences(raw_output)
            json.loads(json_text)  # raises if the model emitted invalid JSON
            return json_text
    except Exception as e:
        logger.error(f"Error generating diagram JSON: {e}")
    return None
##############################################################################
# Generate Diagram using Local Generators with Korean Font
##############################################################################
def generate_diagram_locally(json_data: str, diagram_type: str, output_format: str = "png") -> Optional[str]:
    """Render *json_data* with the local generator for *diagram_type*.

    Returns the path of the generated file, or None when the generator
    modules are unavailable, the type is unknown, or rendering fails.
    """
    if not DIAGRAM_GENERATORS_AVAILABLE:
        logger.error("๋ค์ด์ด๊ทธ๋จ ์์ฑ๊ธฐ ๋ชจ๋์ ์ฌ์ฉํ ์ ์์ต๋๋ค")
        return None
    # Dispatch table: diagram type -> generator callable.
    generators = {
        "Concept Map": generate_concept_map,
        "Synoptic Chart": generate_synoptic_chart,
        "Radial Diagram": generate_radial_diagram,
        "Process Flow": generate_process_flow_diagram,
        "WBS Diagram": generate_wbs_diagram,
    }
    generator = generators.get(diagram_type)
    if generator is None:
        logger.error(f"Unknown diagram type: {diagram_type}")
        return None
    try:
        # Expose the Korean font path so the generator modules pick it up.
        os.environ['KOREAN_FONT_PATH'] = KOREAN_FONT_PATH
        result = generator(json_data, output_format)
    except Exception as e:
        logger.error(f"Failed to generate diagram locally: {e}")
        return None
    # The generators signal failure by returning an "Error:" string.
    if isinstance(result, str) and result.startswith("Error:"):
        logger.error(f"Diagram generation error: {result}")
        return None
    return result
##############################################################################
# FLUX Image Prompt Generator - supports 6 diagram styles
##############################################################################
# ------------------ Prompt Templates (6 Styles) ------------------
# Each template carries a {nodes} placeholder filled with the slide's
# bullet list when the prompt is built.
EXAMPLE_PROMPTS: dict[str, str] = {
    "Product Design": (
        "A sleek industrial product-design sketch.\n"
        "{nodes}"
    ),
    "Mindmap": (
        "A hand-drawn colorful mind-map, educational style, clear hierarchy.\n"
        "{nodes}"
    ),
    "Mockup": (
        "A clean hand-drawn wire-frame for a mobile banking app.\n"
        "{nodes}"
    ),
    "Infographic": (
        "A flat corporate infographic โ โGlobal Renewable Energy Trends 2025โ.\n"
        "{nodes}"
    ),
    "Diagram": (
        "A hand-drawn business process diagram.\n"
        "{nodes}"
    ),
    "Flowchart": (
        "A vibrant hand-drawn flow-chart.\n"
        "{nodes}"
    ),
}
# Style keys in declaration order; assigned round-robin per slide index.
STYLE_KEYS = list(EXAMPLE_PROMPTS.keys())

def pick_flux_style(slide_idx: int) -> str:
    """Map a slide index onto one of the six FLUX style keys, round-robin."""
    return STYLE_KEYS[slide_idx % len(STYLE_KEYS)]

def generate_flux_prompt(title: str, content: str, style_key: str) -> str:
    """Build a FLUX image-generation prompt for one slide.

    Uses the six pre-defined visual styles above. *content* is the raw
    bullet-point block; '+', '-', 'โข' and 'โ' bullet markers are all
    recognized.

    NOTE(review): *title* is currently unused; kept for interface stability.
    """
    # 1) clean bullet points (max 8). Strip each line BEFORE the regex:
    #    the old code anchored the marker pattern at ^ on the raw line, so
    #    indented bullets kept their "- " marker in the output.
    bullets = [
        re.sub(r'^[\+\-\โข\โ]\s*', '', line.strip())
        for line in content.splitlines()
        if line.lstrip().startswith(('+', '-', 'โข', 'โ'))
    ][:8]
    if bullets:
        nodes_block = '\n'.join(f"- {b}" for b in bullets)
        tree_block = '\n'.join(
            f"{'โโโ' if i < len(bullets)-1 else 'โโโ'} {b}"
            for i, b in enumerate(bullets)
        )
    else:
        nodes_block = "- (no explicit bullet points) -"
        tree_block = nodes_block
    # 2) choose template & inject nodes ("Diagram" is the fallback style)
    template = EXAMPLE_PROMPTS.get(style_key, EXAMPLE_PROMPTS["Diagram"])
    prompt_body = template.format(nodes=nodes_block, tree=tree_block)
    # 3) stylistic tail (keeps the total prompt short)
    tail = (
        "Corporate palette, white background, hand-drawn line style, "
        "clean composition, high-resolution vector."
    )
    return f"{prompt_body}\n\n{tail}".strip()
def generate_flux_image_via_api(prompt: str) -> Optional[str]:
    """Generate an image through the FLUX API and return a local PNG path.

    Returns None when the API is disabled, the remote call fails, or no
    usable image path comes back.
    """
    if not FLUX_API_ENABLED or not flux_api_client:
        return None
    try:
        logger.info(f"Generating FLUX image with prompt: {prompt[:100]}...")
        result = flux_api_client.predict(
            prompt=prompt,
            width=768,
            height=768,
            guidance=3.5,
            inference_steps=8,
            seed=random.randint(1, 1000000),
            do_img2img=False,
            init_image=None,
            image2image_strength=0.8,
            resize_img=True,
            api_name="/generate_image"
        )
        # The endpoint returns a tuple whose first element is an image path.
        if not (isinstance(result, tuple) and result):
            return None
        source_path = result[0]
        if not source_path or not os.path.exists(source_path):
            return None
        # Normalize whatever format came back into a PNG temp file.
        with Image.open(source_path) as img:
            png_tmp = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
            img.save(png_tmp.name, format="PNG")
        logger.info(f"FLUX image generated and saved to {png_tmp.name}")
        return png_tmp.name
    except Exception as e:
        logger.error(f"Failed to generate FLUX image: {e}")
        return None
##############################################################################
# Icon and Shape Mappings
##############################################################################
# Korean keyword -> python-pptx auto-shape used as a decorative slide icon.
SHAPE_ICONS = {
    "๋ชฉํ": MSO_SHAPE.STAR_5_POINT,
    "ํ๋ก์ธ์ค": MSO_SHAPE.BLOCK_ARC,
    "์ฑ์ฅ": MSO_SHAPE.UP_ARROW,
    "์์ด๋์ด": MSO_SHAPE.LIGHTNING_BOLT,
    "์ฒดํฌ": MSO_SHAPE.RECTANGLE,
    "์ฃผ์": MSO_SHAPE.DIAMOND,
    "์ง๋ฌธ": MSO_SHAPE.OVAL,
    "๋ถ์": MSO_SHAPE.PENTAGON,
    "์๊ฐ": MSO_SHAPE.DONUT,
    "ํ": MSO_SHAPE.HEXAGON,
}
##############################################################################
# File Processing Constants
##############################################################################
MAX_FILE_SIZE = 30 * 1024 * 1024  # 30MB cap applied to uploaded files
MAX_CONTENT_CHARS = 6000  # truncation limit for extracted file content
##############################################################################
# Improved Keyword Extraction
##############################################################################
def extract_keywords(text: str, top_k: int = 5) -> str:
    """Extract up to *top_k* keywords from English/Korean text.

    Strips punctuation, drops stop words and single-character tokens, and
    returns the surviving tokens joined by spaces.
    """
    stop_words = {'์', '๋', '์ด', '๊ฐ', '์', '๋ฅผ', '์', '์', '์์',
                  'the', 'is', 'at', 'on', 'in', 'a', 'an', 'and', 'or', 'but'}
    # Keep only letters, digits, Hangul and whitespace.
    cleaned = re.sub(r"[^a-zA-Z0-9๊ฐ-ํฃ\s]", "", text)
    candidates = [
        tok for tok in cleaned.split()
        if len(tok) > 1 and tok.lower() not in stop_words
    ]
    return " ".join(candidates[:top_k])
##############################################################################
# File Size Validation
##############################################################################
def validate_file_size(file_path: str, max_size: Optional[int] = None) -> bool:
    """Check whether *file_path* is within the allowed size limit.

    Args:
        file_path: Path of the file to check.
        max_size: Size cap in bytes; defaults to the module-wide
            MAX_FILE_SIZE when None.

    Returns:
        True when the file's size is <= the cap; False when it is too
        large, missing, or the path is unusable.
    """
    if max_size is None:
        max_size = MAX_FILE_SIZE
    try:
        return os.path.getsize(file_path) <= max_size
    except (OSError, TypeError, ValueError):
        # Missing file / bad path argument -> invalid, as before, but
        # without the bare `except:` that swallowed every exception.
        return False
##############################################################################
# Web Search Function
##############################################################################
def do_web_search(query: str, use_korean: bool = False) -> str:
    """Search the web via SerpHouse and return up to 20 organic results.

    Args:
        query: Search query string.
        use_korean: When True, request Korean-language results.

    Returns:
        A markdown block (citation instructions plus one section per hit),
        or a human-readable error/stub message on any failure.
    """
    if not SERPHOUSE_API_KEY:
        return "Web search unavailable. API key not configured."
    try:
        url = "https://api.serphouse.com/serp/live"
        params = {
            "q": query,
            "domain": "google.com",
            "serp_type": "web",
            "device": "desktop",
            "lang": "ko" if use_korean else "en",
            "num": "20"
        }
        headers = {
            "Authorization": f"Bearer {SERPHOUSE_API_KEY}"
        }
        logger.info(f"Calling SerpHouse API... Query: {query}")
        response = requests.get(url, headers=headers, params=params, timeout=30)
        response.raise_for_status()
        data = response.json()
        # Parse results. The organic list has appeared under several
        # response shapes; probe the known locations in order.
        results = data.get("results", {})
        organic = None
        if isinstance(results, dict) and "organic" in results:
            organic = results["organic"]
        elif isinstance(results, dict) and "results" in results:
            if isinstance(results["results"], dict) and "organic" in results["results"]:
                organic = results["results"]["organic"]
        elif "organic" in data:
            organic = data["organic"]
        if not organic:
            return "No search results found or unexpected API response structure."
        max_results = min(20, len(organic))
        limited_organic = organic[:max_results]
        # Render each hit as a small markdown section.
        summary_lines = []
        for idx, item in enumerate(limited_organic, start=1):
            title = item.get("title", "No title")
            link = item.get("link", "#")
            snippet = item.get("snippet", "No description")
            displayed_link = item.get("displayed_link", link)
            summary_lines.append(
                f"### Result {idx}: {title}\n\n"
                f"{snippet}\n\n"
                f"**Source**: [{displayed_link}]({link})\n\n"
                f"---\n"
            )
        # Preamble telling the downstream LLM how to cite these results.
        instructions = """
# Web Search Results
Below are the search results. Use this information when answering questions:
1. Reference the title, content, and source links
2. Explicitly cite sources in your answer (e.g., "According to source X...")
3. Include actual source links in your response
4. Synthesize information from multiple sources
"""
        search_results = instructions + "\n".join(summary_lines)
        return search_results
    except requests.exceptions.Timeout:
        logger.error("Web search timeout")
        return "Web search timed out. Please try again."
    except requests.exceptions.RequestException as e:
        logger.error(f"Web search network error: {e}")
        return "Network error during web search."
    except Exception as e:
        logger.error(f"Web search failed: {e}")
        return f"Web search failed: {str(e)}"
| ############################################################################## | |
| # File Analysis Functions | |
| ############################################################################## | |
def analyze_csv_file(path: str) -> str:
    """Analyze a CSV file and return a markdown summary with embedded chart data.

    Tries a sequence of encodings, reads at most the first 50 rows (and the
    first 10 columns) for display, and appends a ``CHART_DATA:<json>`` payload
    consumed downstream for chart generation.

    Args:
        path: filesystem path of the CSV file.

    Returns:
        Markdown-formatted summary string, or an error message on failure.
    """
    if not validate_file_size(path):
        return f"โ ๏ธ Error: File size exceeds {MAX_FILE_SIZE/1024/1024:.1f}MB limit."
    try:
        encodings = ['utf-8', 'cp949', 'euc-kr', 'latin-1']
        df = None
        used_encoding = None  # remember which encoding succeeded (was: reused loop var)
        for encoding in encodings:
            try:
                df = pd.read_csv(path, encoding=encoding, nrows=50)
                used_encoding = encoding
                break
            except UnicodeDecodeError:
                continue
        if df is None:
            return "Failed to read CSV: Unsupported encoding"
        # Count total rows cheaply by reading only the first column.
        total_rows = len(pd.read_csv(path, encoding=used_encoding, usecols=[0]))
        if df.shape[1] > 10:
            df = df.iloc[:, :10]
        summary = f"**Data size**: {total_rows} rows x {df.shape[1]} columns\n"
        summary += f"**Showing**: Top {min(50, total_rows)} rows\n"
        summary += f"**Columns**: {', '.join(df.columns)}\n\n"
        # Extract data for charts
        chart_data = {
            "columns": list(df.columns),
            "sample_data": df.head(10).to_dict('records')
        }
        df_str = df.to_string()
        if len(df_str) > MAX_CONTENT_CHARS:
            df_str = df_str[:MAX_CONTENT_CHARS] + "\n...(truncated)..."
        # default=str: to_dict('records') can yield numpy scalars / Timestamps
        # that json cannot serialize natively and would raise TypeError on.
        return f"**[CSV File: {os.path.basename(path)}]**\n\n{summary}{df_str}\n\nCHART_DATA:{json.dumps(chart_data, default=str)}"
    except Exception as e:
        logger.error(f"CSV read error: {e}")
        return f"Failed to read CSV file ({os.path.basename(path)}): {str(e)}"
def analyze_txt_file(path: str) -> str:
    """Read a text file with encoding fallback and return a markdown summary.

    Each candidate codec is tried in order; the first that decodes wins.
    Content longer than MAX_CONTENT_CHARS is truncated with a marker.
    """
    if not validate_file_size(path):
        return f"โ ๏ธ Error: File size exceeds {MAX_FILE_SIZE/1024/1024:.1f}MB limit."
    for codec in ('utf-8', 'cp949', 'euc-kr', 'latin-1', 'utf-16'):
        try:
            with open(path, "r", encoding=codec) as fh:
                content = fh.read()
        except UnicodeDecodeError:
            continue
        size_info = f"**File size**: {os.path.getsize(path)/1024:.1f}KB\n\n"
        if len(content) > MAX_CONTENT_CHARS:
            content = content[:MAX_CONTENT_CHARS] + "\n...(truncated)..."
        return f"**[TXT File: {os.path.basename(path)}]**\n\n{size_info}{content}"
    return f"Failed to read text file ({os.path.basename(path)}): Unsupported encoding"
def pdf_to_markdown(pdf_path: str) -> str:
    """Extract the first few pages of a PDF into a markdown summary.

    Shows at most 5 pages, dividing the MAX_CONTENT_CHARS budget evenly
    between them; per-page extraction failures are reported inline rather
    than aborting the whole conversion.
    """
    if not validate_file_size(pdf_path):
        return f"โ ๏ธ Error: File size exceeds {MAX_FILE_SIZE/1024/1024:.1f}MB limit."
    chunks = []
    try:
        with open(pdf_path, "rb") as fh:
            reader = PyPDF2.PdfReader(fh)
            page_total = len(reader.pages)
            pages_to_show = min(5, page_total)
            chunks.append(f"**Total pages**: {page_total}")
            chunks.append(f"**Showing**: First {pages_to_show} pages\n")
            for idx in range(pages_to_show):
                try:
                    extracted = (reader.pages[idx].extract_text() or "").strip()
                    if extracted:
                        # Split the character budget evenly across shown pages.
                        budget = MAX_CONTENT_CHARS // pages_to_show
                        if len(extracted) > budget:
                            extracted = extracted[:budget] + "...(truncated)"
                        chunks.append(f"## Page {idx+1}\n\n{extracted}\n")
                except Exception as e:
                    chunks.append(f"## Page {idx+1}\n\nFailed to read page: {str(e)}\n")
            if page_total > pages_to_show:
                chunks.append(f"\n...({pages_to_show}/{page_total} pages shown)...")
    except Exception as e:
        logger.error(f"PDF read error: {e}")
        return f"Failed to read PDF file ({os.path.basename(pdf_path)}): {str(e)}"
    merged = "\n".join(chunks)
    if len(merged) > MAX_CONTENT_CHARS:
        merged = merged[:MAX_CONTENT_CHARS] + "\n...(truncated)..."
    return f"**[PDF File: {os.path.basename(pdf_path)}]**\n\n{merged}"
| ############################################################################## | |
| # AI Image Generation Functions using Multiple APIs | |
| ############################################################################## | |
def generate_diverse_prompt(title: str, content: str, slide_index: int) -> Tuple[str, str]:
    """Produce a (3D-style, photorealistic) image-prompt pair for one slide.

    Style and color palette rotate with slide_index so consecutive slides
    differ; the visual metaphor is drawn at random from the pool matched to
    keywords found in the slide's text.
    """
    keywords = extract_keywords(f"{title} {content}", top_k=5).split()
    styles_3d = [
        "isometric 3D illustration",
        "low poly 3D art",
        "3D cartoon style",
        "3D glass morphism",
        "3D neon glow effect",
        "3D paper cut art",
        "3D clay render",
        "3D geometric abstract"
    ]
    styles_photo = [
        "professional photography",
        "cinematic shot",
        "minimalist photography",
        "aerial view photograph",
        "macro photography",
        "dramatic lighting photo",
        "architectural photography",
        "lifestyle photography"
    ]
    combined = (title + " " + content).lower()
    # First matching trigger group picks the metaphor pool (order matters).
    metaphor_rules = (
        (('์ฑ์ฅ', 'growth', '์ฆ๊ฐ', 'increase'),
         ["ascending stairs", "growing tree", "rocket launch", "mountain peak", "rising graph"]),
        (('ํ์ ', 'innovation', '์ฐฝ์', 'creative'),
         ["lightbulb moment", "puzzle pieces connecting", "spark of genius", "breaking boundaries", "colorful explosion"]),
        (('ํ์ ', 'collaboration', 'ํ', 'team'),
         ["hands joining together", "connected network", "team huddle", "bridge building", "interlocking gears"]),
        (('๋ฐ์ดํฐ', 'data', '๋ถ์', 'analysis'),
         ["data visualization", "digital dashboard", "flowing data streams", "analytical charts", "information network"]),
        (('๋ฏธ๋', 'future', '์ ๋ง', 'vision'),
         ["horizon view", "crystal ball", "futuristic cityscape", "pathway to tomorrow", "digital transformation"]),
        (('ํ๋ก์ธ์ค', 'process', '๋จ๊ณ', 'step'),
         ["flowing river", "assembly line", "domino effect", "clockwork mechanism", "journey path"]),
        (('๋ชฉํ', 'goal', '์ฑ๊ณต', 'success'),
         ["target with arrow", "trophy on pedestal", "finish line", "mountain summit", "golden key"]),
    )
    visual_metaphors = ["abstract shapes", "dynamic composition", "symbolic representation", "conceptual art", "modern design"]
    for triggers, pool in metaphor_rules:
        if any(word in combined for word in triggers):
            visual_metaphors = pool
            break
    style_3d = styles_3d[slide_index % len(styles_3d)]
    style_photo = styles_photo[slide_index % len(styles_photo)]
    metaphor = random.choice(visual_metaphors)
    color_palettes = [
        "vibrant blue and orange",
        "elegant purple and gold",
        "fresh green and white",
        "bold red and black",
        "soft pastel tones",
        "monochromatic blue",
        "warm sunset colors",
        "cool ocean palette"
    ]
    colors = color_palettes[slide_index % len(color_palettes)]
    subject = ' '.join(keywords[:3])
    # "wbgmsst" is the trigger token expected by the 3D image API.
    prompt_3d = f"wbgmsst, {style_3d}, {metaphor} representing {subject}, {colors}, professional presentation slide, high quality, white background"
    prompt_photo = f"{style_photo} of {metaphor} symbolizing {subject}, {colors} color scheme, professional business context, clean composition, high resolution"
    return prompt_3d, prompt_photo
def generate_cover_image_prompts(topic: str, slides_data: list) -> Tuple[str, str]:
    """Build a (3D-style, photorealistic) prompt pair for the cover slide.

    The visual subject is chosen by the first keyword group that matches
    the topic; otherwise a generic abstract treatment is used.
    """
    keywords = extract_keywords(topic, top_k=3).split()
    lowered = topic.lower()
    themed_visuals = (
        (('๊ธฐ์ ', 'tech', 'ai', '์ธ๊ณต์ง๋ฅ'),
         "futuristic 3D holographic interface",
         "modern technology workspace with holographic displays"),
        (('๋น์ฆ๋์ค', 'business', '๊ฒฝ์'),
         "3D corporate building with glass architecture",
         "professional business meeting in modern office"),
        (('๊ต์ก', 'education', 'ํ์ต'),
         "3D books transforming into knowledge symbols",
         "inspiring educational environment with digital elements"),
        (('ํ๊ฒฝ', 'environment', '์์ฐ'),
         "3D earth with renewable energy icons",
         "pristine nature landscape with sustainable elements"),
    )
    visual_3d = "abstract 3D geometric composition"
    visual_photo = "professional abstract photography"
    for triggers, candidate_3d, candidate_photo in themed_visuals:
        if any(word in lowered for word in triggers):
            visual_3d, visual_photo = candidate_3d, candidate_photo
            break
    joined = ' '.join(keywords)
    prompt_3d = f"wbgmsst, {visual_3d}, {joined} theme, premium 3D render, elegant composition, gradient background"
    prompt_photo = f"{visual_photo} featuring {joined}, cinematic lighting, professional presentation cover, high-end photography"
    return prompt_3d, prompt_photo
def generate_conclusion_image_prompts(title: str, content: str) -> Tuple[str, str]:
    """Build a (3D-style, photorealistic) prompt pair for a conclusion slide."""
    # Only the top two keywords feed the prompts.
    key_terms = ' '.join(extract_keywords(f"{title} {content}", top_k=4).split()[:2])
    prompt_3d = (
        f"wbgmsst, 3D trophy or achievement symbol, {key_terms} success visualization, "
        f"golden lighting, celebration mood, premium quality"
    )
    prompt_photo = (
        f"inspirational sunrise or horizon view symbolizing {key_terms}, "
        f"bright future ahead, professional photography, uplifting atmosphere"
    )
    return prompt_3d, prompt_photo
def generate_ai_image_via_3d_api(prompt: str) -> Optional[str]:
    """Generate a 3D-style image through the remote Gradio client.

    Args:
        prompt: text prompt for the 3D image API.

    Returns:
        Path to a temporary PNG file on success; None when the feature is
        disabled, the API returns nothing usable, or any step fails.
    """
    if not AI_IMAGE_ENABLED or not ai_image_client:
        return None
    try:
        logger.info(f"Generating 3D style image with prompt: {prompt[:100]}...")
        result = ai_image_client.predict(
            height=1024.0,
            width=1024.0,
            steps=8.0,
            scales=3.5,
            prompt=prompt,
            seed=float(random.randint(0, 1000000)),
            api_name="/process_and_save_image"
        )
        # The API may return either a dict carrying a "path" key or a bare path.
        image_path = None
        if isinstance(result, dict):
            image_path = result.get("path")
        elif isinstance(result, str):
            image_path = result
        if image_path and os.path.exists(image_path):
            # Re-encode to PNG in a fresh temp file. mkstemp + close avoids
            # keeping an open handle on the file while PIL writes to it
            # (NamedTemporaryFile held the fd open, which leaks it and
            # fails outright on Windows).
            fd, png_path = tempfile.mkstemp(suffix=".png")
            os.close(fd)
            with Image.open(image_path) as img:
                img.save(png_path, format="PNG")
            logger.info(f"3D image generated and saved to {png_path}")
            return png_path
        return None
    except Exception as e:
        logger.error(f"Failed to generate 3D image: {e}")
        return None
def generate_images_parallel(prompt_3d: str, prompt_photo: str) -> Tuple[Optional[str], Optional[str]]:
    """Call the 3D-style and photorealistic image APIs concurrently.

    The redundant function-local ``import concurrent.futures`` was removed;
    the module is already imported at file top.

    Args:
        prompt_3d: prompt for the 3D-style image API.
        prompt_photo: prompt for the photorealistic (FLUX) image API.

    Returns:
        (path_3d, path_photo) — either entry is None when its worker
        returned None (both workers swallow their own exceptions).
    """
    with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
        future_3d = executor.submit(generate_ai_image_via_3d_api, prompt_3d)
        future_photo = executor.submit(generate_flux_image_via_api, prompt_photo)
        # Block until both workers finish.
        return future_3d.result(), future_photo.result()
| ############################################################################## | |
| # PPT Generation Functions - FIXED WITH LIMITED DIAGRAMS AND IMAGES | |
| ############################################################################## | |
def parse_llm_ppt_response(response: str, layout_style: str = "consistent") -> list:
    """Parse an LLM text response into a list of slide dicts.

    Each slide dict carries: 'title', 'content', 'notes', 'layout'
    (always 'title_content' here) and 'chart_data' (always None here).

    Parsing strategies, tried in order:
      1. Embedded JSON array — returned as-is.
      2. Slide-marker lines ("Slide N" in Korean or English, or bare
         numbered lines like "1.") split the response into sections.
      3. Fallback when fewer than 3 slides were found: split on
         title-prefix lines instead.

    Args:
        response: raw LLM output text.
        layout_style: accepted for API compatibility; not used here.

    Returns:
        List of slide dicts (possibly empty if nothing was parseable).
    """
    slides = []
    logger.info(f"Parsing LLM response, total length: {len(response)}")
    logger.debug(f"First 500 chars: {response[:500]}")
    # Try JSON parsing first
    try:
        json_match = re.search(r'\[[\s\S]*\]', response)
        if json_match:
            slides_data = json.loads(json_match.group())
            return slides_data
    except ValueError:
        # json.JSONDecodeError subclasses ValueError; anything else should
        # surface rather than be silently swallowed (was a bare `except:`).
        pass
    # Locate explicit slide markers ("Slide N" lines, Korean or English).
    slide_markers = []
    for match in re.finditer(r'^(?:์ฌ๋ผ์ด๋|Slide)\s*(\d+)\s*$', response, re.MULTILINE):
        slide_markers.append({
            'index': int(match.group(1)),
            'start': match.start(),
            'end': match.end()
        })
    logger.info(f"Found {len(slide_markers)} slide markers")
    # No markers found: fall back to bare numbered lines like "1." / "2)".
    if not slide_markers:
        for match in re.finditer(r'^(\d+)[.)]\s*$', response, re.MULTILINE):
            slide_markers.append({
                'index': int(match.group(1)),
                'start': match.start(),
                'end': match.end()
            })
    # Extract the text between consecutive markers as one slide each.
    for i, marker in enumerate(slide_markers):
        start = marker['end']
        if i < len(slide_markers) - 1:
            end = slide_markers[i + 1]['start']
        else:
            end = len(response)
        section = response[start:end].strip()
        if not section:
            continue
        logger.debug(f"Processing slide {marker['index']}: {section[:100]}...")
        slide = {
            'title': '',
            'content': '',
            'notes': '',
            'layout': 'title_content',
            'chart_data': None
        }
        # Pull title / content / notes out of the section, line by line.
        lines = section.split('\n')
        current_part = None
        title_found = False
        content_lines = []
        notes_lines = []
        for line in lines:
            line = line.strip()
            if not line:
                continue
            # Title prefix (Korean or English)
            if (line.startswith('์ ๋ชฉ:') or line.startswith('Title:')) and not title_found:
                current_part = 'title'
                title_text = line.split(':', 1)[1].strip() if ':' in line else ''
                slide['title'] = title_text
                title_found = True
            # Content prefix
            elif line.startswith('๋ด์ฉ:') or line.startswith('Content:'):
                current_part = 'content'
                content_text = line.split(':', 1)[1].strip() if ':' in line else ''
                if content_text:
                    content_lines.append(content_text)
            # Notes prefix
            elif line.startswith('๋ ธํธ:') or line.startswith('Notes:') or line.startswith('๋ฐํ์ ๋ ธํธ:'):
                current_part = 'notes'
                notes_text = line.split(':', 1)[1].strip() if ':' in line else ''
                if notes_text:
                    notes_lines.append(notes_text)
            # Continuation line: append to whichever part is active.
            else:
                if current_part == 'title' and not slide['title']:
                    slide['title'] = line
                elif current_part == 'content':
                    content_lines.append(line)
                elif current_part == 'notes':
                    notes_lines.append(line)
                elif not title_found and not slide['title']:
                    # First free-standing line becomes the title.
                    slide['title'] = line
                    title_found = True
                    current_part = 'content'
                elif current_part is None and title_found:
                    current_part = 'content'
                    content_lines.append(line)
        slide['content'] = '\n'.join(content_lines).strip()
        slide['notes'] = ' '.join(notes_lines).strip()
        # Keep only slides that actually carry text.
        if slide['title'] or slide['content']:
            logger.info(f"Slide {len(slides)+1}: Title='{slide['title'][:30]}...', Content length={len(slide['content'])}")
            slides.append(slide)
    # If marker-based parsing failed or found too little, retry with a more
    # lenient split on title-prefix lines.
    if not slides or len(slides) < 3:
        logger.warning(f"Primary parsing resulted in only {len(slides)} slides, trying alternative method...")
        slides = []
        sections = re.split(r'\n(?=์ ๋ชฉ:|Title:)', response)
        for section in sections:
            if not section.strip():
                continue
            slide = {
                'title': '',
                'content': '',
                'notes': '',
                'layout': 'title_content',
                'chart_data': None
            }
            lines = section.strip().split('\n')
            current_part = None
            content_lines = []
            notes_lines = []
            for line in lines:
                line = line.strip()
                if not line:
                    continue
                if line.startswith('์ ๋ชฉ:') or line.startswith('Title:'):
                    slide['title'] = line.split(':', 1)[1].strip() if ':' in line else ''
                    current_part = 'content'
                elif line.startswith('๋ด์ฉ:') or line.startswith('Content:'):
                    current_part = 'content'
                elif line.startswith('๋ ธํธ:') or line.startswith('Notes:'):
                    current_part = 'notes'
                    notes_text = line.split(':', 1)[1].strip() if ':' in line else ''
                    if notes_text:
                        notes_lines.append(notes_text)
                elif current_part == 'content':
                    content_lines.append(line)
                elif current_part == 'notes':
                    notes_lines.append(line)
            slide['content'] = '\n'.join(content_lines).strip()
            slide['notes'] = ' '.join(notes_lines).strip()
            # Strip any leading slide-number prefix from the title.
            slide['title'] = re.sub(r'^(์ฌ๋ผ์ด๋|Slide)\s*\d+\s*[:๏ผ\-]?\s*', '', slide['title'], flags=re.IGNORECASE)
            if slide['title'] or slide['content']:
                slides.append(slide)
    logger.info(f"Total slides parsed: {len(slides)}")
    # Diagnostic: dump a preview when parsing clearly failed.
    if len(slides) < 3:
        logger.error("Parsing resulted in too few slides. Raw response preview:")
        logger.error(response[:1000])
    return slides
def force_font_size(text_frame, font_size_pt: int, theme: Dict):
    """Stamp a uniform size, face, and color onto every paragraph and run.

    Run-level assignment is the part that actually controls rendering in
    PowerPoint; paragraph-level settings alone are frequently ignored, so
    both levels are set, and runs are materialized for run-less paragraphs.
    """
    if not text_frame or not hasattr(text_frame, 'paragraphs'):
        return
    try:
        for para in text_frame.paragraphs:
            try:
                # Paragraph-level defaults.
                if hasattr(para, 'font'):
                    para.font.size = Pt(font_size_pt)
                    para.font.name = theme['fonts']['body']
                    para.font.color.rgb = theme['colors']['text']
                # Run-level settings (these win at render time).
                if hasattr(para, 'runs'):
                    for run in para.runs:
                        run.font.size = Pt(font_size_pt)
                        run.font.name = theme['fonts']['body']
                        run.font.color.rgb = theme['colors']['text']
                # A paragraph with text but no runs: re-assigning its text
                # materializes runs, which can then be styled.
                if para.text and (not hasattr(para, 'runs') or len(para.runs) == 0):
                    para.text = para.text
                    if hasattr(para, 'runs'):
                        for run in para.runs:
                            run.font.size = Pt(font_size_pt)
                            run.font.name = theme['fonts']['body']
                            run.font.color.rgb = theme['colors']['text']
            except Exception as e:
                logger.warning(f"Error setting font for paragraph: {e}")
                continue
    except Exception as e:
        logger.warning(f"Error in force_font_size: {e}")
def apply_theme_to_slide(slide, theme: Dict, layout_type: str = 'title_content'):
    """Apply design theme to a slide with consistent styling.

    Paints a full-slide background rectangle (light for content layouts,
    primary-colored for section headers), adds top/bottom accent strips for
    content layouts, then normalizes title and body-text formatting.

    Args:
        slide: python-pptx slide object; mutated in place.
        theme: design-theme dict with 'colors' and 'fonts' sub-dicts.
        layout_type: layout name — 'title_content' / 'two_content' /
            'comparison' get the light treatment, anything else (e.g.
            'section_header') gets a solid primary background.
    """
    # Add colored background shape for all slides
    bg_shape = slide.shapes.add_shape(
        MSO_SHAPE.RECTANGLE, 0, 0, Inches(10), Inches(5.625)
    )
    bg_shape.fill.solid()
    # Use lighter background for content slides
    if layout_type in ['title_content', 'two_content', 'comparison']:
        # Light background with subtle gradient effect
        bg_shape.fill.fore_color.rgb = theme['colors']['background']
        # Add accent strip at top
        accent_strip = slide.shapes.add_shape(
            MSO_SHAPE.RECTANGLE, 0, 0, Inches(10), Inches(0.5)
        )
        accent_strip.fill.solid()
        accent_strip.fill.fore_color.rgb = theme['colors']['primary']
        accent_strip.line.fill.background()
        # Add bottom accent
        bottom_strip = slide.shapes.add_shape(
            MSO_SHAPE.RECTANGLE, 0, Inches(5.125), Inches(10), Inches(0.5)
        )
        bottom_strip.fill.solid()
        bottom_strip.fill.fore_color.rgb = theme['colors']['secondary']
        # NOTE(review): python-pptx FillFormat has no documented
        # `transparency` attribute; this assignment is likely a silent
        # no-op — confirm against the installed pptx version.
        bottom_strip.fill.transparency = 0.7
        bottom_strip.line.fill.background()
    else:
        # Section headers get primary color background
        bg_shape.fill.fore_color.rgb = theme['colors']['primary']
    bg_shape.line.fill.background()
    # Move background shapes to back.
    # Only bg_shape is re-ordered (via the private _spTree element list);
    # the accent strips stay above it because they were appended later.
    slide.shapes._spTree.remove(bg_shape._element)
    slide.shapes._spTree.insert(2, bg_shape._element)
    # Apply title formatting if exists
    if slide.shapes.title:
        try:
            title = slide.shapes.title
            if title.text_frame and title.text_frame.paragraphs:
                for paragraph in title.text_frame.paragraphs:
                    paragraph.font.name = theme['fonts']['title']
                    paragraph.font.bold = True
                    # UPDATED: Increased font sizes for better readability
                    if layout_type == 'section_header':
                        paragraph.font.size = Pt(28)  # Increased from 20
                        paragraph.font.color.rgb = RGBColor(255, 255, 255)
                        paragraph.alignment = PP_ALIGN.CENTER
                    else:
                        paragraph.font.size = Pt(24)  # Increased from 18
                        paragraph.font.color.rgb = theme['colors']['primary']
                        paragraph.alignment = PP_ALIGN.LEFT
        except Exception as e:
            logger.warning(f"Title formatting failed: {e}")
    # Apply content formatting with improved readability
    # NOTE: Do NOT add emojis here - they will be added in create_advanced_ppt_from_content
    for shape in slide.shapes:
        if shape.has_text_frame and shape != slide.shapes.title:
            try:
                text_frame = shape.text_frame
                # Set text frame margins for better spacing
                text_frame.margin_left = Inches(0.25)
                text_frame.margin_right = Inches(0.25)
                text_frame.margin_top = Inches(0.1)
                text_frame.margin_bottom = Inches(0.1)
                # Only apply font formatting, no content modification
                if text_frame.text.strip():
                    # Use force_font_size helper to ensure font is applied
                    force_font_size(text_frame, 16, theme)  # Increased from 12
                    for paragraph in text_frame.paragraphs:
                        # Add line spacing for better readability
                        paragraph.space_after = Pt(4)  # Increased from 3
                        paragraph.line_spacing = 1.2  # Increased from 1.1
            except Exception as e:
                logger.warning(f"Content formatting failed: {e}")
def add_gradient_background(slide, color1: RGBColor, color2: RGBColor):
    """Fake a gradient by stacking a solid base and a semi-transparent overlay.

    python-pptx exposes no native gradient-background API, so two rectangles
    are layered and then pushed behind all other slide content.
    """
    full_width = Inches(10)
    full_height = Inches(5.625)
    # Solid base covering the whole slide.
    base = slide.shapes.add_shape(
        MSO_SHAPE.RECTANGLE, 0, 0, full_width, full_height
    )
    base.fill.solid()
    base.fill.fore_color.rgb = color1
    base.line.fill.background()
    # Half-transparent upper band to suggest a gradient.
    overlay = slide.shapes.add_shape(
        MSO_SHAPE.RECTANGLE, 0, 0, full_width, Inches(2.8)
    )
    overlay.fill.solid()
    overlay.fill.fore_color.rgb = color2
    overlay.fill.transparency = 0.5
    overlay.line.fill.background()
    # Re-insert both rectangles near the front of the shape tree (slots 2
    # and 3) so everything added later renders on top of them.
    tree = slide.shapes._spTree
    tree.remove(base._element)
    tree.remove(overlay._element)
    tree.insert(2, base._element)
    tree.insert(3, overlay._element)
def add_decorative_shapes(slide, theme: Dict):
    """Drop two translucent accent circles into opposite corners of the slide.

    Failures are logged and swallowed — decoration must never break deck
    generation.
    """
    try:
        # (left, top, diameter, theme color key, transparency)
        for left, top, size, color_key, alpha in (
            (Inches(9.3), Inches(4.8), Inches(0.7), 'accent', 0.3),
            (Inches(0.1), Inches(0.1), Inches(0.4), 'secondary', 0.5),
        ):
            circle = slide.shapes.add_shape(MSO_SHAPE.OVAL, left, top, size, size)
            circle.fill.solid()
            circle.fill.fore_color.rgb = theme['colors'][color_key]
            circle.fill.transparency = alpha
            circle.line.fill.background()
    except Exception as e:
        logger.warning(f"Failed to add decorative shapes: {e}")
def create_chart_slide(slide, chart_data: Dict, theme: Dict):
    """Render a clustered-column chart on the slide from parsed CSV data.

    chart_data is expected to carry 'columns' (list of names) and
    'sample_data' (list of row dicts, as produced by analyze_csv_file).
    The first column supplies category labels; up to three numeric columns
    become series. On any failure a plain textbox placeholder is added.
    """
    # Chart geometry; defined before the try so the except-branch can use it.
    x, y, cx, cy = Inches(1), Inches(2), Inches(8), Inches(4.5)
    try:
        chart_data_obj = CategoryChartData()
        # Guard against empty sample_data before probing the first row.
        if 'columns' in chart_data and chart_data.get('sample_data'):
            first_row = chart_data['sample_data'][0]
            # Keep only columns whose first sample value parses as a number
            # (was a bare `except:` — narrowed to the conversion errors).
            numeric_cols = []
            for col in chart_data['columns']:
                try:
                    float(first_row.get(col, 0))
                    numeric_cols.append(col)
                except (ValueError, TypeError):
                    pass
            if numeric_cols:
                categories = [str(row.get(chart_data['columns'][0], ''))
                              for row in chart_data['sample_data'][:5]]
                chart_data_obj.categories = categories
                for col in numeric_cols[:3]:  # cap at 3 series for readability
                    values = [float(row.get(col, 0))
                              for row in chart_data['sample_data'][:5]]
                    chart_data_obj.add_series(col, values)
                chart = slide.shapes.add_chart(
                    XL_CHART_TYPE.COLUMN_CLUSTERED, x, y, cx, cy, chart_data_obj
                ).chart
                # Style the chart
                chart.has_legend = True
                chart.legend.position = XL_LEGEND_POSITION.BOTTOM
    except Exception as e:
        logger.warning(f"Chart creation failed: {e}")
        # Fall back to a textbox placeholder so the slide is never empty.
        textbox = slide.shapes.add_textbox(x, y, cx, cy)
        text_frame = textbox.text_frame
        text_frame.text = "๋ฐ์ดํฐ ์ฐจํธ (์ฐจํธ ์์ฑ ์คํจ)"
        text_frame.paragraphs[0].font.size = Pt(16)
        text_frame.paragraphs[0].font.color.rgb = theme['colors']['secondary']
| def create_advanced_ppt_from_content( | |
| slides_data: list, | |
| topic: str, | |
| theme_name: str, | |
| include_charts: bool = False, | |
| include_ai_image: bool = False, | |
| include_diagrams: bool = False, | |
| include_flux_images: bool = False | |
| ) -> str: | |
| """Create advanced PPT file with limited visuals (max 6 images + 2 diagrams)""" | |
| if not PPTX_AVAILABLE: | |
| raise ImportError("python-pptx library is required") | |
| prs = Presentation() | |
| theme = DESIGN_THEMES.get(theme_name, DESIGN_THEMES['professional']) | |
| # Set slide size (16:9) | |
| prs.slide_width = Inches(10) | |
| prs.slide_height = Inches(5.625) | |
| # ์ด๋ฏธ์ง์ ๋ค์ด์ด๊ทธ๋จ ์นด์ดํฐ ๋ฐ ์ถ์ | |
| image_count_3d = 0 | |
| image_count_flux = 0 | |
| diagram_count = 0 | |
| max_images_per_api = 3 | |
| max_diagrams = 2 | |
| # ๋ค์ด์ด๊ทธ๋จ์ด ํ์ํ ์ฌ๋ผ์ด๋๋ฅผ ๋ฏธ๋ฆฌ ๋ถ์ | |
| diagram_candidates = [] | |
| if include_diagrams: | |
| for i, slide_data in enumerate(slides_data): | |
| title = slide_data.get('title', '') | |
| content = slide_data.get('content', '') | |
| diagram_type, score = detect_diagram_type_with_score(title, content) | |
| if diagram_type and score > 0: | |
| diagram_candidates.append((i, diagram_type, score)) | |
| # ํ์๋ ์ ์๊ฐ ๋์ ์์ผ๋ก ์ ๋ ฌํ๊ณ ์์ 2๊ฐ๋ง ์ ํ | |
| diagram_candidates.sort(key=lambda x: x[2], reverse=True) | |
| diagram_candidates = diagram_candidates[:max_diagrams] | |
| diagram_slide_indices = [x[0] for x in diagram_candidates] | |
| else: | |
| diagram_slide_indices = [] | |
| # โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ | |
| # 1) ์ ๋ชฉ ์ฌ๋ผ์ด๋(ํ์ง) ์์ฑ | |
| # โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ | |
| title_slide_layout = prs.slide_layouts[0] | |
| slide = prs.slides.add_slide(title_slide_layout) | |
| # ๋ฐฐ๊ฒฝ ๊ทธ๋ผ๋์ธํธ | |
| add_gradient_background(slide, theme['colors']['primary'], theme['colors']['secondary']) | |
| # ์ ๋ชฉ๊ณผ ๋ถ์ ๋ชฉ์ ์ค์ ์๋จ์ ๋ฐฐ์น | |
| title_shape = slide.shapes.title | |
| subtitle_shape = slide.placeholders[1] if len(slide.placeholders) > 1 else None | |
| if title_shape: | |
| title_shape.left = Inches(0.5) | |
| title_shape.width = prs.slide_width - Inches(1) | |
| title_shape.top = Inches(1.0) | |
| title_shape.height = Inches(1.2) | |
| tf = title_shape.text_frame | |
| tf.clear() | |
| tf.text = topic | |
| p = tf.paragraphs[0] | |
| p.font.name = theme['fonts']['title'] | |
| p.font.size = Pt(36) | |
| p.font.bold = True | |
| p.font.color.rgb = RGBColor(255, 255, 255) | |
| p.alignment = PP_ALIGN.CENTER | |
| if subtitle_shape: | |
| subtitle_shape.left = Inches(0.5) | |
| subtitle_shape.width = prs.slide_width - Inches(1) | |
| subtitle_shape.top = Inches(2.2) | |
| subtitle_shape.height = Inches(0.9) | |
| tf2 = subtitle_shape.text_frame | |
| tf2.clear() | |
| tf2.text = f"์๋ ์์ฑ๋ ํ๋ ์ ํ ์ด์ โข ์ด {len(slides_data)}์ฅ" | |
| p2 = tf2.paragraphs[0] | |
| p2.font.name = theme['fonts']['subtitle'] | |
| p2.font.size = Pt(20) | |
| p2.font.color.rgb = RGBColor(255, 255, 255) | |
| p2.alignment = PP_ALIGN.CENTER | |
| # ํ์ง ์ด๋ฏธ์ง (์นด์ดํธ์ ํฌํจ) | |
| if include_ai_image and (AI_IMAGE_ENABLED or FLUX_API_ENABLED): | |
| logger.info("Generating AI cover images via parallel APIs...") | |
| prompt_3d, prompt_photo = generate_cover_image_prompts(topic, slides_data) | |
| image_3d, image_photo = generate_images_parallel(prompt_3d, prompt_photo) | |
| # 3D ์ฐ์ ์ ํ | |
| ai_image_path = None | |
| if image_3d and image_count_3d < max_images_per_api: | |
| ai_image_path = image_3d | |
| image_count_3d += 1 | |
| elif image_photo and image_count_flux < max_images_per_api: | |
| ai_image_path = image_photo | |
| image_count_flux += 1 | |
| if ai_image_path and os.path.exists(ai_image_path): | |
| try: | |
| img = Image.open(ai_image_path) | |
| img_width, img_height = img.size | |
| max_width = Inches(3.5) | |
| max_height = Inches(2.5) | |
| ratio = img_height / img_width | |
| img_w = max_width | |
| img_h = max_width * ratio | |
| if img_h > max_height: | |
| img_h = max_height | |
| img_w = max_height / ratio | |
| left = prs.slide_width - img_w - Inches(0.5) | |
| top = prs.slide_height - img_h - Inches(0.8) | |
| pic = slide.shapes.add_picture(ai_image_path, left, top, width=img_w, height=img_h) | |
| pic.shadow.inherit = False | |
| pic.shadow.visible = True | |
| pic.shadow.blur_radius = Pt(15) | |
| pic.shadow.distance = Pt(8) | |
| pic.shadow.angle = 45 | |
| caption_box = slide.shapes.add_textbox( | |
| left, top - Inches(0.3), | |
| img_w, Inches(0.3) | |
| ) | |
| caption_tf = caption_box.text_frame | |
| caption_tf.text = "AI Generated" | |
| caption_p = caption_tf.paragraphs[0] | |
| caption_p.font.size = Pt(10) | |
| caption_p.font.color.rgb = RGBColor(255, 255, 255) | |
| caption_p.alignment = PP_ALIGN.CENTER | |
| # ์์ ํ์ผ ์ ๋ฆฌ | |
| for temp_path in [image_3d, image_photo]: | |
| if temp_path and os.path.exists(temp_path): | |
| try: | |
| os.unlink(temp_path) | |
| except: | |
| pass | |
| except Exception as e: | |
| logger.error(f"Failed to add cover image: {e}") | |
| add_decorative_shapes(slide, theme) | |
| # โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ | |
| # 2) ์ปจํ ์ธ ์ฌ๋ผ์ด๋ ์์ฑ | |
| # โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ | |
| for i, slide_data in enumerate(slides_data): | |
| layout_type = slide_data.get('layout', 'title_content') | |
| logger.info(f"Creating slide {i+1}: {slide_data.get('title', 'No title')}") | |
| logger.debug(f"Content length: {len(slide_data.get('content', ''))}") | |
| # Choose appropriate layout | |
| if layout_type == 'section_header' and len(prs.slide_layouts) > 2: | |
| slide_layout = prs.slide_layouts[2] | |
| elif layout_type == 'two_content' and len(prs.slide_layouts) > 3: | |
| slide_layout = prs.slide_layouts[3] | |
| elif layout_type == 'comparison' and len(prs.slide_layouts) > 4: | |
| slide_layout = prs.slide_layouts[4] | |
| else: | |
| slide_layout = prs.slide_layouts[1] if len(prs.slide_layouts) > 1 else prs.slide_layouts[0] | |
| slide = prs.slides.add_slide(slide_layout) | |
| # Apply theme to EVERY slide for consistency | |
| apply_theme_to_slide(slide, theme, layout_type) | |
| # Set title | |
| if slide.shapes.title: | |
| slide.shapes.title.text = slide_data.get('title', '์ ๋ชฉ ์์') | |
| try: | |
| title_text_frame = slide.shapes.title.text_frame | |
| if title_text_frame and title_text_frame.paragraphs: | |
| for paragraph in title_text_frame.paragraphs: | |
| if layout_type == 'section_header': | |
| paragraph.font.size = Pt(28) | |
| paragraph.font.color.rgb = RGBColor(255, 255, 255) | |
| paragraph.alignment = PP_ALIGN.CENTER | |
| else: | |
| paragraph.font.size = Pt(24) | |
| paragraph.font.color.rgb = theme['colors']['primary'] | |
| paragraph.font.bold = True | |
| paragraph.font.name = theme['fonts']['title'] | |
| except Exception as e: | |
| logger.warning(f"Title font sizing failed: {e}") | |
| # ์ฌ๋ผ์ด๋ ์ ๋ณด | |
| slide_title = slide_data.get('title', '') | |
| slide_content = slide_data.get('content', '') | |
| # ๊ฒฐ๋ก /ํ์ด๋ผ์ดํธ ์ฌ๋ผ์ด๋ ๊ฐ์ง | |
| is_conclusion_slide = any(word in slide_title.lower() for word in | |
| ['๊ฒฐ๋ก ', 'conclusion', '์์ฝ', 'summary', 'ํต์ฌ', 'key', | |
| '๋ง๋ฌด๋ฆฌ', 'closing', '์ ๋ฆฌ', 'takeaway', '์์ฌ์ ', 'implication']) | |
| # ์๊ฐ์ ์์ ์ถ๊ฐ ์ฌ๋ถ ๊ฒฐ์ | |
| should_add_visual = False | |
| visual_type = None | |
| # 1. ๋ค์ด์ด๊ทธ๋จ ์ฐ์ ํ์ธ | |
| if i in diagram_slide_indices and diagram_count < max_diagrams: | |
| should_add_visual = True | |
| diagram_info = next(x for x in diagram_candidates if x[0] == i) | |
| visual_type = ('diagram', diagram_info[1]) | |
| diagram_count += 1 | |
| # 2. ๊ฒฐ๋ก ์ฌ๋ผ์ด๋๋ ์ด๋ฏธ์ง ์ถ๊ฐ (์ด๋ฏธ์ง ์ ํ ๋ด์์) | |
| elif is_conclusion_slide and include_flux_images and (image_count_3d + image_count_flux) < (max_images_per_api * 2): | |
| should_add_visual = True | |
| visual_type = ('conclusion_images', None) | |
| # 3. ์ผ๋ฐ ์ฌ๋ผ์ด๋ ์ด๋ฏธ์ง (์ด๋ฏธ์ง ์ ํ ๋ด์์, 3~4์ฅ๋ง๋ค ํ๋์ฉ) | |
| elif include_flux_images and (image_count_3d + image_count_flux) < (max_images_per_api * 2) and i % 3 == 0: | |
| should_add_visual = True | |
| visual_type = ('diverse_images', None) | |
| # ์๊ฐ์ ์์๊ฐ ์๋ ๊ฒฝ์ฐ ์ข-์ฐ ๋ ์ด์์ ์ ์ฉ | |
| if should_add_visual and layout_type not in ['section_header']: | |
| # ์ข์ธก์ ํ ์คํธ ๋ฐฐ์น | |
| left_box = slide.shapes.add_textbox( | |
| Inches(0.5), Inches(1.5), Inches(4.5), Inches(3.5) | |
| ) | |
| left_tf = left_box.text_frame | |
| left_tf.clear() | |
| left_tf.text = slide_content | |
| left_tf.word_wrap = True | |
| force_font_size(left_tf, 14, theme) | |
| # Apply emoji bullets | |
| for paragraph in left_tf.paragraphs: | |
| text = paragraph.text.strip() | |
| if text and text.startswith(('-', 'โข', 'โ')) and not has_emoji(text): | |
| clean_text = text.lstrip('-โขโ ') | |
| emoji = get_emoji_for_content(clean_text) | |
| paragraph.text = f"{emoji} {clean_text}" | |
| force_font_size(left_tf, 14, theme) | |
| # ์ฐ์ธก์ ์๊ฐ์ ์์ ์ถ๊ฐ | |
| visual_added = False | |
| if visual_type[0] == 'diagram' and DIAGRAM_GENERATORS_AVAILABLE: | |
| # ๋ค์ด์ด๊ทธ๋จ ์์ฑ | |
| logger.info(f"Generating {visual_type[1]} for slide {i+1} (Diagram {diagram_count}/{max_diagrams})") | |
| diagram_json = generate_diagram_json(slide_title, slide_content, visual_type[1]) | |
| if diagram_json: | |
| diagram_path = generate_diagram_locally(diagram_json, visual_type[1], "png") | |
| if diagram_path and os.path.exists(diagram_path): | |
| try: | |
| pic = slide.shapes.add_picture( | |
| diagram_path, | |
| Inches(5.2), Inches(1.5), | |
| width=Inches(4.3), height=Inches(3.0) | |
| ) | |
| visual_added = True | |
| caption_box = slide.shapes.add_textbox( | |
| Inches(5.2), Inches(4.6), Inches(4.3), Inches(0.3) | |
| ) | |
| caption_tf = caption_box.text_frame | |
| caption_tf.text = f"{visual_type[1]} Diagram" | |
| caption_p = caption_tf.paragraphs[0] | |
| caption_p.font.size = Pt(10) | |
| caption_p.font.color.rgb = theme['colors']['secondary'] | |
| caption_p.alignment = PP_ALIGN.CENTER | |
| try: | |
| os.unlink(diagram_path) | |
| except: | |
| pass | |
| except Exception as e: | |
| logger.error(f"Failed to add diagram: {e}") | |
| elif visual_type[0] == 'conclusion_images': | |
| # ๊ฒฐ๋ก ์ฌ๋ผ์ด๋์ฉ ์ด๋ฏธ์ง ์์ฑ | |
| logger.info(f"Generating conclusion images for slide {i+1}") | |
| prompt_3d, prompt_photo = generate_conclusion_image_prompts(slide_title, slide_content) | |
| image_3d = None | |
| image_photo = None | |
| if image_count_3d < max_images_per_api: | |
| image_3d = generate_ai_image_via_3d_api(prompt_3d) | |
| if image_count_flux < max_images_per_api: | |
| image_photo = generate_flux_image_via_api(prompt_photo) | |
| # ์ ํ | |
| selected_image = None | |
| if image_photo and image_count_flux < max_images_per_api: | |
| selected_image = image_photo | |
| image_count_flux += 1 | |
| elif image_3d and image_count_3d < max_images_per_api: | |
| selected_image = image_3d | |
| image_count_3d += 1 | |
| if selected_image and os.path.exists(selected_image): | |
| try: | |
| pic = slide.shapes.add_picture( | |
| selected_image, | |
| Inches(5.2), Inches(1.5), | |
| width=Inches(4.3), height=Inches(3.0) | |
| ) | |
| visual_added = True | |
| caption_box = slide.shapes.add_textbox( | |
| Inches(5.2), Inches(4.6), Inches(4.3), Inches(0.3) | |
| ) | |
| caption_tf = caption_box.text_frame | |
| caption_tf.text = "Key Takeaway Visualization" | |
| caption_p = caption_tf.paragraphs[0] | |
| caption_p.font.size = Pt(10) | |
| caption_p.font.color.rgb = theme['colors']['secondary'] | |
| caption_p.alignment = PP_ALIGN.CENTER | |
| for temp_path in [image_3d, image_photo]: | |
| if temp_path and os.path.exists(temp_path): | |
| try: | |
| os.unlink(temp_path) | |
| except: | |
| pass | |
| except Exception as e: | |
| logger.error(f"Failed to add conclusion image: {e}") | |
| elif visual_type[0] == 'diverse_images': | |
| # --- FLUX ๋ค์ด์ด๊ทธ๋จ(6-style) & 3D ์ด๋ฏธ์ง ์์ฑ --- | |
| logger.info( | |
| f"Generating FLUX diagram-style image for slide {i+1} " | |
| f"(3D {image_count_3d}/{max_images_per_api}, " | |
| f"FLUX {image_count_flux}/{max_images_per_api})" | |
| ) | |
| # 3D-API์ฉ(๊ทธ๋ฆผยท์ผ๋ฌ์คํธ) ํ๋กฌํํธ๋ ๊ทธ๋๋ก | |
| prompt_3d, _ = generate_diverse_prompt(slide_title, slide_content, i) | |
| # โก FLUX-API์ฉ ํ๋กฌํํธ: 6๊ฐ์ง ๋ค์ด์ด๊ทธ๋จ ๊ตฌ๋ ์ค ํ๋ ์ ํ | |
| style_key = pick_flux_style(i) # โ โ ์ ํจ์ ์ฌ์ฉ | |
| prompt_flux = generate_flux_prompt(slide_title, slide_content, style_key) | |
| selected_image = None | |
| # 3D-API ๋จผ์ ์๋ (์ง์ ์ฌ๋ผ์ด๋ ์ฐ์ ์ด๋ผ๋ ๊ธฐ์กด ์ ์ฑ ์ ์ง) | |
| if (i % 2 == 0) and (image_count_3d < max_images_per_api): | |
| img_3d = generate_ai_image_via_3d_api(prompt_3d) | |
| if img_3d: | |
| selected_image = img_3d | |
| image_count_3d += 1 | |
| # FLUX-API (๋ค์ด์ด๊ทธ๋จ ์คํ์ผ) ์๋ | |
| if (selected_image is None) and (image_count_flux < max_images_per_api): | |
| img_flux = generate_flux_image_via_api(prompt_flux) | |
| if img_flux: | |
| selected_image = img_flux | |
| image_count_flux += 1 | |
| # ์ฌ๋ผ์ด๋์ ์ฝ์ | |
| if selected_image and os.path.exists(selected_image): | |
| try: | |
| slide.shapes.add_picture( | |
| selected_image, | |
| Inches(5.2), Inches(1.5), | |
| width=Inches(4.3), height=Inches(3.0) | |
| ) | |
| visual_added = True | |
| except Exception as e: | |
| logger.error(f"Failed to add slide image: {e}") | |
| finally: | |
| # ์์ ํ์ผ ์ ๋ฆฌ | |
| try: | |
| os.unlink(selected_image) | |
| except: | |
| pass | |
| # ์๊ฐ์ ์์๊ฐ ์ถ๊ฐ๋์ง ์์ ๊ฒฝ์ฐ ํ๋ ์ด์คํ๋ ์ถ๊ฐ | |
| if not visual_added: | |
| placeholder_box = slide.shapes.add_textbox( | |
| Inches(5.2), Inches(2.5), Inches(4.3), Inches(1.0) | |
| ) | |
| placeholder_tf = placeholder_box.text_frame | |
| placeholder_tf.text = f"{visual_type[1] if visual_type[0] == 'diagram' else 'Visual'} Placeholder" | |
| placeholder_tf.paragraphs[0].font.size = Pt(14) | |
| placeholder_tf.paragraphs[0].font.color.rgb = theme['colors']['secondary'] | |
| placeholder_tf.paragraphs[0].alignment = PP_ALIGN.CENTER | |
| else: | |
| # ๊ธฐ๋ณธ ๋ ์ด์์ (์๊ฐ์ ์์ ์์) | |
| if layout_type == 'section_header': | |
| content = slide_data.get('content', '') | |
| if content: | |
| logger.info(f"Adding content to section header slide {i+1}: {content[:50]}...") | |
| textbox = slide.shapes.add_textbox( | |
| Inches(1), Inches(3.5), Inches(8), Inches(1.5) | |
| ) | |
| tf = textbox.text_frame | |
| tf.clear() | |
| tf.text = content | |
| tf.word_wrap = True | |
| for paragraph in tf.paragraphs: | |
| paragraph.font.name = theme['fonts']['body'] | |
| paragraph.font.size = Pt(16) | |
| paragraph.font.color.rgb = RGBColor(255, 255, 255) | |
| paragraph.alignment = PP_ALIGN.CENTER | |
| line = slide.shapes.add_shape( | |
| MSO_SHAPE.RECTANGLE, Inches(3), Inches(3.2), Inches(4), Pt(4) | |
| ) | |
| line.fill.solid() | |
| line.fill.fore_color.rgb = RGBColor(255, 255, 255) | |
| line.line.fill.background() | |
| elif layout_type == 'two_content': | |
| content = slide_data.get('content', '') | |
| if content: | |
| logger.info(f"Creating two-column layout for slide {i+1}") | |
| content_lines = content.split('\n') | |
| mid_point = len(content_lines) // 2 | |
| # Left column | |
| left_box = slide.shapes.add_textbox( | |
| Inches(0.5), Inches(1.5), Inches(4.5), Inches(3.5) | |
| ) | |
| left_tf = left_box.text_frame | |
| left_tf.clear() | |
| left_content = '\n'.join(content_lines[:mid_point]) | |
| if left_content: | |
| left_tf.text = left_content | |
| left_tf.word_wrap = True | |
| force_font_size(left_tf, 14, theme) | |
| for paragraph in left_tf.paragraphs: | |
| text = paragraph.text.strip() | |
| if text and text.startswith(('-', 'โข', 'โ')) and not has_emoji(text): | |
| clean_text = text.lstrip('-โขโ ') | |
| emoji = get_emoji_for_content(clean_text) | |
| paragraph.text = f"{emoji} {clean_text}" | |
| force_font_size(left_tf, 14, theme) | |
| # Right column | |
| right_box = slide.shapes.add_textbox( | |
| Inches(5), Inches(1.5), Inches(4.5), Inches(3.5) | |
| ) | |
| right_tf = right_box.text_frame | |
| right_tf.clear() | |
| right_content = '\n'.join(content_lines[mid_point:]) | |
| if right_content: | |
| right_tf.text = right_content | |
| right_tf.word_wrap = True | |
| force_font_size(right_tf, 14, theme) | |
| for paragraph in right_tf.paragraphs: | |
| text = paragraph.text.strip() | |
| if text and text.startswith(('-', 'โข', 'โ')) and not has_emoji(text): | |
| clean_text = text.lstrip('-โขโ ') | |
| emoji = get_emoji_for_content(clean_text) | |
| paragraph.text = f"{emoji} {clean_text}" | |
| force_font_size(right_tf, 14, theme) | |
| else: | |
| # Regular content | |
| content = slide_data.get('content', '') | |
| logger.info(f"Slide {i+1} - Content to add: '{content[:100]}...' (length: {len(content)})") | |
| if include_charts and slide_data.get('chart_data'): | |
| create_chart_slide(slide, slide_data['chart_data'], theme) | |
| if content and content.strip(): | |
| textbox = slide.shapes.add_textbox( | |
| Inches(0.5), Inches(1.5), Inches(9), Inches(3.5) | |
| ) | |
| tf = textbox.text_frame | |
| tf.clear() | |
| tf.text = content.strip() | |
| tf.word_wrap = True | |
| tf.margin_left = Inches(0.1) | |
| tf.margin_right = Inches(0.1) | |
| tf.margin_top = Inches(0.05) | |
| tf.margin_bottom = Inches(0.05) | |
| force_font_size(tf, 16, theme) | |
| for p_idx, paragraph in enumerate(tf.paragraphs): | |
| if paragraph.text.strip(): | |
| text = paragraph.text.strip() | |
| if text.startswith(('-', 'โข', 'โ')) and not has_emoji(text): | |
| clean_text = text.lstrip('-โขโ ') | |
| emoji = get_emoji_for_content(clean_text) | |
| paragraph.text = f"{emoji} {clean_text}" | |
| if paragraph.runs: | |
| for run in paragraph.runs: | |
| run.font.size = Pt(16) | |
| run.font.name = theme['fonts']['body'] | |
| run.font.color.rgb = theme['colors']['text'] | |
| else: | |
| paragraph.font.size = Pt(16) | |
| paragraph.font.name = theme['fonts']['body'] | |
| paragraph.font.color.rgb = theme['colors']['text'] | |
| paragraph.space_before = Pt(6) | |
| paragraph.space_after = Pt(6) | |
| paragraph.line_spacing = 1.3 | |
| logger.info(f"Successfully added content to slide {i+1}") | |
| else: | |
| logger.warning(f"Slide {i+1} has no content or empty content") | |
| # Add slide notes if available | |
| if slide_data.get('notes'): | |
| try: | |
| notes_slide = slide.notes_slide | |
| notes_text_frame = notes_slide.notes_text_frame | |
| notes_text_frame.text = slide_data.get('notes', '') | |
| except Exception as e: | |
| logger.warning(f"Failed to add slide notes: {e}") | |
| # Add slide number | |
| slide_number_bg = slide.shapes.add_shape( | |
| MSO_SHAPE.ROUNDED_RECTANGLE, | |
| Inches(8.3), Inches(5.0), Inches(1.5), Inches(0.5) | |
| ) | |
| slide_number_bg.fill.solid() | |
| slide_number_bg.fill.fore_color.rgb = theme['colors']['primary'] | |
| slide_number_bg.fill.transparency = 0.8 | |
| slide_number_bg.line.fill.background() | |
| slide_number_box = slide.shapes.add_textbox( | |
| Inches(8.3), Inches(5.05), Inches(1.5), Inches(0.4) | |
| ) | |
| slide_number_frame = slide_number_box.text_frame | |
| slide_number_frame.text = f"{i + 1} / {len(slides_data)}" | |
| slide_number_frame.paragraphs[0].font.size = Pt(10) | |
| slide_number_frame.paragraphs[0].font.color.rgb = RGBColor(255, 255, 255) | |
| slide_number_frame.paragraphs[0].font.bold = False | |
| slide_number_frame.paragraphs[0].alignment = PP_ALIGN.CENTER | |
| if i % 2 == 0: | |
| accent_shape = slide.shapes.add_shape( | |
| MSO_SHAPE.OVAL, | |
| Inches(9.6), Inches(0.1), | |
| Inches(0.2), Inches(0.2) | |
| ) | |
| accent_shape.fill.solid() | |
| accent_shape.fill.fore_color.rgb = theme['colors']['accent'] | |
| accent_shape.line.fill.background() | |
| # ์ด๋ฏธ์ง ์์ฑ ๋ก๊ทธ | |
| logger.info(f"Total images generated - 3D: {image_count_3d}, FLUX: {image_count_flux}, Diagrams: {diagram_count}") | |
| # Add thank you slide | |
| thank_you_layout = prs.slide_layouts[5] if len(prs.slide_layouts) > 5 else prs.slide_layouts[0] | |
| thank_you_slide = prs.slides.add_slide(thank_you_layout) | |
| add_gradient_background(thank_you_slide, theme['colors']['secondary'], theme['colors']['primary']) | |
| if thank_you_slide.shapes.title: | |
| thank_you_slide.shapes.title.text = "๊ฐ์ฌํฉ๋๋ค" | |
| try: | |
| if thank_you_slide.shapes.title.text_frame and thank_you_slide.shapes.title.text_frame.paragraphs: | |
| thank_you_slide.shapes.title.text_frame.paragraphs[0].font.size = Pt(36) | |
| thank_you_slide.shapes.title.text_frame.paragraphs[0].font.bold = True | |
| thank_you_slide.shapes.title.text_frame.paragraphs[0].font.color.rgb = RGBColor(255, 255, 255) | |
| thank_you_slide.shapes.title.text_frame.paragraphs[0].alignment = PP_ALIGN.CENTER | |
| except Exception as e: | |
| logger.warning(f"Thank you slide styling failed: {e}") | |
| info_box = thank_you_slide.shapes.add_textbox( | |
| Inches(2), Inches(3.5), Inches(6), Inches(1) | |
| ) | |
| info_tf = info_box.text_frame | |
| info_tf.text = "AI๋ก ์์ฑ๋ ํ๋ ์ ํ ์ด์ " | |
| info_tf.paragraphs[0].font.size = Pt(18) | |
| info_tf.paragraphs[0].font.color.rgb = RGBColor(255, 255, 255) | |
| info_tf.paragraphs[0].alignment = PP_ALIGN.CENTER | |
| # Save to temporary file | |
| with tempfile.NamedTemporaryFile(delete=False, suffix=".pptx") as tmp_file: | |
| prs.save(tmp_file.name) | |
| return tmp_file.name | |
| ############################################################################## | |
| # Streaming Response Handler for PPT Generation - IMPROVED VERSION | |
| ############################################################################## | |
def generate_ppt_content(topic: str, num_slides: int, additional_context: str, use_korean: bool = False, layout_style: str = "consistent") -> Iterator[str]:
    """Stream PPT slide content from the LLM.

    Builds a strict-format prompt (slide / title / 5 bullets / speaker notes),
    posts it to the Friendli chat-completions endpoint with streaming enabled,
    and yields the *accumulated* response text after every chunk so the caller
    can render live progress.  On any API failure a single user-facing error
    string is yielded instead of raising.

    Args:
        topic: Presentation topic.
        num_slides: Exact number of slides requested from the model.
        additional_context: Reference text (file analysis / web search) to
            append to the system prompt; may be empty.
        use_korean: When True, prompts (and expected output) are in Korean.
        layout_style: "varied" or "consistent" layout guidance for the model.

    Yields:
        The full response text accumulated so far (one yield per chunk), or a
        single error message on failure.
    """
    # Layout instructions based on style
    layout_instructions = ""
    if layout_style == "varied":
        layout_instructions = """
์ฌ๋ผ์ด๋ ๋ ์ด์์์ ๋ค์ํ๊ฒ ๊ตฌ์ฑํด์ฃผ์ธ์:
- ๋งค 5๋ฒ์งธ ์ฌ๋ผ์ด๋๋ '์น์ ๊ตฌ๋ถ' ์ฌ๋ผ์ด๋๋ก ๋ง๋ค์ด์ฃผ์ธ์
- ๋น๊ต๋ ๋์กฐ ๋ด์ฉ์ด ์์ผ๋ฉด '๋น๊ต' ๋ ์ด์์์ ์ฌ์ฉํ์ธ์
- ๋ด์ฉ์ด ๋ง์ผ๋ฉด 2๋จ ๊ตฌ์ฑ์ ๊ณ ๋ คํ์ธ์
"""
    elif layout_style == "consistent":
        layout_instructions = """
์ผ๊ด๋ ๋ ์ด์์์ ์ ์งํด์ฃผ์ธ์:
- ๋ชจ๋ ์ฌ๋ผ์ด๋๋ ๋์ผํ ๊ตฌ์กฐ๋ก ์์ฑ
- ์ ๋ชฉ๊ณผ ๊ธ๋จธ๋ฆฌ ๊ธฐํธ ํ์ ํต์ผ
- ๊ฐ๊ฒฐํ๊ณ ๋ช ํํ ๊ตฌ์ฑ
"""
    # System prompt: spell the expected output format out explicitly so the
    # downstream parser (parse_llm_ppt_response) can rely on it.
    if use_korean:
        system_prompt = f"""๋น์ ์ ์ ๋ฌธ์ ์ธ PPT ํ๋ ์ ํ ์ด์ ์์ฑ ์ ๋ฌธ๊ฐ์ ๋๋ค.
์ฃผ์ด์ง ์ฃผ์ ์ ๋ํด ์ ํํ {num_slides}์ฅ์ ์ฌ๋ผ์ด๋ ๋ด์ฉ์ ์์ฑํด์ฃผ์ธ์.
**๋ฐ๋์ ์๋ ํ์์ ์ ํํ ๋ฐ๋ผ์ฃผ์ธ์:**
์ฌ๋ผ์ด๋ 1
์ ๋ชฉ: [์ฌ๋ผ์ด๋ ์ ๋ชฉ - "์ฌ๋ผ์ด๋ 1" ๊ฐ์ ๋ฒํธ๋ ํฌํจํ์ง ๋ง์ธ์]
๋ด์ฉ:
- ์ฒซ ๋ฒ์งธ ํต์ฌ ํฌ์ธํธ
- ๋ ๋ฒ์งธ ํต์ฌ ํฌ์ธํธ
- ์ธ ๋ฒ์งธ ํต์ฌ ํฌ์ธํธ
- ๋ค ๋ฒ์งธ ํต์ฌ ํฌ์ธํธ
- ๋ค์ฏ ๋ฒ์งธ ํต์ฌ ํฌ์ธํธ
๋ ธํธ: [๋ฐํ์๊ฐ ์ด ์ฌ๋ผ์ด๋๋ฅผ ์ค๋ช ํ ๋ ์ฌ์ฉํ ๊ตฌ์ด์ฒด ์คํฌ๋ฆฝํธ]
์ฌ๋ผ์ด๋ 2
์ ๋ชฉ: [์ฌ๋ผ์ด๋ ์ ๋ชฉ]
๋ด์ฉ:
- ์ฒซ ๋ฒ์งธ ํต์ฌ ํฌ์ธํธ
- ๋ ๋ฒ์งธ ํต์ฌ ํฌ์ธํธ
- ์ธ ๋ฒ์งธ ํต์ฌ ํฌ์ธํธ
- ๋ค ๋ฒ์งธ ํต์ฌ ํฌ์ธํธ
- ๋ค์ฏ ๋ฒ์งธ ํต์ฌ ํฌ์ธํธ
๋ ธํธ: [๋ฐํ ์คํฌ๋ฆฝํธ]
(์ด๋ฐ ์์ผ๋ก ์ฌ๋ผ์ด๋ {num_slides}๊น์ง ๊ณ์)
{layout_instructions}
**์ค์ ์ง์นจ:**
1. ๊ฐ ์ฌ๋ผ์ด๋๋ "์ฌ๋ผ์ด๋ ์ซ์"๋ก ์์
2. ์ ๋ชฉ: ๋ค์ ์ค์ ์ ๋ชฉ ์์ฑ (๋ฒํธ ์ ์ธ)
3. ๋ด์ฉ: ๋ค์ ์ ํํ 5๊ฐ์ ๊ธ๋จธ๋ฆฌ ๊ธฐํธ ํฌ์ธํธ
4. ๋ ธํธ: ๋ค์ ๋ฐํ ์คํฌ๋ฆฝํธ
5. ๊ฐ ์น์ ์ฌ์ด์ ๋น ์ค ์์
6. ์ด {num_slides}์ฅ ์์ฑ
7. ๊ฐ ํฌ์ธํธ๋ '-' ๊ธฐํธ๋ก ์์ํ์ธ์ (์ด๋ชจ์ง๋ ์๋์ผ๋ก ์ถ๊ฐ๋ฉ๋๋ค)
8. ๋ ธํธ๋ ํด๋น ์ฌ๋ผ์ด๋์ ๋ด์ฉ์ ๋ฐํ์๊ฐ ์ฒญ์ค์๊ฒ ์ค๋ช ํ๋ ๊ตฌ์ด์ฒด ๋๋ณธ์ผ๋ก ์์ฑํ์ธ์"""
    else:
        system_prompt = f"""You are a professional PPT presentation expert.
Create content for exactly {num_slides} slides on the given topic.
**You MUST follow this exact format:**
Slide 1
Title: [Slide title - do NOT include "Slide 1" in the title]
Content:
- First key point
- Second key point
- Third key point
- Fourth key point
- Fifth key point
Notes: [Speaker script in conversational style for explaining this slide]
Slide 2
Title: [Slide title]
Content:
- First key point
- Second key point
- Third key point
- Fourth key point
- Fifth key point
Notes: [Speaker script]
(Continue this way until Slide {num_slides})
**Important instructions:**
1. Each slide starts with "Slide number"
2. Title: followed by the actual title (no numbers)
3. Content: followed by exactly 5 bullet points
4. Notes: followed by speaker script
5. No empty lines between sections
6. Create exactly {num_slides} slides
7. Start each point with '-' (emojis will be added automatically)
8. Notes should be a speaker script explaining the slide content in conversational style"""
    # Append reference material (file analysis / web search), with the section
    # header in the same language as the rest of the prompt.
    # Fix: the Korean header used to be hard-coded even for English prompts.
    if additional_context:
        ref_header = "์ฐธ๊ณ ์ ๋ณด" if use_korean else "Reference information"
        system_prompt += f"\n\n{ref_header}:\n{additional_context}"
    # User prompt in the matching language.
    if use_korean:
        user_prompt = f"์ฃผ์ : {topic}\n\n์์์ ์ค๋ช ํ ํ์์ ๋ง์ถฐ ์ ํํ {num_slides}์ฅ์ PPT ์ฌ๋ผ์ด๋ ๋ด์ฉ์ ์์ฑํด์ฃผ์ธ์. ๊ฐ ์ฌ๋ผ์ด๋๋ง๋ค 5๊ฐ์ ํต์ฌ ํฌ์ธํธ์ ํจ๊ป, ๋ฐํ์๊ฐ ์ฒญ์ค์๊ฒ ํด๋น ๋ด์ฉ์ ์ค๋ช ํ๋ ๊ตฌ์ด์ฒด ๋ฐํ ๋๋ณธ์ ๋ ธํธ๋ก ์์ฑํด์ฃผ์ธ์."
    else:
        user_prompt = f"Topic: {topic}\n\nPlease create exactly {num_slides} PPT slides following the format described above. Include exactly 5 key points per slide, and write speaker notes as a conversational script explaining the content to the audience."
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt}
    ]
    # Call LLM API (Friendli, OpenAI-compatible SSE streaming).
    headers = {
        "Authorization": f"Bearer {FRIENDLI_TOKEN}",
        "Content-Type": "application/json"
    }
    payload = {
        "model": FRIENDLI_MODEL_ID,
        "messages": messages,
        "max_tokens": min(4000, num_slides * 300),  # More tokens for 5 points + notes
        "top_p": 0.9,
        "temperature": 0.8,
        "stream": True,
        "stream_options": {
            "include_usage": True
        }
    }
    try:
        # Use the response as a context manager so the streamed HTTP
        # connection is always released, even on early break or exception
        # (fix: the original never closed the streaming response).
        with requests.post(
            FRIENDLI_API_URL,
            headers=headers,
            json=payload,
            stream=True,
            timeout=60
        ) as response:
            response.raise_for_status()
            full_response = ""
            for line in response.iter_lines():
                if not line:
                    continue
                line_text = line.decode('utf-8')
                if not line_text.startswith("data: "):
                    continue
                data_str = line_text[6:]  # strip the SSE "data: " prefix
                if data_str == "[DONE]":
                    break
                # Keep only json.loads inside the try so other errors surface.
                try:
                    data = json.loads(data_str)
                except json.JSONDecodeError:
                    logger.warning(f"JSON parsing failed: {data_str}")
                    continue
                choices = data.get("choices") or []
                if choices:
                    content = choices[0].get("delta", {}).get("content", "")
                    if content:
                        full_response += content
                        yield full_response
    except Exception as e:
        logger.error(f"LLM API error: {str(e)}")
        yield f"โ ๏ธ Error generating content: {str(e)}"
| ############################################################################## | |
| # Main PPT Generation Function - IMPROVED VERSION with Enhanced Features | |
| ############################################################################## | |
def generate_ppt(
    topic: str,
    num_slides: int = 10,
    use_web_search: bool = False,
    use_korean: bool = True,
    reference_files: Optional[list] = None,
    design_theme: str = "professional",
    font_style: str = "modern",
    layout_style: str = "consistent",
    include_charts: bool = False,
    include_ai_image: bool = False,
    include_diagrams: bool = False,
    include_flux_images: bool = False
) -> Iterator[tuple]:
    """Main generator to create a PPT with advanced design and enhanced visuals.

    Yields (ppt_path, status_message, llm_response) tuples so the Gradio UI can
    stream progress; ppt_path is None until the final successful yield.

    Fix note: this function contains ``yield``, so it is a generator.  A plain
    ``return value`` inside a generator is never delivered to a caller that
    iterates it -- the original early validation exits were therefore silently
    dropped.  They are now ``yield``-ed followed by a bare ``return``.

    Args:
        topic: Presentation topic (must be non-empty).
        num_slides: Number of slides, must be within [3, 20].
        use_web_search: Append web-search results to the LLM context.
        use_korean: Generate prompts/content in Korean.
        reference_files: Optional list of .csv/.txt/.pdf file paths to analyze.
        design_theme: Key into DESIGN_THEMES.
        font_style: Currently unused; kept for interface stability.
        layout_style: "varied" or "consistent".
        include_charts / include_ai_image / include_diagrams /
        include_flux_images: Feature toggles for visual elements.
    """
    if not PPTX_AVAILABLE:
        yield None, "โ python-pptx ๋ผ์ด๋ธ๋ฌ๋ฆฌ๊ฐ ์ค์น๋์ง ์์์ต๋๋ค.\n\n์ค์น ๋ช ๋ น: pip install python-pptx", ""
        return
    if not topic.strip():
        yield None, "โ PPT ์ฃผ์ ๋ฅผ ์ ๋ ฅํด์ฃผ์ธ์.", ""
        return
    if num_slides < 3 or num_slides > 20:
        yield None, "โ ์ฌ๋ผ์ด๋ ์๋ 3์ฅ ์ด์ 20์ฅ ์ดํ๋ก ์ค์ ํด์ฃผ์ธ์.", ""
        return
    try:
        # Initialize the 3D-style image API (used for cover images).
        if include_ai_image and not AI_IMAGE_ENABLED:
            yield None, "๐ 3D ์คํ์ผ ์ด๋ฏธ์ง ์์ฑ API์ ์ฐ๊ฒฐํ๋ ์ค...", ""
            if initialize_ai_image_api():
                yield None, "โ 3D ์คํ์ผ API ์ฐ๊ฒฐ ์ฑ๊ณต!", ""
            else:
                include_ai_image = False
                yield None, "โ ๏ธ 3D ์คํ์ผ API ์ฐ๊ฒฐ ์คํจ. AI ์ด๋ฏธ์ง ์์ด ์งํํฉ๋๋ค.", ""
        # Initialize the FLUX API (used for photorealistic images).
        if (include_ai_image or include_flux_images) and not FLUX_API_ENABLED:
            yield None, "๐ FLUX ํฌํ ๋ฆฌ์ผ๋ฆฌ์คํฑ API์ ์ฐ๊ฒฐํ๋ ์ค...", ""
            if initialize_flux_api():
                yield None, "โ FLUX API ์ฐ๊ฒฐ ์ฑ๊ณต!", ""
            else:
                if include_ai_image and not AI_IMAGE_ENABLED:
                    include_ai_image = False
                include_flux_images = False
                yield None, "โ ๏ธ FLUX API ์ฐ๊ฒฐ ์คํจ. ํฌํ ๋ฆฌ์ผ๋ฆฌ์คํฑ ์ด๋ฏธ์ง ์์ด ์งํํฉ๋๋ค.", ""
        # Verify the diagram generator modules are importable.
        if include_diagrams and not DIAGRAM_GENERATORS_AVAILABLE:
            yield None, "โ ๏ธ ๋ค์ด์ด๊ทธ๋จ ์์ฑ๊ธฐ ๋ชจ๋์ ์ฐพ์ ์ ์์ต๋๋ค. ๋ค์ด์ด๊ทธ๋จ ์์ด ์งํํฉ๋๋ค.", ""
            include_diagrams = False
        # Warn when the Korean font needed by the diagrams is missing.
        if include_diagrams and not os.path.exists(KOREAN_FONT_PATH):
            yield None, f"โ ๏ธ ํ๊ธ ํฐํธ๋ฅผ ์ฐพ์ ์ ์์ต๋๋ค: {KOREAN_FONT_PATH}\n๋ค์ด์ด๊ทธ๋จ์ ํ๊ธ์ด ๊นจ์ง ์ ์์ต๋๋ค.", ""
        # Process reference files if provided.
        additional_context = ""
        chart_data = None
        if reference_files:
            file_contents = []
            for file_path in reference_files:
                if file_path.lower().endswith(".csv"):
                    csv_content = analyze_csv_file(file_path)
                    file_contents.append(csv_content)
                    # Extract chart data if the analyzer embedded any.
                    if "CHART_DATA:" in csv_content:
                        chart_json = csv_content.split("CHART_DATA:")[1]
                        try:
                            chart_data = json.loads(chart_json)
                        except json.JSONDecodeError:
                            # Fix: was a bare `except: pass`; stay best-effort
                            # but record why charts are unavailable.
                            logger.warning("CHART_DATA block in CSV analysis is not valid JSON; continuing without chart data")
                elif file_path.lower().endswith(".txt"):
                    file_contents.append(analyze_txt_file(file_path))
                elif file_path.lower().endswith(".pdf"):
                    file_contents.append(pdf_to_markdown(file_path))
            if file_contents:
                additional_context = "\n\n".join(file_contents)
        # Web search if enabled.
        if use_web_search:
            search_query = extract_keywords(topic, top_k=5)
            search_results = do_web_search(search_query, use_korean=use_korean)
            # do_web_search signals failure by returning a "Web search..." message.
            if not search_results.startswith("Web search"):
                additional_context += f"\n\n{search_results}"
        # Generate PPT content, streaming intermediate text to the UI.
        llm_response = ""
        for response in generate_ppt_content(topic, num_slides, additional_context, use_korean, layout_style):
            llm_response = response
            yield None, f"๐ ์์ฑ ์ค...\n\n{response}", response
        # Parse LLM response into structured slide dicts.
        slides_data = parse_llm_ppt_response(llm_response, layout_style)
        # Debug: log each parsed slide.
        logger.info(f"=== Parsed Slides Debug Info ===")
        for i, slide in enumerate(slides_data):
            logger.info(f"Slide {i+1}:")
            logger.info(f"  Title: {slide.get('title', 'NO TITLE')}")
            logger.info(f"  Content: {slide.get('content', 'NO CONTENT')[:100]}...")
            logger.info(f"  Content Length: {len(slide.get('content', ''))}")
            logger.info("---")
        # Attach chart data to the first data-related slide, if any.
        if chart_data and include_charts:
            for slide in slides_data:
                if '๋ฐ์ดํฐ' in slide.get('title', '') or 'data' in slide.get('title', '').lower():
                    slide['chart_data'] = chart_data
                    break
        # Debug logging.
        logger.info(f"Parsed {len(slides_data)} slides from LLM response")
        logger.info(f"Design theme: {design_theme}, Layout style: {layout_style}")
        logger.info(f"Include diagrams: {include_diagrams}, Include FLUX images: {include_flux_images}")
        if not slides_data:
            # Show the raw response for debugging.
            error_msg = "โ PPT ๋ด์ฉ ํ์ฑ์ ์คํจํ์ต๋๋ค.\n\n"
            error_msg += "LLM ์๋ต์ ํ์ธํด์ฃผ์ธ์:\n"
            error_msg += "=" * 50 + "\n"
            error_msg += llm_response[:500] + "..." if len(llm_response) > 500 else llm_response
            yield None, error_msg, llm_response
            return
        # Announce which visual elements are about to be generated.
        visual_features = []
        if include_ai_image and (AI_IMAGE_ENABLED or FLUX_API_ENABLED):
            visual_features.append("AI 3D ํ์ง ์ด๋ฏธ์ง")
        if include_diagrams and DIAGRAM_GENERATORS_AVAILABLE:
            visual_features.append("๋ค์ด์ด๊ทธ๋จ (์ต๋ 2๊ฐ)")
        if include_flux_images and FLUX_API_ENABLED:
            visual_features.append("AI ์์ฑ ์ด๋ฏธ์ง (๊ฐ API๋ณ ์ต๋ 3๊ฐ)")
        if visual_features:
            yield None, f"๐ ์ฌ๋ผ์ด๋ ์์ฑ ์๋ฃ!\n\n๐จ ์์ฑ ์ค: {', '.join(visual_features)}... (์๊ฐ์ด ์์๋ ์ ์์ต๋๋ค)", llm_response
        # Create the PPT file with advanced design.
        ppt_path = create_advanced_ppt_from_content(
            slides_data,
            topic,
            design_theme,
            include_charts,
            include_ai_image,
            include_diagrams,
            include_flux_images
        )
        success_msg = f"โ PPT ํ์ผ์ด ์ฑ๊ณต์ ์ผ๋ก ์์ฑ๋์์ต๋๋ค!\n\n"
        success_msg += f"๐ ์ฃผ์ : {topic}\n"
        success_msg += f"๐ ์ฌ๋ผ์ด๋ ์: {len(slides_data)}์ฅ\n"
        success_msg += f"๐จ ๋์์ธ ํ ๋ง: {DESIGN_THEMES[design_theme]['name']}\n"
        success_msg += f"๐ ๋ ์ด์์ ์คํ์ผ: {layout_style}\n"
        if include_ai_image and (AI_IMAGE_ENABLED or FLUX_API_ENABLED):
            success_msg += f"๐ผ๏ธ AI ์์ฑ ํ์ง ์ด๋ฏธ์ง ํฌํจ\n"
        if include_diagrams and DIAGRAM_GENERATORS_AVAILABLE:
            success_msg += f"๐ AI ์์ฑ ๋ค์ด์ด๊ทธ๋จ ํฌํจ (์ต๋ 2๊ฐ)\n"
        if include_flux_images and FLUX_API_ENABLED:
            success_msg += f"๐จ AI ์์ฑ ์ฌ๋ผ์ด๋ ์ด๋ฏธ์ง ํฌํจ (API๋ณ ์ต๋ 3๊ฐ)\n"
        success_msg += f"๐ ์์ฑ๋ ์ฌ๋ผ์ด๋:\n"
        for i, slide in enumerate(slides_data[:5]):  # Show first 5 slides
            success_msg += f" {i+1}. {slide.get('title', '์ ๋ชฉ ์์')} [{slide.get('layout', 'standard')}]\n"
            if slide.get('notes'):
                success_msg += f" ๐ก ๋ ธํธ: {slide.get('notes', '')[:50]}...\n"
        if len(slides_data) > 5:
            success_msg += f" ... ์ธ {len(slides_data) - 5}์ฅ"
        yield ppt_path, success_msg, llm_response
    except Exception as e:
        logger.error(f"PPT generation error: {str(e)}")
        import traceback
        error_details = traceback.format_exc()
        logger.error(f"Error details: {error_details}")
        yield None, f"โ PPT ์์ฑ ์ค ์ค๋ฅ ๋ฐ์: {str(e)}\n\n์์ธ ์ค๋ฅ:\n{error_details}", ""
| ############################################################################## | |
| # Gradio UI | |
| ############################################################################## | |
| css = """ | |
| /* Full width UI */ | |
| .gradio-container { | |
| background: rgba(255, 255, 255, 0.98); | |
| padding: 40px 50px; | |
| margin: 30px auto; | |
| width: 100% !important; | |
| max-width: 1400px !important; | |
| border-radius: 20px; | |
| box-shadow: 0 10px 30px rgba(0, 0, 0, 0.1); | |
| } | |
| /* Background */ | |
| body { | |
| background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); | |
| margin: 0; | |
| padding: 0; | |
| font-family: 'Segoe UI', 'Helvetica Neue', Arial, sans-serif; | |
| } | |
| /* Title styling */ | |
| h1 { | |
| background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); | |
| background-clip: text; | |
| -webkit-background-clip: text; | |
| -webkit-text-fill-color: transparent; | |
| font-weight: 700; | |
| margin-bottom: 10px; | |
| } | |
| /* Button styles */ | |
| button.primary { | |
| background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important; | |
| border: none; | |
| color: white !important; | |
| font-weight: 600; | |
| padding: 15px 30px !important; | |
| font-size: 18px !important; | |
| transition: all 0.3s ease; | |
| text-transform: uppercase; | |
| letter-spacing: 1px; | |
| } | |
| button.primary:hover { | |
| transform: translateY(-3px); | |
| box-shadow: 0 8px 20px rgba(102, 126, 234, 0.4); | |
| } | |
| /* Input styles */ | |
| .textbox, textarea, input[type="text"], input[type="number"] { | |
| border: 2px solid #e5e7eb; | |
| border-radius: 12px; | |
| padding: 15px; | |
| font-size: 16px; | |
| transition: all 0.3s ease; | |
| background: white; | |
| } | |
| .textbox:focus, textarea:focus, input[type="text"]:focus { | |
| border-color: #667eea; | |
| outline: none; | |
| box-shadow: 0 0 0 3px rgba(102, 126, 234, 0.1); | |
| } | |
| /* Card style */ | |
| .card { | |
| background: white; | |
| border-radius: 16px; | |
| padding: 25px; | |
| box-shadow: 0 4px 15px rgba(0, 0, 0, 0.08); | |
| margin-bottom: 25px; | |
| border: 1px solid rgba(102, 126, 234, 0.1); | |
| } | |
| /* Dropdown styles */ | |
| .dropdown { | |
| border: 2px solid #e5e7eb; | |
| border-radius: 12px; | |
| padding: 12px; | |
| background: white; | |
| transition: all 0.3s ease; | |
| } | |
| .dropdown:hover { | |
| border-color: #667eea; | |
| } | |
| /* Slider styles */ | |
| .gr-slider input[type="range"] { | |
| background: linear-gradient(to right, #667eea 0%, #764ba2 100%); | |
| height: 8px; | |
| border-radius: 4px; | |
| } | |
| /* File upload area */ | |
| .file-upload { | |
| border: 3px dashed #667eea; | |
| border-radius: 16px; | |
| padding: 40px; | |
| text-align: center; | |
| transition: all 0.3s ease; | |
| background: rgba(102, 126, 234, 0.02); | |
| } | |
| .file-upload:hover { | |
| border-color: #764ba2; | |
| background: rgba(102, 126, 234, 0.05); | |
| transform: scale(1.01); | |
| } | |
| /* Checkbox styles */ | |
| input[type="checkbox"] { | |
| width: 20px; | |
| height: 20px; | |
| margin-right: 10px; | |
| cursor: pointer; | |
| } | |
| /* Tab styles */ | |
| .tabs { | |
| border-radius: 12px; | |
| overflow: hidden; | |
| margin-bottom: 20px; | |
| } | |
| .tab-nav { | |
| background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); | |
| padding: 5px; | |
| } | |
| .tab-nav button { | |
| background: transparent; | |
| color: white; | |
| border: none; | |
| padding: 10px 20px; | |
| margin: 0 5px; | |
| border-radius: 8px; | |
| transition: all 0.3s ease; | |
| } | |
| .tab-nav button.selected { | |
| background: white; | |
| color: #667eea; | |
| } | |
| /* Section headers */ | |
| .section-header { | |
| font-size: 20px; | |
| font-weight: 600; | |
| color: #667eea; | |
| margin: 20px 0 15px 0; | |
| padding-bottom: 10px; | |
| border-bottom: 2px solid rgba(102, 126, 234, 0.2); | |
| } | |
| /* Status box styling */ | |
| .status-box { | |
| background: linear-gradient(135deg, rgba(102, 126, 234, 0.1) 0%, rgba(118, 75, 162, 0.1) 100%); | |
| border-radius: 12px; | |
| padding: 20px; | |
| } | |
| /* Preview box styling */ | |
| .preview-box { | |
| background: #f8f9fa; | |
| border-radius: 12px; | |
| padding: 20px; | |
| font-family: 'Courier New', monospace; | |
| font-size: 13px; | |
| line-height: 1.5; | |
| max-height: 500px; | |
| overflow-y: auto; | |
| } | |
| /* ๋ฐ์ํ ๋์์ธ */ | |
| @media (max-width: 768px) { | |
| .main-container { | |
| padding: 15px; | |
| margin: 10px; | |
| } | |
| .header-section h1 { | |
| font-size: 2em; | |
| } | |
| .gr-tab-item { | |
| padding: 10px 15px; | |
| font-size: 1em; | |
| } | |
| } | |
| """ | |
# ── Gradio UI definition ──────────────────────────────────────────────
# Builds the full page layout for the PPT generator and wires the
# generate button to `generate_ppt` (defined elsewhere in this file).
# NOTE(review): component creation order determines on-screen layout;
# the positional order of `gr.Examples` rows and `generate_btn.click`
# inputs must stay in sync with the component list below.
with gr.Blocks(css=css, title="AI PPT Generator Pro") as demo:
    # Page header: title and short feature blurb.
    gr.Markdown(
        """
        # ๐ฏ AI ๊ธฐ๋ฐ PPT ์๋ ์์ฑ ์์คํ Pro
        ๊ณ ๊ธ ๋์์ธ ํ ๋ง์ ๋ ์ด์์์ ํ์ฉํ ์ ๋ฌธ์ ์ธ ํ๋ ์ ํ ์ด์ ์ ์๋์ผ๋ก ์์ฑํฉ๋๋ค.
        6๊ฐ์ง ๋ค์ด์ด๊ทธ๋จ ํ์ ๊ณผ AI ์์ฑ ์ด๋ฏธ์ง๋ฅผ ํฌํจํ์ฌ ์๊ฐ์ ์ผ๋ก ํ๋ถํ PPT๋ฅผ ๋ง๋ญ๋๋ค.
        """
    )
    with gr.Row():
        # Left column (2/3 width): all generation inputs.
        with gr.Column(scale=2):
            # Free-text presentation topic.
            topic_input = gr.Textbox(
                label="๐ PPT ์ฃผ์ ",
                placeholder="์: ์ธ๊ณต์ง๋ฅ์ ๋ฏธ๋์ ์ฐ์ ์ ์ฉ ์ฌ๋ก",
                lines=2,
                elem_classes="card"
            )
            with gr.Row():
                with gr.Column():
                    # Number of slides to generate (bounded 3..20).
                    num_slides = gr.Slider(
                        label="๐ ์ฌ๋ผ์ด๋ ์",
                        minimum=3,
                        maximum=20,
                        step=1,
                        value=10,
                        info="์์ฑํ ์ฌ๋ผ์ด๋ ๊ฐ์ (3-20์ฅ)"
                    )
                with gr.Column():
                    # Language toggle (Korean output on by default).
                    use_korean = gr.Checkbox(
                        label="๐ฐ๐ท ํ๊ตญ์ด",
                        value=True,
                        info="ํ๊ตญ์ด๋ก ์์ฑ"
                    )
                    # Optional web search for up-to-date content.
                    use_web_search = gr.Checkbox(
                        label="๐ ์น ๊ฒ์",
                        value=False,
                        info="์ต์ ์ ๋ณด ๊ฒ์"
                    )
            # ── Design options section ──
            gr.Markdown("<div class='section-header'>๐จ ๋์์ธ ์ต์ </div>")
            with gr.Row():
                # Visual theme; tuples are (display label, internal value).
                design_theme = gr.Dropdown(
                    label="๋์์ธ ํ ๋ง",
                    choices=[
                        ("ํ๋กํ์ ๋ (ํ๋/ํ์)", "professional"),
                        ("๋ชจ๋ (๋ณด๋ผ/ํํฌ)", "modern"),
                        ("์์ฐ (์ด๋ก/๊ฐ์)", "nature"),
                        ("ํฌ๋ฆฌ์์ดํฐ๋ธ (๋ค์ฑ๋ก์ด)", "creative"),
                        ("๋ฏธ๋๋ฉ (ํ๋ฐฑ)", "minimal")
                    ],
                    value="professional",
                    elem_classes="dropdown"
                )
                # Slide layout strategy.
                layout_style = gr.Dropdown(
                    label="๋ ์ด์์ ์คํ์ผ",
                    choices=[
                        ("์ผ๊ด๋ ๋ ์ด์์", "consistent"),
                        ("๋ค์ํ ๋ ์ด์์", "varied"),
                        ("๋ฏธ๋๋ฉ ๋ ์ด์์", "minimal")
                    ],
                    value="consistent",
                    elem_classes="dropdown"
                )
            with gr.Row():
                # Typeface style for slide text.
                font_style = gr.Dropdown(
                    label="ํฐํธ ์คํ์ผ",
                    choices=[
                        ("๋ชจ๋", "modern"),
                        ("ํด๋์", "classic"),
                        ("์บ์ฃผ์ผ", "casual")
                    ],
                    value="modern",
                    elem_classes="dropdown"
                )
                # Chart generation (only meaningful with CSV reference data).
                include_charts = gr.Checkbox(
                    label="๐ ์ฐจํธ ํฌํจ",
                    value=False,
                    info="CSV ๋ฐ์ดํฐ๊ฐ ์์ ๊ฒฝ์ฐ ์ฐจํธ ์์ฑ"
                )
            # ── Visual enhancement options section ──
            gr.Markdown("<div class='section-header'>๐ผ๏ธ ์๊ฐ์ ํฅ์ ์ต์ </div>")
            with gr.Row():
                # AI-generated title image (FLUX).
                include_ai_image = gr.Checkbox(
                    label="๐ผ๏ธ AI ํ์ง ์ด๋ฏธ์ง",
                    value=False,
                    info="FLUX๋ก ์์ฑํ ํ์ง ์ด๋ฏธ์ง ์ถ๊ฐ"
                )
                # Auto-generated diagrams on the two most suitable slides.
                include_diagrams = gr.Checkbox(
                    label="๐ AI ๋ค์ด์ด๊ทธ๋จ",
                    value=False,
                    info="ํ์๋๊ฐ ๋์ 2๊ฐ ์ฌ๋ผ์ด๋์ ๋ค์ด์ด๊ทธ๋จ ์๋ ์์ฑ"
                )
                # Per-slide FLUX images (capped per API).
                include_flux_images = gr.Checkbox(
                    label="๐จ ์ฌ๋ผ์ด๋ ์ด๋ฏธ์ง",
                    value=False,
                    info="์ฃผ์ ์ฌ๋ผ์ด๋์ FLUX ์ด๋ฏธ์ง ์ถ๊ฐ (API๋ณ ์ต๋ 3๊ฐ)"
                )
            # Optional reference material fed into content generation.
            reference_files = gr.File(
                label="๐ ์ฐธ๊ณ ์๋ฃ (์ ํ์ฌํญ)",
                file_types=[".pdf", ".csv", ".txt"],
                file_count="multiple",
                elem_classes="file-upload"
            )
            # Main action button (handler wired below).
            generate_btn = gr.Button(
                "๐ PPT ์์ฑํ๊ธฐ",
                variant="primary",
                size="lg"
            )
        # Right column (1/3 width): outputs.
        with gr.Column(scale=1):
            # Finished .pptx download slot (filled by the handler).
            download_file = gr.File(
                label="๐ฅ ์์ฑ๋ PPT ๋ค์ด๋ก๋",
                interactive=False,
                elem_classes="card"
            )
            # Progress / status log from the generation pipeline.
            status_text = gr.Textbox(
                label="๐ ์์ฑ ์ํ",
                lines=10,
                interactive=False,
                elem_classes="status-box"
            )
    with gr.Row():
        # Read-only preview of the generated slide content.
        content_preview = gr.Textbox(
            label="๐ ์์ฑ๋ ๋ด์ฉ ๋ฏธ๋ฆฌ๋ณด๊ธฐ",
            lines=20,
            interactive=False,
            visible=True,
            elem_classes="preview-box"
        )
    # Static usage guide shown below the main controls.
    gr.Markdown(
        """
        ### ๐ ์ฌ์ฉ ๋ฐฉ๋ฒ
        1. **PPT ์ฃผ์ ์ ๋ ฅ**: ๊ตฌ์ฒด์ ์ธ ์ฃผ์ ์ผ์๋ก ๋ ์ข์ ๊ฒฐ๊ณผ
        2. **์ฌ๋ผ์ด๋ ์ ์ ํ**: 3-20์ฅ ๋ฒ์์์ ์ ํ
        3. **๋์์ธ ํ ๋ง ์ ํ**: 5๊ฐ์ง ์ ๋ฌธ์ ์ธ ํ ๋ง ์ค ์ ํ
        4. **์๊ฐ์ ์ต์ ์ค์ **: AI ์ด๋ฏธ์ง, ๋ค์ด์ด๊ทธ๋จ, FLUX ์ด๋ฏธ์ง ์ถ๊ฐ
        5. **์ฐธ๊ณ ์๋ฃ ์ ๋ก๋**: PDF, CSV, TXT ํ์ผ ์ง์
        6. **์์ฑ ๋ฒํผ ํด๋ฆญ**: AI๊ฐ ์๋์ผ๋ก PPT ์์ฑ
        ### ๐จ ๊ฐ์ ๋ ๊ธฐ๋ฅ - ์ต์ ํ๋ ์๊ฐ ์์
        - **๋ค์ด์ด๊ทธ๋จ ๊ฐ์ ์ ํ**: ์ ์ฒด ์ฌ๋ผ์ด๋ ์ค ํ์๋๊ฐ ๊ฐ์ฅ ๋์ 2๊ฐ ์ฌ๋ผ์ด๋์๋ง ๋ค์ด์ด๊ทธ๋จ ์์ฑ
        - **์ด๋ฏธ์ง ๊ฐ์ ์ ํ**: ๊ฐ API๋ณ๋ก ์ต๋ 3๊ฐ์ฉ, ์ด 6๊ฐ์ AI ์์ฑ ์ด๋ฏธ์ง
        - **ํ๊ธ ํฐํธ ์ง์**: NanumGothic-Regular.ttf๋ฅผ ์ฌ์ฉํ์ฌ ๋ค์ด์ด๊ทธ๋จ์ ํ๊ธ ๊นจ์ง ๋ฌธ์ ํด๊ฒฐ
        - **์ค๋งํธ ๋ฐฐ์น**: ๋ค์ด์ด๊ทธ๋จ๊ณผ ์ด๋ฏธ์ง๋ ํ ์คํธ์ ๊ฒน์น์ง ์๋๋ก ์ฐ์ธก์ ์๋ ๋ฐฐ์น
        ### ๐ก ๊ณ ๊ธ ํ
        - **๋ค์ด์ด๊ทธ๋จ ์ฐ์ ์์**: ํ๋ก์ธ์ค, WBS, ๊ฐ๋ ๋ ๋ฑ์ ํค์๋๊ฐ ์๋ ์ฌ๋ผ์ด๋๊ฐ ์ฐ์ ์ ํ๋ฉ๋๋ค
        - **์ด๋ฏธ์ง ๋ถ๋ฐฐ**: 3D ์คํ์ผ๊ณผ ํฌํ ๋ฆฌ์ผ๋ฆฌ์คํฑ ์ด๋ฏธ์ง๊ฐ ๊ท ํ์๊ฒ ๋ถ๋ฐฐ๋ฉ๋๋ค
        - **ํ์ง ์ด๋ฏธ์ง**: ์ฃผ์ ์ ๋ง๋ ํ๋ฆฌ๋ฏธ์ ํ์ง ์ด๋ฏธ์ง๊ฐ ์๋ ์์ฑ๋ฉ๋๋ค
        - **ํ๊ธ ์ง์**: app.py์ ๊ฐ์ ๊ฒฝ๋ก์ NanumGothic-Regular.ttf ํ์ผ์ ๋ฐฐ์นํ์ธ์
        """
    )
    # Clickable example rows; each row maps positionally onto `inputs` below,
    # so it must stay in the same order as the component list.
    gr.Examples(
        examples=[
            ["์ธ๊ณต์ง๋ฅ์ ๋ฏธ๋์ ์ฐ์ ์ ์ฉ ์ฌ๋ก", 10, False, True, [], "professional", "modern", "consistent", False, True, True, False],
            ["2024๋ ๋์งํธ ๋ง์ผํ ํธ๋ ๋", 12, True, True, [], "modern", "modern", "consistent", False, True, True, True],
            ["๊ธฐํ๋ณํ์ ์ง์๊ฐ๋ฅํ ๋ฐ์ ", 15, True, True, [], "nature", "classic", "consistent", False, True, True, True],
            ["์คํํธ์ ์ฌ์ ๊ณํ์", 8, False, True, [], "creative", "modern", "varied", False, True, True, True],
            ["ํ๋ก์ ํธ ๊ด๋ฆฌ ๋ฐฉ๋ฒ๋ก ", 10, False, True, [], "professional", "modern", "consistent", False, False, True, False],
            ["๋จธ์ ๋ฌ๋ ํ๋ก์ธ์ค ๊ฐ์ด๋", 12, False, True, [], "modern", "modern", "consistent", False, False, True, False],
        ],
        inputs=[topic_input, num_slides, use_web_search, use_korean, reference_files,
                design_theme, font_style, layout_style, include_charts, include_ai_image,
                include_diagrams, include_flux_images],
    )
    # Wire the button to the generation pipeline; input order must match
    # `generate_ppt`'s positional parameters.
    generate_btn.click(
        fn=generate_ppt,
        inputs=[
            topic_input,
            num_slides,
            use_web_search,
            use_korean,
            reference_files,
            design_theme,
            font_style,
            layout_style,
            include_charts,
            include_ai_image,
            include_diagrams,
            include_flux_images
        ],
        outputs=[download_file, status_text, content_preview]
    )
# Script entry point: verify assets, warm up remote APIs in parallel,
# report module availability, then serve the Gradio app.
if __name__ == "__main__":
    # Korean font check — diagram rendering depends on this file being present.
    if not os.path.exists(KOREAN_FONT_PATH):
        logger.warning(f"โ ๏ธ ํ๊ธ ํฐํธ ํ์ผ์ ์ฐพ์ ์ ์์ต๋๋ค: {KOREAN_FONT_PATH}")
        logger.warning("๋ค์ด์ด๊ทธ๋จ์์ ํ๊ธ์ด ๊นจ์ง ์ ์์ต๋๋ค. NanumGothic-Regular.ttf ํ์ผ์ app.py์ ๊ฐ์ ๊ฒฝ๋ก์ ๋ฐฐ์นํ์ธ์.")
    else:
        logger.info(f"โ ํ๊ธ ํฐํธ ํ์ผ์ ์ฐพ์์ต๋๋ค: {KOREAN_FONT_PATH}")
    # Kick off both API warm-ups concurrently; a failure is logged but never
    # prevents the app from launching.
    with concurrent.futures.ThreadPoolExecutor(max_workers=2) as pool:
        candidates = [
            (AI_IMAGE_API_URL, initialize_ai_image_api),
            (FLUX_API_URL, initialize_flux_api),
        ]
        pending = [pool.submit(init_fn) for url, init_fn in candidates if url]
        # Drain results as they finish so errors surface immediately.
        for task in concurrent.futures.as_completed(pending):
            try:
                task.result()
            except Exception as e:
                logger.error(f"API initialization failed: {e}")
    # Report whether the optional diagram-generator modules were imported.
    if DIAGRAM_GENERATORS_AVAILABLE:
        logger.info("โ ๋ค์ด์ด๊ทธ๋จ ์์ฑ๊ธฐ ๋ชจ๋์ด ์ ์์ ์ผ๋ก ๋ก๋๋์์ต๋๋ค")
        logger.info("์ง์๋๋ ๋ค์ด์ด๊ทธ๋จ ํ์ : Process Flow, Concept Map, WBS, Radial, Synoptic Chart")
    else:
        logger.warning("โ ๏ธ ๋ค์ด์ด๊ทธ๋จ ์์ฑ๊ธฐ ๋ชจ๋์ ์ฐพ์ ์ ์์ต๋๋ค. ๋ค์ด์ด๊ทธ๋จ ๊ธฐ๋ฅ์ด ๋นํ์ฑํ๋ฉ๋๋ค.")
    demo.launch()