# HuggingFace Spaces status banner (scrape residue, not code): Spaces — Sleeping
| import gradio as gr | |
| import requests | |
| from bs4 import BeautifulSoup | |
| import json | |
| import base64 | |
| from PIL import Image | |
| import io | |
| import os | |
| from urllib.parse import urlparse | |
| import re | |
def clean_url(url):
    """Normalize *url* so it always carries an explicit scheme.

    Prepends ``https://`` when the string starts with neither
    ``http://`` nor ``https://``; otherwise returns it untouched.
    """
    has_scheme = url.startswith(('http://', 'https://'))
    return url if has_scheme else 'https://' + url
def get_page_screenshot(url):
    """Fetch a JPEG screenshot of *url* via the apiflash screenshot API.

    Returns:
        bytes: raw JPEG data on success, or ``None`` on any failure
        (non-200 status, network error, timeout).

    NOTE(review): the ``demo`` access key is rate-limited and unsuitable
    for production — move a real key into an environment variable.
    """
    try:
        # Pass the target URL via `params` so requests percent-encodes it;
        # interpolating it raw into the query string breaks on URLs that
        # themselves contain '&' or '?'.
        response = requests.get(
            "https://api.apiflash.com/v1/urltoimage",
            params={
                "access_key": "demo",
                "url": url,
                "format": "jpeg",
                "quality": 80,
            },
            # Without a timeout a stalled screenshot service would hang
            # the whole conversion request indefinitely.
            timeout=15,
        )
        if response.status_code == 200:
            return response.content
        return None
    except Exception as e:
        print(f"Screenshot error: {str(e)}")
        return None
def extract_styles(element):
    """Parse an element's inline ``style`` attribute into a dict.

    Returns a mapping of CSS property name -> value (both stripped of
    surrounding whitespace); empty when the element carries no
    ``style`` attribute.
    """
    parsed = {}
    if not element.has_attr('style'):
        return parsed
    for declaration in element.get('style').split(';'):
        declaration = declaration.strip()
        # Skip empty fragments and malformed declarations without a colon.
        if ':' not in declaration:
            continue
        prop, _, value = declaration.partition(':')
        parsed[prop.strip()] = value.strip()
    return parsed
def extract_colors(styles):
    """Collect concrete color values from a parsed style dict.

    Inspects ``color``, ``background-color`` and ``border-color`` (in
    that fixed order), skipping the non-concrete keywords
    ``transparent``/``inherit``/``initial``. Duplicates are kept.
    """
    non_concrete = ('transparent', 'inherit', 'initial')
    return [
        styles[prop]
        for prop in ('color', 'background-color', 'border-color')
        if prop in styles and styles[prop] not in non_concrete
    ]
def extract_fonts(styles):
    """Split a ``font-family`` declaration into individual font names.

    Each name is stripped of surrounding whitespace and single/double
    quotes. Returns an empty list when no ``font-family`` is declared.
    """
    if 'font-family' not in styles:
        return []
    declaration = styles['font-family']
    return [name.strip().strip('"\'') for name in declaration.split(',')]
def process_website(url):
    """Fetch *url* and extract design elements for Figma conversion.

    Returns a dict with keys:
        url, title, screenshot (base64 JPEG or None),
        text_elements (first 50), colors (deduped, first 20),
        fonts (deduped, first 10), images (first 20).
    On any failure returns ``{"error": <message>}`` instead.
    """
    url = clean_url(url)
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        response = requests.get(url, headers=headers, timeout=15)
        if response.status_code != 200:
            return {"error": f"Failed to fetch website (Status code: {response.status_code})"}

        soup = BeautifulSoup(response.text, 'html.parser')

        # Optional page screenshot, base64-encoded for JSON transport.
        screenshot = get_page_screenshot(url)
        screenshot_base64 = (
            base64.b64encode(screenshot).decode('utf-8') if screenshot else None
        )

        # Text-bearing elements with their inline styles.
        text_elements = []
        text_tags = ['h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'p', 'span', 'a', 'button']
        for text_tag in soup.find_all(text_tags):
            if text_tag.text.strip():
                text_elements.append({
                    "tag": text_tag.name,
                    "text": text_tag.text.strip(),
                    "styles": extract_styles(text_tag),
                })

        # Single pass over the whole DOM for colors and fonts (the
        # original traversed the full tree twice for the same data).
        all_colors = []
        all_fonts = []
        for tag in soup.find_all(True):
            styles = extract_styles(tag)
            all_colors.extend(extract_colors(styles))
            all_fonts.extend(extract_fonts(styles))

        # Image sources, resolved to absolute URLs. base_url is
        # loop-invariant, so compute it once up front.
        base_url = '{uri.scheme}://{uri.netloc}'.format(uri=urlparse(url))
        images = []
        for img in soup.find_all('img'):
            if not img.has_attr('src'):
                continue
            src = img['src']
            if src.startswith('//'):
                # Protocol-relative URL.
                src = 'https:' + src
            elif not src.startswith(('http://', 'https://')):
                # Relative URL: root-relative vs. path-relative.
                src = base_url + src if src.startswith('/') else base_url + '/' + src
            images.append({
                "src": src,
                "alt": img.get('alt', ''),
                "width": img.get('width', ''),
                "height": img.get('height', ''),
            })

        # soup.title.string is None for an empty <title>; guard so the
        # "title" field is always a string.
        title = (soup.title.string or "") if soup.title else ""

        return {
            "url": url,
            "title": title,
            "screenshot": screenshot_base64,
            "text_elements": text_elements[:50],   # cap payload size
            "colors": list(set(all_colors))[:20],  # dedupe + cap
            "fonts": list(set(all_fonts))[:10],    # dedupe + cap
            "images": images[:20],                 # cap payload size
        }
    except Exception as e:
        # Broad catch at the API boundary: callers get a JSON error
        # object rather than a traceback.
        return {"error": str(e)}
def figma_conversion_api(url):
    """API entry point for the Figma plugin.

    Validates that a URL was supplied, then delegates to
    ``process_website``; returns ``{"error": ...}`` when *url* is empty.
    """
    if url:
        return process_website(url)
    return {"error": "No URL provided"}
# Build the Gradio interface: one tab documenting the API, one tab for
# interactive testing of the conversion pipeline.
with gr.Blocks() as demo:
    gr.Markdown("# Website to Figma Converter API")

    with gr.Tab("API Endpoint"):
        gr.Markdown("""
        ## API Usage
        This API provides website conversion functionality for the Figma plugin.
        ### Example usage:
        Send a POST request with a URL parameter to convert a website to Figma-compatible format.
        """)

    with gr.Tab("Test Interface"):
        with gr.Row():
            url_box = gr.Textbox(label="Enter Website URL", placeholder="https://example.com")
            run_btn = gr.Button("Convert Website")
        result_json = gr.JSON(label="Conversion Result")
        run_btn.click(figma_conversion_api, inputs=url_box, outputs=result_json)

# Start the app.
demo.launch()