# NOTE: The lines below are Hugging Face Spaces page chrome (status, file size,
# git blob hashes, line-number ruler) captured along with the file; they are not
# Python and are kept here only as comments so the module parses.
# Spaces: Sleeping — File size: 6,110 Bytes
import base64
import io
import json
import os
import re
from urllib.parse import urlencode, urlparse

import gradio as gr
import requests
from bs4 import BeautifulSoup
from PIL import Image
def clean_url(url):
    """Return *url* unchanged if it carries an http(s) scheme, else prepend https://."""
    has_scheme = url.startswith(('http://', 'https://'))
    return url if has_scheme else f'https://{url}'
def get_page_screenshot(url):
    """Fetch a JPEG screenshot of *url* via the ApiFlash screenshot API.

    Returns the raw image bytes on success, or None on any failure
    (non-200 status, network error, timeout).
    """
    try:
        # URL-encode the query so characters in `url` (?, &, #, spaces) cannot
        # corrupt the API request — the original interpolated the URL raw.
        params = urlencode({
            'access_key': 'demo',  # free demo key (limited, for demo purposes)
            'url': url,
            'format': 'jpeg',
            'quality': 80,
        })
        api_url = f"https://api.apiflash.com/v1/urltoimage?{params}"
        # Timeout keeps the request from hanging indefinitely on a dead host.
        response = requests.get(api_url, timeout=15)
        if response.status_code == 200:
            return response.content
        return None
    except requests.RequestException as e:
        # Screenshots are best-effort: log and fall back to None.
        print(f"Screenshot error: {str(e)}")
        return None
def extract_styles(element):
    """Parse an element's inline ``style`` attribute into a property->value dict.

    Elements without a style attribute yield an empty dict; malformed
    declarations (no ``:``) are skipped.
    """
    if not element.has_attr('style'):
        return {}
    declarations = (d for d in element.get('style').split(';') if d.strip())
    parsed = {}
    for declaration in declarations:
        if ':' not in declaration:
            continue
        prop, _, value = declaration.partition(':')
        parsed[prop.strip()] = value.strip()
    return parsed
def extract_colors(styles):
    """Collect concrete color values from a parsed style dict.

    Only ``color``, ``background-color`` and ``border-color`` are inspected;
    CSS keyword placeholders are ignored.
    """
    ignored = ('transparent', 'inherit', 'initial')
    return [
        styles[prop]
        for prop in ('color', 'background-color', 'border-color')
        if prop in styles and styles[prop] not in ignored
    ]
def extract_fonts(styles):
    """Split a ``font-family`` declaration into a list of cleaned font names.

    Surrounding whitespace and single/double quotes are stripped from each
    name; a missing declaration yields an empty list.
    """
    declaration = styles.get('font-family')
    if declaration is None:
        return []
    return [name.strip().strip('"\'') for name in declaration.split(',')]
def _resolve_img_src(src, page_url):
    """Resolve a protocol-relative or site-relative img src against *page_url*."""
    if src.startswith('//'):
        return 'https:' + src
    if src.startswith(('http://', 'https://')):
        return src
    # Handle relative URLs by rebuilding the page's origin.
    base_url = '{uri.scheme}://{uri.netloc}'.format(uri=urlparse(page_url))
    if src.startswith('/'):
        return base_url + src
    return base_url + '/' + src


def process_website(url):
    """Fetch a webpage and extract design elements for the Figma plugin.

    Returns a dict with keys ``url``, ``title``, ``screenshot`` (base64 JPEG
    or None), ``text_elements`` (max 50), ``colors`` (max 20), ``fonts``
    (max 10) and ``images`` (max 20); on any failure returns
    ``{"error": message}`` instead.
    """
    url = clean_url(url)
    try:
        # Fetch the webpage with a browser-like UA; some sites block default clients.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        response = requests.get(url, headers=headers, timeout=15)
        if response.status_code != 200:
            return {"error": f"Failed to fetch website (Status code: {response.status_code})"}
        soup = BeautifulSoup(response.text, 'html.parser')
        # Screenshot is best-effort; None when the screenshot API fails.
        screenshot = get_page_screenshot(url)
        screenshot_base64 = base64.b64encode(screenshot).decode('utf-8') if screenshot else None
        # Extract visible text with per-element inline styles.
        text_elements = []
        for text_tag in soup.find_all(['h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'p', 'span', 'a', 'button']):
            if text_tag.text.strip():
                text_elements.append({
                    "tag": text_tag.name,
                    "text": text_tag.text.strip(),
                    "styles": extract_styles(text_tag)
                })
        # One pass over every tag gathers colors and fonts together
        # (the original walked the whole tree twice for the same data).
        all_colors = []
        all_fonts = []
        for tag in soup.find_all(True):
            styles = extract_styles(tag)
            all_colors.extend(extract_colors(styles))
            all_fonts.extend(extract_fonts(styles))
        # Extract images, resolving relative sources to absolute URLs.
        images = []
        for img in soup.find_all('img'):
            if not img.has_attr('src'):
                continue
            images.append({
                "src": _resolve_img_src(img['src'], url),
                "alt": img.get('alt', ''),
                "width": img.get('width', ''),
                "height": img.get('height', '')
            })
        # soup.title.string is None for an empty/compound <title>; coerce to "".
        title = (soup.title.string or "") if soup.title else ""
        return {
            "url": url,
            "title": title,
            "screenshot": screenshot_base64,
            "text_elements": text_elements[:50],  # Limit to 50 elements
            # dict.fromkeys dedupes while keeping first-seen order (deterministic,
            # unlike the original list(set(...))).
            "colors": list(dict.fromkeys(all_colors))[:20],
            "fonts": list(dict.fromkeys(all_fonts))[:10],
            "images": images[:20]  # Limit to 20 images
        }
    except Exception as e:
        # Top-level boundary: any scraping failure is reported to the caller.
        return {"error": str(e)}
def figma_conversion_api(url):
    """Entry point for the Figma plugin: validate the URL, then convert it."""
    if not url:
        return {"error": "No URL provided"}
    return process_website(url)
# Build the Gradio UI: one tab documenting the API, one tab for manual testing.
with gr.Blocks() as demo:
    gr.Markdown("# Website to Figma Converter API")
    with gr.Tab("API Endpoint"):
        # Markdown body kept at column 0: indenting it 4+ spaces would be
        # rendered as a markdown code block.
        gr.Markdown("""
## API Usage
This API provides website conversion functionality for the Figma plugin.
### Example usage:
Send a POST request with a URL parameter to convert a website to Figma-compatible format.
""")
    with gr.Tab("Test Interface"):
        with gr.Row():
            website_url = gr.Textbox(label="Enter Website URL", placeholder="https://example.com")
            run_button = gr.Button("Convert Website")
        result_view = gr.JSON(label="Conversion Result")
        run_button.click(figma_conversion_api, inputs=website_url, outputs=result_view)

# Launch the app (the Space's entry point).
demo.launch()
# (end of file — trailing scrape artifact removed)