import gradio as gr
import requests
import socket
import logging
import time
import re
from bs4 import BeautifulSoup
from urllib.parse import urlparse
from typing import Dict, Tuple
from google import genai

# -------------------------------------------------
# Logging setup
# -------------------------------------------------
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s | %(levelname)s | %(message)s",
)
logger = logging.getLogger(__name__)

logger.info("AI Website Review Tool starting up")

# -------------------------------------------------
# Constants
# -------------------------------------------------
TIMEOUT = 15  # per-request timeout, seconds
MAX_RETRIES = 2  # fetch attempts before giving up
CONTENT_LIMIT = 12000  # max characters of page text included in the prompt

# -----------------------------
# URL Validation & Normalization
# -----------------------------
def normalize_url(url: str) -> str:
    """Normalize and validate URL format."""
    url = url.strip()
    parsed = urlparse(url)
    if not parsed.scheme:
        url = "https://" + url
    return url


def validate_url(url: str) -> Tuple[bool, str]:
    """Validate URL format and accessibility."""
    try:
        parsed = urlparse(url)
        if not parsed.netloc:
            return False, "Invalid URL format. Please include domain name."
        
        # Check for obviously invalid domains
        if len(parsed.netloc) < 4 or '.' not in parsed.netloc:
            return False, "Invalid domain name."
        
        return True, ""
    except Exception as e:
        return False, f"URL validation error: {str(e)}"


# -----------------------------
# Proxy Option (if AFC blocks direct requests)
# -----------------------------
USE_PROXY = False  # Set to True if you need to use a proxy service

def fetch_via_proxy(url: str) -> str:
    """Fetch content via a proxy service (for AFC restrictions)."""
    # Option 1: ScraperAPI (free tier available)
    # proxy_url = f"http://api.scraperapi.com?api_key=YOUR_KEY&url={url}"
    
    # Option 2: WebScraping.AI (free tier available)
    # proxy_url = f"https://api.webscraping.ai/html?api_key=YOUR_KEY&url={url}"
    
    # Option 3: ScrapingBee (free tier available)
    proxy_url = f"https://app.scrapingbee.com/api/v1/?api_key=YOUR_KEY&url={url}"
    
    response = requests.get(proxy_url, timeout=30)
    response.raise_for_status()
    return response.text
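
# To route fetches through one of the services above, flip the flag and the
# fetcher below will use it (YOUR_KEY is a placeholder for your provider's key):
#   USE_PROXY = True
#   html = fetch_via_proxy("https://example.com")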

# -----------------------------
# Enhanced Content Extraction
# -----------------------------
def extract_website_info(soup: BeautifulSoup, url: str) -> Dict[str, str]:
    """Extract key website elements for analysis."""
    info = {}
    
    # Title
    info['title'] = soup.title.string.strip() if soup.title and soup.title.string else ""
    
    # Meta description
    meta_desc = soup.find("meta", attrs={"name": "description"})
    info['meta_description'] = meta_desc.get("content", "").strip() if meta_desc else ""
    
    # Headings
    h1 = soup.find("h1")
    info['h1'] = h1.get_text(strip=True) if h1 else ""
    h2_tags = soup.find_all("h2", limit=5)
    info['h2s'] = " | ".join([h2.get_text(strip=True) for h2 in h2_tags])
    
    # CTAs (buttons and prominent links)
    cta_patterns = ['button', 'btn', 'cta', 'call-to-action']
    ctas = []
    for pattern in cta_patterns:
        elements = soup.find_all(class_=re.compile(pattern, re.I))
        ctas.extend([el.get_text(strip=True) for el in elements[:3]])
    info['ctas'] = " | ".join(ctas[:5]) if ctas else "No clear CTAs found"
    
    # Contact information
    contact_indicators = soup.find_all(string=re.compile(r'contact|email|phone|call', re.I))
    info['has_contact'] = len(contact_indicators) > 0
    
    # Links analysis
    links = soup.find_all('a', href=True)
    info['total_links'] = len(links)
    page_netloc = urlparse(url).netloc
    external_links = [
        link for link in links
        if urlparse(link['href']).netloc and urlparse(link['href']).netloc != page_netloc
    ]
    info['external_links'] = len(external_links)
    
    return info


def fetch_website_text(url: str) -> Tuple[str, bool]:
    """
    Fetch and parse website content.
    Returns (content_string, success_boolean)
    """
    socket.setdefaulttimeout(TIMEOUT)

    headers = {
        "User-Agent": (
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
            "AppleWebKit/537.36 (KHTML, like Gecko) "
            "Chrome/121.0 Safari/537.36"
        ),
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
        "Accept-Language": "en-US,en;q=0.9",
        "Accept-Encoding": "gzip, deflate",
        "DNT": "1",
    }

    for attempt in range(MAX_RETRIES):
        try:
            if USE_PROXY:
                html_content = fetch_via_proxy(url)
                soup = BeautifulSoup(html_content, "html.parser")
                break
            else:
                response = requests.get(
                    url,
                    headers=headers,
                    timeout=TIMEOUT,
                    allow_redirects=True,
                )
                response.raise_for_status()
                soup = BeautifulSoup(response.text, "html.parser")
                break
        except requests.exceptions.RequestException as e:
            if attempt == MAX_RETRIES - 1:
                raise
            logger.warning(f"Fetch attempt {attempt + 1} failed ({e}); retrying")
            time.sleep(1)

    # Remove noisy tags (note: dropping nav/footer also discards any contact
    # details that live there, so the extraction below only sees main content)
    for tag in soup(["script", "style", "noscript", "iframe", "nav", "footer"]):
        tag.decompose()

    # Extract structured info
    info = extract_website_info(soup, url)

    # Body content
    body_text = " ".join(soup.stripped_strings)
    body_text = body_text[:CONTENT_LIMIT]

    content = f"""
PAGE TITLE: {info['title']}
META DESCRIPTION: {info['meta_description']}
PRIMARY H1: {info['h1']}
KEY H2 HEADINGS: {info['h2s']}
CALL-TO-ACTION BUTTONS: {info['ctas']}
CONTACT INFO PRESENT: {"Yes" if info['has_contact'] else "No"}
LINK ANALYSIS: {info['total_links']} total links, {info['external_links']} external

VISIBLE CONTENT:
{body_text}
"""
    return content, True


def fetch_website_text_safe(url: str) -> Tuple[str, bool]:
    """Safe wrapper that never crashes."""
    try:
        return fetch_website_text(url)
    except requests.exceptions.Timeout:
        return """⚠️ Website took too long to respond (timeout).
This might indicate slow server performance.
Analysis will be based on URL structure and general best practices.""", False
    except requests.exceptions.SSLError:
        return """⚠️ SSL Certificate error detected.
This is a major trust issue that should be fixed immediately.
Analysis will include this critical security concern.""", False
    except requests.exceptions.ConnectionError:
        return """⚠️ Could not connect to website.
Website may be down or have DNS issues.
Analysis will be based on general best practices.""", False
    except Exception as e:
        return f"""⚠️ Unable to fully fetch website content.
Error: {str(e)}
Analysis will be based on available information and general best practices.""", False
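
# Callers can branch on the returned boolean, e.g.:
#   content, ok = fetch_website_text_safe("https://example.com")
#   if not ok:
#       ...  # analysis falls back to general best practices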


# -----------------------------
# Gemini Analysis
# -----------------------------
def analyze_website(api_key: str, url: str, industry: str, goal: str) -> str:
    """Main analysis function."""
    
    # Validate inputs
    if not api_key or len(api_key) < 20:
        return "❌ Please enter a valid Gemini API key. Get one at https://aistudio.google.com/apikey"

    if not url:
        return "❌ Please enter a website URL."

    # Normalize and validate URL
    url = normalize_url(url)
    is_valid, error_msg = validate_url(url)
    if not is_valid:
        return f"❌ {error_msg}"

    try:
        # Initialize client
        try:
            client = genai.Client(api_key=api_key)
        except Exception as e:
            return f"❌ Invalid API key. Please check your Gemini API key.\nError: {str(e)}"

        # Fetch website content
        website_text, fetch_success = fetch_website_text_safe(url)
        
        fetch_status = "✅ Full content analysis" if fetch_success else "⚠️ Limited analysis"

        # Build enhanced prompt
        prompt = f"""You are an AI consultant with the company Esquire IT helping small businesses improve their websites.

Business Context:
- Industry: {industry}
- Primary Goal: {goal}
- URL: {url}
- Content Fetch Status: {fetch_status}

Analyze the website content below and provide a comprehensive business-focused review.

Structure your response with clear sections:

## 1. Messaging Clarity (Score: X/10)
**Main Issue:** [One sentence summary]
**Recommendations:**
- [Specific actionable item]
- [Specific actionable item]
- [Specific actionable item]

## 2. Conversion Effectiveness (Score: X/10)
**Main Issue:** [One sentence summary]
**Recommendations:**
- [Specific actionable item]
- [Specific actionable item]
- [Specific actionable item]

## 3. Trust & Credibility (Score: X/10)
**Main Issue:** [One sentence summary]
**Recommendations:**
- [Specific actionable item]
- [Specific actionable item]
- [Specific actionable item]

## 4. User Experience Issues
- [Issue 1]
- [Issue 2]
- [Issue 3]

## 5. AI & Automation Opportunities
For a {industry} business with limited tech resources:
- [Practical AI tool/solution #1]
- [Practical AI tool/solution #2]
- [Practical AI tool/solution #3]

## Summary
**Overall Score:** X/100
**Top 3 Priority Fixes:**
1. [Most urgent fix]
2. [Second priority]
3. [Third priority]

Use clear, non-technical language that a small business owner would understand.

Website Content:
{website_text}
"""

        # Generate analysis
        response = client.models.generate_content(
            model="gemini-2.5-flash-lite",
            contents=prompt,
        )

        result = f"# Analysis for {url}\n\n{response.text}"
        
        if not fetch_success:
            result += "\n\n---\n⚠️ **Note:** Analysis was performed with limited content due to website access issues."
        
        return result

    except Exception as e:
        logger.error(f"Analysis error: {str(e)}")
        return f"❌ Error during analysis: {str(e)}\n\nPlease check your API key and try again."


# -----------------------------
# Gradio UI
# -----------------------------
with gr.Blocks(
    title="AI Website Review Tool",
    theme=gr.themes.Soft(),
    css="""
        .gradio-container {max-width: 900px !important}
        #output {min-height: 500px}
    """
) as demo:
    gr.Markdown("# πŸ” Esquire IT AI Website Review Tool")
    gr.Markdown(
        "Get actionable insights to improve your small business website using Esquire IT's AI analysis."
    )

    with gr.Row():
        with gr.Column():
            api_key = gr.Textbox(
                label="πŸ”‘ Gemini API Key",
                placeholder="Paste your Gemini API key here",
                type="password",
                info="Get your free API key at https://aistudio.google.com/apikey",
            )

            url = gr.Textbox(
                label="🌐 Website URL",
                placeholder="example.com or https://example.com",
                info="Enter the homepage or any page you want analyzed",
            )
            
            # gr.Examples(
            #    examples=[
            #        ["https://www.stripe.com"],
            #        ["https://www.shopify.com"],
            #    ],
            #    inputs=url,
            #    label="Try example websites",
            # )

            with gr.Row():
                industry = gr.Dropdown(
                    label="🏒 Industry",
                    choices=[
                        "General SMB",
                        "Law Firm",
                        "Hospitality",
                        "Healthcare",
                        "Real Estate",
                        "E-commerce",
                        "Consulting",
                        "Restaurant",
                        "Fitness",
                        "Education",
                    ],
                    value="General SMB",
                )

                goal = gr.Dropdown(
                    label="🎯 Primary Goal",
                    choices=[
                        "Generate leads",
                        "Sell products",
                        "Sell services",
                        "Build credibility",
                        "Educate visitors",
                        "Book appointments",
                    ],
                    value="Generate leads",
                )

            analyze_btn = gr.Button("🚀 Analyze Website", variant="primary", size="lg")

    with gr.Row():
        output = gr.Markdown(elem_id="output")

    analyze_btn.click(
        fn=analyze_website,
        inputs=[api_key, url, industry, goal],
        outputs=output,
    )

if __name__ == "__main__":
    # launch() serves locally by default; pass share=True for a temporary public link.
    demo.launch()