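"""
SEO Content Gap Analysis: a Gradio app that scores how well a client page and
competitor pages match a target keyword using OpenAI embeddings and cosine
similarity, then generates an LLM-written content-gap report.

Pipeline: fetch pages with Trafilatura -> chunk by HTML headers, then by
paragraphs -> embed chunks and the keyword -> rank chunks by similarity ->
summarize per page and report.

Rough dependency list (assumed package names; pin versions as appropriate):
    pip install gradio langchain langchain-openai scikit-learn numpy pandas trafilatura
"""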
import gradio as gr
import asyncio
from langchain.text_splitter import RecursiveCharacterTextSplitter, HTMLHeaderTextSplitter
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
from typing import List, Dict, Optional
import pandas as pd
from dataclasses import dataclass
import warnings
import re
import tempfile

# Trafilatura imports
from trafilatura import fetch_url, extract, bare_extraction

warnings.filterwarnings('ignore')

# Global variable to store the latest vector data
latest_vector_data = None

def prepare_download(vector_df):
    """Prepare the vector data for download"""
    global latest_vector_data
    if vector_df is not None and not vector_df.empty:
        # Save to a temporary file; close the handle first so to_csv can write to the path on all platforms
        temp_file = tempfile.NamedTemporaryFile(mode='w', suffix='.csv', delete=False, newline='', encoding='utf-8')
        temp_file.close()
        vector_df.to_csv(temp_file.name, index=False)
        latest_vector_data = temp_file.name
        return temp_file.name
    return None

def download_vector_data():
    """Return the prepared vector data file"""
    global latest_vector_data
    if latest_vector_data:
        return latest_vector_data
    return None

@dataclass
class ContentChunk:
    content: str
    url: str
    page_type: str  # 'client' or 'competitor'
    chunk_index: int
    chunk_type: str  # 'header_section', 'paragraph', or 'header_subsection'
    header_info: Optional[Dict] = None  # Will store header level and text
    similarity_score: float = 0.0
    
@dataclass
class PageAnalysis:
    url: str
    page_type: str
    total_chunks: int
    avg_similarity: float
    max_similarity: float
    top_chunks: List[ContentChunk]

class SEOContentAnalyzer:
    def __init__(self, api_key: str):
        self.embeddings = OpenAIEmbeddings(
            model="text-embedding-3-small",
            openai_api_key=api_key
        )
        self.llm = ChatOpenAI(
            model="gpt-4o-mini", 
            temperature=0.3,
            openai_api_key=api_key
        )
        
        # Header-based splitter (first level)
        self.html_splitter = HTMLHeaderTextSplitter(
            headers_to_split_on=[
                ("h1", "Header 1"),
                ("h2", "Header 2"), 
                ("h3", "Header 3"),
                ("h4", "Header 4"),
                ("h5", "Header 5"),
                ("h6", "Header 6"),
            ]
        )
        
        # Paragraph-based splitter (second level)
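        # chunk_size and chunk_overlap are measured in characters (the splitter's default length function is len)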
        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=600,
            chunk_overlap=100,
            separators=["\n\n", "\n", ".", "!", "?", ",", " ", ""]
        )
        
        self.all_chunks = []
        self.keyword_embedding = None
        
    async def fetch_and_clean_html(self, url: str) -> Dict:
        """Fetch and clean HTML content from URL using Trafilatura"""
        try:
            # Use trafilatura to fetch the URL (synchronous download)
            downloaded = fetch_url(url)
            
            if not downloaded:
                return {'url': url, 'success': False, 'error': 'Failed to download'}
            
            # Extract text content using trafilatura
            text_content = extract(downloaded, include_comments=False, include_tables=True)
            
            if not text_content:
                return {'url': url, 'success': False, 'error': 'No content extracted'}
            
            # Extract with metadata to get title and other info
            metadata_result = bare_extraction(downloaded, include_comments=False, include_tables=True)
            
            # bare_extraction may return a Document object or a plain dict depending on the trafilatura version
            title = ''
            if metadata_result:
                if isinstance(metadata_result, dict):
                    title = metadata_result.get('title') or ''
                else:
                    title = getattr(metadata_result, 'title', '') or ''
            
            # Extract HTML with formatting for header splitting
            html_content = extract(downloaded, output_format='xml', include_comments=False, include_tables=True)
            
            # Convert trafilatura XML to simple HTML for header splitting
            if html_content and len(html_content) > 100:
                # Simple conversion: replace trafilatura's <head rend="hN"> elements with matching <hN>...</hN> tags
                html_for_splitting = html_content
                html_for_splitting = re.sub(r'<head rend="(h[1-6])"[^>]*>(.*?)</head>', r'<\1>\2</\1>', html_for_splitting, flags=re.DOTALL)
                # Any remaining <head> elements without a usable rend level default to <h2>
                html_for_splitting = re.sub(r'<head[^>]*>(.*?)</head>', r'<h2>\1</h2>', html_for_splitting, flags=re.DOTALL)
                # Wrap in div
                html_for_splitting = f"<div>{html_for_splitting}</div>"
            else:
                # Fallback: create simple HTML structure from text
                # Try to detect headers in plain text
                lines = text_content.split('\n')
                html_lines = []
                for line in lines:
                    line = line.strip()
                    if line:
                        # Simple heuristic: short lines that might be headers
                        if len(line) < 100 and len(line) > 5 and not line.endswith('.') and not line.endswith(',') and not line.endswith(';'):
                            # Check if it looks like a header (title case, shorter, etc.)
                            if line.istitle() or line.isupper() or (len(line.split()) <= 8):
                                html_lines.append(f"<h3>{line}</h3>")
                            else:
                                html_lines.append(f"<p>{line}</p>")
                        else:
                            html_lines.append(f"<p>{line}</p>")
                
                html_for_splitting = f"<div>{''.join(html_lines)}</div>"
            
            word_count = len(text_content.split())
            
            return {
                'url': url,
                'title': title,
                'text': text_content,
                'html': html_for_splitting,
                'success': True,
                'word_count': word_count
            }
                
        except Exception as e:
            return {'url': url, 'success': False, 'error': str(e)}
    
    async def crawl_all_urls(self, client_url: str, competitor_urls: List[str]) -> Dict:
        """Crawl client and competitor URLs using Trafilatura"""
        all_urls = [client_url] + competitor_urls
        
        # Since trafilatura is synchronous, we'll run them sequentially
        # but we can still use async structure for consistency
        crawl_data = {
            'client': None,
            'competitors': [],
            'failed_urls': []
        }
        
        for i, url in enumerate(all_urls):
            result = await self.fetch_and_clean_html(url)
            
            if not result.get('success'):
                crawl_data['failed_urls'].append(result['url'])
                continue
                
            if i == 0:  # First URL is client
                crawl_data['client'] = result
            else:
                crawl_data['competitors'].append(result)
        
        return crawl_data
    
    def chunk_content(self, crawl_data: Dict) -> List[ContentChunk]:
        """Chunk all content using header-first, then paragraph-level splitting"""
        all_chunks = []
        
        # Process client content
        if crawl_data['client']:
            client_chunks = self._chunk_single_page(
                crawl_data['client'], 'client'
            )
            all_chunks.extend(client_chunks)
        
        # Process competitor content
        for comp_data in crawl_data['competitors']:
            comp_chunks = self._chunk_single_page(comp_data, 'competitor')
            all_chunks.extend(comp_chunks)
        
        self.all_chunks = all_chunks
        return all_chunks
    
    def _chunk_single_page(self, page_data: Dict, page_type: str) -> List[ContentChunk]:
        """Chunk a single page using header + paragraph strategy"""
        chunks = []
        chunk_index = 0
        
        try:
            # Step 1: Try header-based splitting first
            if 'html' in page_data:
                header_splits = self.html_splitter.split_text(page_data['html'])
                
                if header_splits and len(header_splits) > 1:
                    # We found headers, process each section
                    for split in header_splits:
                        header_info = split.metadata if hasattr(split, 'metadata') else {}
                        content = split.page_content if hasattr(split, 'page_content') else str(split)
                        
                        # If header section is large, split it further by paragraphs
                        if len(content) > 800:
                            sub_chunks = self.text_splitter.split_text(content)
                            for i, sub_chunk in enumerate(sub_chunks):
                                if len(sub_chunk.strip()) > 50:
                                    chunks.append(ContentChunk(
                                        content=sub_chunk.strip(),
                                        url=page_data['url'],
                                        page_type=page_type,
                                        chunk_index=chunk_index,
                                        chunk_type='header_subsection',
                                        header_info=header_info
                                    ))
                                    chunk_index += 1
                        else:
                            # Small header section, keep as is
                            if len(content.strip()) > 50:
                                chunks.append(ContentChunk(
                                    content=content.strip(),
                                    url=page_data['url'],
                                    page_type=page_type,
                                    chunk_index=chunk_index,
                                    chunk_type='header_section',
                                    header_info=header_info
                                ))
                                chunk_index += 1
                else:
                    # No meaningful headers found, fall back to paragraph splitting
                    self._add_paragraph_chunks(page_data, page_type, chunks, chunk_index)
            else:
                # No HTML available, use text splitting
                self._add_paragraph_chunks(page_data, page_type, chunks, chunk_index)
                
        except Exception:
            # Header-based splitting failed; fall back to plain paragraph chunking
            self._add_paragraph_chunks(page_data, page_type, chunks, chunk_index)
        
        return chunks
    
    def _add_paragraph_chunks(self, page_data: Dict, page_type: str, chunks: List, start_index: int):
        """Add paragraph-level chunks as fallback"""
        text_chunks = self.text_splitter.split_text(page_data['text'])
        chunk_index = start_index
        
        for chunk_text in text_chunks:
            if len(chunk_text.strip()) > 50:
                chunks.append(ContentChunk(
                    content=chunk_text.strip(),
                    url=page_data['url'],
                    page_type=page_type,
                    chunk_index=chunk_index,
                    chunk_type='paragraph',
                    header_info={}
                ))
                chunk_index += 1
    
    async def calculate_similarities(self, keyword: str) -> List[ContentChunk]:
        """Calculate cosine similarity between chunks and keyword"""
        if not self.all_chunks:
            raise ValueError("No chunks available. Run chunk_content first.")
        
        # Create embeddings for keyword
        self.keyword_embedding = await self.embeddings.aembed_query(keyword)
        
        # Create embeddings for all chunks
        chunk_texts = [chunk.content for chunk in self.all_chunks]
        chunk_embeddings = await self.embeddings.aembed_documents(chunk_texts)
        
        # Calculate similarities
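        # cosine_similarity returns a (1, n_chunks) matrix here; [0] flattens it to one score per chunk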
        similarities = cosine_similarity([self.keyword_embedding], chunk_embeddings)[0]
        
        # Update chunks with similarity scores
        for i, chunk in enumerate(self.all_chunks):
            chunk.similarity_score = float(similarities[i])
        
        # Sort by similarity score
        sorted_chunks = sorted(self.all_chunks, key=lambda x: x.similarity_score, reverse=True)
        
        return sorted_chunks
    
    def analyze_pages(self, sorted_chunks: List[ContentChunk]) -> Dict[str, PageAnalysis]:
        """Analyze performance by page"""
        # Group chunks by URL
        url_groups = {}
        for chunk in sorted_chunks:
            if chunk.url not in url_groups:
                url_groups[chunk.url] = []
            url_groups[chunk.url].append(chunk)
        
        page_analyses = {}
        for url, chunks in url_groups.items():
            page_type = chunks[0].page_type
            similarities = [chunk.similarity_score for chunk in chunks]
            
            analysis = PageAnalysis(
                url=url,
                page_type=page_type,
                total_chunks=len(chunks),
                avg_similarity=np.mean(similarities),
                max_similarity=np.max(similarities),
                top_chunks=sorted(chunks, key=lambda x: x.similarity_score, reverse=True)[:3]
            )
            
            page_analyses[url] = analysis
        
        return page_analyses
    
    async def generate_report(self, keyword: str, page_analyses: Dict[str, PageAnalysis], 
                            sorted_chunks: List[ContentChunk]) -> str:
        """Generate comprehensive SEO report"""
        # Prepare data for LLM
        client_analysis = next((p for p in page_analyses.values() if p.page_type == 'client'), None)
        competitor_analyses = [p for p in page_analyses.values() if p.page_type == 'competitor']
        
        # Get top performing content
        top_chunks = sorted_chunks[:5]
        client_top_chunks = [c for c in sorted_chunks if c.page_type == 'client'][:3]
        competitor_top_chunks = [c for c in sorted_chunks if c.page_type == 'competitor'][:5]
        
        # Format client analysis data safely
        client_url = client_analysis.url if client_analysis else 'No client data'
        client_chunks = client_analysis.total_chunks if client_analysis else 0
        client_avg = f"{client_analysis.avg_similarity:.4f}" if client_analysis else "0.0000"
        client_max = f"{client_analysis.max_similarity:.4f}" if client_analysis else "0.0000"
        
        # Create prompt for LLM
        prompt = f"""
        As an SEO expert, analyze this content relevance data for the keyword "{keyword}" and provide actionable insights.

        CLIENT PAGE PERFORMANCE:
        URL: {client_url}
        Total Chunks: {client_chunks}
        Average Similarity: {client_avg}
        Max Similarity: {client_max}

        TOP CLIENT CONTENT SECTIONS:
        {chr(10).join([f"Score {c.similarity_score:.4f}: {c.content[:200]}..." for c in client_top_chunks[:3]])}

        COMPETITOR PERFORMANCE:
        {chr(10).join([f"URL: {p.url}, Avg: {p.avg_similarity:.4f}, Max: {p.max_similarity:.4f}" for p in competitor_analyses])}

        TOP COMPETITOR CONTENT SECTIONS:
        {chr(10).join([f"Score {c.similarity_score:.4f} ({c.url}): {c.content[:200]}..." for c in competitor_top_chunks[:3]])}

        OVERALL TOP PERFORMING CONTENT:
        {chr(10).join([f"Score {c.similarity_score:.4f} ({c.page_type}): {c.content[:150]}..." for c in top_chunks])}

        Structure your analysis as follows:

        1. Top-performing page for this keyword: identify the strongest-performing page (ours or a competitor's), including its URL and why it performs well.
        2. Best-performing sections of content: highlight the specific sections or content chunks (with text snippets and scores) that perform best for the keyword.
        3. What our client's page does well: summarize the client page's strengths compared to competitors.
        4. What our client's page is missing: identify gaps or underdeveloped areas in the client's content compared to competitors.
        5. Specific, actionable recommendations, broken into clearly labeled subcategories such as:
           - Content Expansion: missing sections, new topics, or deeper explanations.
           - Content Enhancement: improvements to clarity, examples, visuals, or formatting.

        For each recommendation, include:
        - A clear title.
        - A brief explanation of why it matters.
        - A reference to the competitor content that demonstrates the point (URL, score, and content snippet).

        Output format:
        - Use clear section headings and bullet points for readability.
        - Include competitor references (URL, score, snippet) wherever applicable to support recommendations.
        - Focus only on content-related improvements, not general SEO optimizations or monitoring advice.

        The goal is to help the client improve content relevance, depth, and authority for the target keyword, grounded in the analysis of vector embeddings and competitive content.

        """
        
        response = await self.llm.ainvoke(prompt)
        return response.content



# Gradio Interface Functions
async def run_seo_analysis(api_key: str, keyword: str, client_url: str, competitor_urls_text: str, progress=gr.Progress()):
    """Main function to run SEO analysis"""
    
    # Create empty dataframes for error cases
    empty_summary_df = pd.DataFrame(columns=["URL", "Type", "Total Chunks", "Avg Similarity", "Max Similarity"])
    empty_content_df = pd.DataFrame(columns=["Rank", "Type", "Score", "Content Preview", "URL"])
    
    if not api_key:
        return "❌ Please provide your OpenAI API key", empty_summary_df, empty_content_df, empty_summary_df
    
    if not keyword or not client_url:
        return "❌ Please provide both keyword and client URL", empty_summary_df, empty_content_df, empty_summary_df
    
    # Parse competitor URLs
    competitor_urls = [url.strip() for url in competitor_urls_text.split('\n') if url.strip()]
    
    if not competitor_urls:
        return "❌ Please provide at least one competitor URL", empty_summary_df, empty_content_df, empty_summary_df
    
    try:
        progress(0.1, desc="Initializing analyzer with Trafilatura...")
        analyzer = SEOContentAnalyzer(api_key)
        
        progress(0.2, desc="Crawling websites with enhanced extraction...")
        crawl_data = await analyzer.crawl_all_urls(client_url, competitor_urls)
        
        # Check if we have any successful crawls
        total_successful = 0
        if crawl_data['client']:
            total_successful += 1
        total_successful += len(crawl_data['competitors'])
        
        if total_successful == 0:
            failed_urls = ', '.join(crawl_data['failed_urls'][:3])
            return f"❌ No URLs were successfully crawled. Failed URLs: {failed_urls}...", empty_summary_df, empty_content_df, empty_summary_df
        
        if not crawl_data['client']:
            return "❌ Failed to crawl client URL", empty_summary_df, empty_content_df, empty_summary_df
        
        if not crawl_data['competitors']:
            return "❌ Failed to crawl any competitor URLs", empty_summary_df, empty_content_df, empty_summary_df
        
        progress(0.4, desc="Processing content with intelligent chunking...")
        chunks = analyzer.chunk_content(crawl_data)
        
        if not chunks:
            return "❌ No content chunks were created from the crawled pages", empty_summary_df, empty_content_df, empty_summary_df
        
        progress(0.6, desc="Calculating semantic similarities...")
        sorted_chunks = await analyzer.calculate_similarities(keyword)
        
        progress(0.8, desc="Analyzing page performance...")
        page_analyses = analyzer.analyze_pages(sorted_chunks)
        
        progress(0.9, desc="Generating AI-powered SEO report...")
        report = await analyzer.generate_report(keyword, page_analyses, sorted_chunks)
        
        # Create summary data
        summary_data = []
        for url, analysis in page_analyses.items():
            summary_data.append({
                'URL': url,
                'Type': analysis.page_type.title(),
                'Total Chunks': analysis.total_chunks,
                'Avg Similarity': f"{analysis.avg_similarity:.4f}",
                'Max Similarity': f"{analysis.max_similarity:.4f}"
            })
        
        summary_df = pd.DataFrame(summary_data)
        
        # Create top content data
        top_content_data = []
        for i, chunk in enumerate(sorted_chunks[:10], 1):
            top_content_data.append({
                'Rank': i,
                'Type': chunk.page_type.title(),
                'Score': f"{chunk.similarity_score:.4f}",
                'Content Preview': chunk.content[:150] + "..." if len(chunk.content) > 150 else chunk.content,
                'URL': chunk.url
            })
        
        top_content_df = pd.DataFrame(top_content_data)
        
        # Create comprehensive vector data for download (similar to Colab export)
        vector_data = []
        for chunk in sorted_chunks:
            vector_data.append({
                'url': chunk.url,
                'page_type': chunk.page_type,
                'chunk_index': chunk.chunk_index,
                'chunk_type': chunk.chunk_type,
                'header_info': str(chunk.header_info) if chunk.header_info else '',
                'similarity_score': chunk.similarity_score,
                'content_preview': chunk.content[:100] + '...' if len(chunk.content) > 100 else chunk.content,
                'content_length': len(chunk.content),
                'full_content': chunk.content  # Include full content for download
            })
        
        vector_df = pd.DataFrame(vector_data)
        
        # The downloadable CSV is prepared by handle_analysis_and_download from the returned vector_df
        
        progress(1.0, desc="Analysis complete!")
        
        return report, summary_df, top_content_df, vector_df
        
    except Exception as e:
        return f"❌ Error during analysis: {str(e)}", empty_summary_df, empty_content_df, empty_summary_df

def sync_run_seo_analysis(*args):
    """Synchronous wrapper for the async function"""
    return asyncio.run(run_seo_analysis(*args))

def handle_analysis_and_download(api_key, keyword, client_url, competitor_urls_text, progress=gr.Progress()):
    """Handle analysis and prepare download file"""
    result = sync_run_seo_analysis(api_key, keyword, client_url, competitor_urls_text, progress)
    
    # If analysis was successful (4 outputs), prepare download
    if len(result) == 4 and isinstance(result[3], pd.DataFrame) and not result[3].empty:
        download_file_path = prepare_download(result[3])
        return result[0], result[1], result[2], download_file_path
    else:
        return result[0], result[1], result[2], None

# Create Gradio Interface with Glass Theme
def create_interface():
    with gr.Blocks(
        title="SEO Content Gap Analysis", 
        theme=gr.themes.Glass(
            primary_hue="blue",
            secondary_hue="slate", 
            neutral_hue="zinc",
            font="Inter"
        )
    ) as demo:
        gr.Markdown("""
        # 🔍 SEO Content Relevance Analysis
        
        Analyze how well your content matches a target keyword compared to competitors using AI-powered semantic similarity.
        
        **Enhanced with Trafilatura** for superior content extraction and intelligent header-based chunking.
        """)
        
        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown("### 🔑 Configuration")
                
                api_key = gr.Textbox(
                    label="OpenAI API Key",
                    placeholder="sk-...",
                    type="password",
                    info="Your OpenAI API key for embeddings and analysis"
                )
                
                keyword = gr.Textbox(
                    label="Target Keyword",
                    placeholder="e.g., python web scraping",
                    info="The keyword you want to optimize for"
                )
                
                client_url = gr.Textbox(
                    label="Your Page URL",
                    placeholder="https://yoursite.com/page",
                    info="The URL of your page to analyze"
                )
                
                competitor_urls = gr.Textbox(
                    label="Competitor URLs",
                    placeholder="https://competitor1.com/page\nhttps://competitor2.com/page",
                    lines=5,
                    info="One URL per line (2-5 competitors recommended)"
                )
                
                analyze_btn = gr.Button("🚀 Run Analysis", variant="primary", size="lg")
            
            with gr.Column(scale=2):
                gr.Markdown("### 📊 Results")
                
                with gr.Tabs():
                    with gr.TabItem("📝 SEO Report"):
                        report_output = gr.Markdown(
                            label="AI-Generated SEO Analysis Report",
                            value="Click 'Run Analysis' to generate your comprehensive SEO report with actionable insights..."
                        )
                    
                    with gr.TabItem("📈 Page Summary"):
                        summary_output = gr.Dataframe(
                            label="Page Performance Summary",
                            headers=["URL", "Type", "Total Chunks", "Avg Similarity", "Max Similarity"],
                            value=pd.DataFrame(columns=["URL", "Type", "Total Chunks", "Avg Similarity", "Max Similarity"])
                        )
                    
                    with gr.TabItem("🎯 Top Content"):
                        top_content_output = gr.Dataframe(
                            label="Top Performing Content Sections",
                            headers=["Rank", "Type", "Score", "Content Preview", "URL"],
                            value=pd.DataFrame(columns=["Rank", "Type", "Score", "Content Preview", "URL"])
                        )
                    
                    with gr.TabItem("📊 Vector Data"):
                        with gr.Row():
                            with gr.Column():
                                gr.Markdown("### 📥 Download Complete Analysis Data")
                                gr.Markdown("""
                                **Contains:**
                                - All content chunks with similarity scores
                                - Full content text for each chunk
                                - Header information and chunk types
                                - Perfect for further analysis in Excel/Python
                                """)
                                
                                download_file = gr.File(
                                    label="Vector Data CSV (Generated after analysis)",
                                    interactive=False
                                )
        
        # Enhanced example section
        gr.Markdown("""
        ### 💡 Example Usage
        
        **Keyword:** `content marketing strategy`  
        **Your URL:** `https://yoursite.com/content-marketing-guide`  
        **Competitors:**
        ```
        https://hubspot.com/content-marketing
        https://contentmarketinginstitute.com/strategy
        https://neilpatel.com/blog/content-marketing-strategy
        ```
        
        ### ✨ What's New
        - **Enhanced Content Extraction**: Uses Trafilatura for better content quality
        - **Intelligent Chunking**: Header-aware splitting for more accurate analysis  
        - **Improved Accuracy**: Better handling of complex page structures
        - **Glass Theme**: Modern, sleek interface design
        """)
        
        # Event handlers
        analyze_btn.click(
            fn=handle_analysis_and_download,
            inputs=[api_key, keyword, client_url, competitor_urls],
            outputs=[report_output, summary_output, top_content_output, download_file]
        )
        
        gr.Markdown("""
        ### ⚠️ Important Notes
        - Analysis may take 2-5 minutes depending on content size
        - Requires OpenAI API key (costs ~$0.01-0.10 per analysis)
        - Enhanced extraction works best on article-style pages, but handles most web content
        - Pages are fetched sequentially with Trafilatura's default download settings; check each site's robots.txt and terms before crawling at scale
        - Glass theme provides a modern, professional appearance
        """)
    
    return demo

# Launch the app
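# (passing share=True to demo.launch() would additionally create a temporary public link, if needed)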
if __name__ == "__main__":
    demo = create_interface()
    demo.launch()