import streamlit as st
import pandas as pd
import numpy as np
import networkit as nk
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import time
import gc
from io import StringIO
import random
from collections import defaultdict

# Set page config
st.set_page_config(
    page_title="Website Link Impact Analyzer",
    page_icon="🔗",
    layout="wide",
    initial_sidebar_state="expanded"
)

# Global cache for WWW graph
if 'www_graph_cache' not in st.session_state:
    st.session_state.www_graph_cache = None


def load_graph_from_csv_networkit(file_content, file_name):
    """
    Load page links from a CSV file into a NetworKit graph - OPTIMIZED VERSION.
    """
    try:
        # Read CSV content with optimized settings
        df = pd.read_csv(
            StringIO(file_content),
            dtype={'FROM': 'string', 'TO': 'string'},  # Specify types upfront
            na_filter=True,         # Enable NA filtering
            skip_blank_lines=True   # Skip empty lines
        )

        # Check required columns with user-friendly names
        required_cols = ['FROM', 'TO']
        if not all(col in df.columns for col in required_cols):
            st.error(f"""
            ❌ **File Format Error**

            Your CSV file needs these column names:
            - **FROM** (the page that has the link)
            - **TO** (the page being linked to)

            Your file has: {', '.join(df.columns)}
            """)
            return None, None, None

        # Fast data cleaning - vectorized operations
        initial_rows = len(df)
        df = df.dropna(subset=['FROM', 'TO'])  # Remove rows with missing values

        if len(df) == 0:
            st.error(f"❌ No valid page links found in {file_name}")
            return None, None, None

        # Show cleaning stats if significant data was removed
        if initial_rows - len(df) > initial_rows * 0.1:  # More than 10% removed
            st.warning(f"⚠️ Removed {initial_rows - len(df)} rows with missing data from {file_name}")

        # OPTIMIZED: get unique nodes using pandas operations (much faster than Python sets)
        all_nodes_series = pd.concat([df['FROM'], df['TO']]).drop_duplicates()
        all_nodes = all_nodes_series.tolist()

        # OPTIMIZED: map each node name to a NetworKit integer node id
        node_to_idx = {node: i for i, node in enumerate(all_nodes)}

        # Create NetworKit graph
        G = nk.Graph(n=len(all_nodes), weighted=False, directed=True)

        # OPTIMIZED: convert node names to indices with vectorized pandas .map(),
        # then add the edges in a tight loop (much faster than iterrows)
        source_indices = df['FROM'].map(node_to_idx).values
        target_indices = df['TO'].map(node_to_idx).values
        for src_idx, tgt_idx in zip(source_indices, target_indices):
            G.addEdge(int(src_idx), int(tgt_idx))

        return G, all_nodes, node_to_idx

    except Exception as e:
        st.error(f"❌ **Error reading file**: {str(e)}")
        st.info("💡 **Tip**: Make sure your file is a valid CSV with FROM and TO columns for page links")
        return None, None, None
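# ---------------------------------------------------------------------------
# Usage sketch (illustrative only; never called by the app): how the loader
# above behaves on a minimal in-memory CSV. The URLs are hypothetical.
# ---------------------------------------------------------------------------
def _demo_load_graph():
    sample_csv = (
        "FROM,TO\n"
        "mysite.com/a,mysite.com/b\n"
        "mysite.com/b,mysite.com/c\n"
        "mysite.com/c,mysite.com/a\n"
    )
    G, all_nodes, node_to_idx = load_graph_from_csv_networkit(sample_csv, "sample.csv")
    # Expect a directed 3-node, 3-edge cycle
    print(G.numberOfNodes(), G.numberOfEdges())  # -> 3 3
    print(node_to_idx["mysite.com/a"])           # -> 0 (first node encountered)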
""" cache_key = (n_nodes, m_edges, seed) if (st.session_state.www_graph_cache is not None and st.session_state.www_graph_cache[0] == cache_key): return st.session_state.www_graph_cache[1] # Set random seed for NetworKit nk.setSeed(seed, False) # Always use Barabási-Albert generator for realistic web structure generator = nk.generators.BarabasiAlbertGenerator(k=m_edges, nMax=n_nodes, n0=m_edges) www_graph = generator.generate() # Make it directed if not www_graph.isDirected(): directed_graph = nk.Graph(n=www_graph.numberOfNodes(), weighted=False, directed=True) for u, v in www_graph.iterEdges(): directed_graph.addEdge(u, v) directed_graph.addEdge(v, u) # Make bidirectional www_graph = directed_graph # Cache the result st.session_state.www_graph_cache = (cache_key, www_graph) return www_graph def cleanup_memory(): """Clean up memory after large graph operations.""" gc.collect() # Force garbage collection def process_configuration_networkit(www_graph, kalicube_graph, kalicube_nodes, min_connections=5, max_connections=50): """ Test how your page network performs in the real internet using NetworKit. """ # Get WWW graph info www_node_count = www_graph.numberOfNodes() kalicube_node_count = len(kalicube_nodes) # Create node mapping for kalicube nodes kalicube_offset = www_node_count kalicube_node_mapping = {} for i, node in enumerate(kalicube_nodes): new_node_id = kalicube_offset + i kalicube_node_mapping[node] = new_node_id # Create merged graph total_nodes = www_node_count + kalicube_node_count merged_graph = nk.Graph(n=total_nodes, weighted=False, directed=True) # Add WWW edges for u, v in www_graph.iterEdges(): merged_graph.addEdge(u, v) # Add kalicube edges with new node IDs kalicube_idx_to_node = {i: node for i, node in enumerate(kalicube_nodes)} for u, v in kalicube_graph.iterEdges(): source_node = kalicube_idx_to_node[u] target_node = kalicube_idx_to_node[v] new_source_id = kalicube_node_mapping[source_node] new_target_id = kalicube_node_mapping[target_node] merged_graph.addEdge(new_source_id, new_target_id) # Randomly connect kalicube pages to WWW n_connections = min(min_connections, www_node_count, kalicube_node_count) www_sample = random.sample(range(www_node_count), n_connections) kalicube_sample = random.sample(list(kalicube_node_mapping.values()), n_connections) for www_node, kalicube_node in zip(www_sample, kalicube_sample): merged_graph.addEdge(www_node, kalicube_node) # Calculate PageRank using NetworKit with optimized settings for large graphs try: if total_nodes >= 500000: # Use more relaxed tolerance for very large graphs pagerank_algo = nk.centrality.PageRank(merged_graph, damp=0.85, tol=1e-4) else: pagerank_algo = nk.centrality.PageRank(merged_graph, damp=0.85, tol=1e-6) pagerank_algo.run() pagerank_values = pagerank_algo.scores() except Exception as e: st.warning(f"PageRank calculation failed: {e}. Using degree centrality instead.") # Fallback to degree centrality degree_algo = nk.centrality.DegreeCentrality(merged_graph, normalized=True) degree_algo.run() pagerank_values = degree_algo.scores() # Extract PageRank values for kalicube nodes pagerank_dict = {} for node, node_id in kalicube_node_mapping.items(): pagerank_dict[node] = pagerank_values[node_id] if node_id < len(pagerank_values) else 0.0 return pagerank_dict def create_comparison_dataframe(pagerank_old_dict, pagerank_new_dict, simulation_id): """ Compare before and after results. 
""" # Find pages that appear in both tests old_urls = set(pagerank_old_dict.keys()) new_urls = set(pagerank_new_dict.keys()) common_urls = old_urls & new_urls if not common_urls: return pd.DataFrame() # Create comparison data comparison_data = [] # Sort pages by importance for ranking old_sorted = sorted(pagerank_old_dict.items(), key=lambda x: x[1], reverse=True) new_sorted = sorted(pagerank_new_dict.items(), key=lambda x: x[1], reverse=True) # Create ranking mappings old_ranks = {url: rank + 1 for rank, (url, _) in enumerate(old_sorted)} new_ranks = {url: rank + 1 for rank, (url, _) in enumerate(new_sorted)} for url in common_urls: importance_before = pagerank_old_dict[url] importance_after = pagerank_new_dict[url] rank_before = old_ranks[url] rank_after = new_ranks[url] importance_change = importance_after - importance_before importance_change_pct = (importance_change / importance_before) * 100 if importance_before > 0 else 0 rank_change = rank_after - rank_before rank_change_pct = (rank_change / rank_before) * 100 if rank_before > 0 else 0 comparison_data.append({ 'Page_URL': url, 'Importance_Before': importance_before, 'Importance_After': importance_after, 'Rank_Before': rank_before, 'Rank_After': rank_after, 'Importance_Change': importance_change, 'Importance_Change_%': importance_change_pct, 'Rank_Change': rank_change, 'Rank_Change_%': rank_change_pct, 'Test_Number': simulation_id }) return pd.DataFrame(comparison_data) def run_single_simulation(simulation_id, kalicube_graph_old, kalicube_graph_new, kalicube_nodes_old, kalicube_nodes_new, www_nodes, www_edges, min_conn, max_conn): """ Run one test comparing before and after. """ sim_seed = 42 + simulation_id random.seed(sim_seed) np.random.seed(sim_seed) # Create internet simulation www_graph = create_www_graph_networkit(www_nodes, www_edges, sim_seed) # Test original setup importance_old_dict = process_configuration_networkit( www_graph, kalicube_graph_old, kalicube_nodes_old, min_conn, max_conn ) # Test new setup importance_new_dict = process_configuration_networkit( www_graph, kalicube_graph_new, kalicube_nodes_new, min_conn, max_conn ) # Compare results comparison_df = create_comparison_dataframe( importance_old_dict, importance_new_dict, simulation_id ) # Clean up memory after large operations cleanup_memory() if comparison_df.empty: return None, None # Calculate summary total_before = comparison_df['Importance_Before'].sum() total_after = comparison_df['Importance_After'].sum() total_change = total_after - total_before change_pct = (total_change / total_before) * 100 if total_before > 0 else 0 rank_changes = comparison_df['Rank_Change'].values rank_improvements = np.sum(rank_changes < 0) # Lower rank number = better rank_drops = np.sum(rank_changes > 0) rank_unchanged = np.sum(rank_changes == 0) avg_rank_change = np.mean(rank_changes) result = { 'Test_Number': simulation_id + 1, 'Total_Before': total_before, 'Total_After': total_after, 'Total_Change': total_change, 'Change_Percent': change_pct, 'Pages_Improved': rank_improvements, 'Pages_Dropped': rank_drops, 'Pages_Unchanged': rank_unchanged, 'Avg_Rank_Change': avg_rank_change } return result, comparison_df def get_traffic_light_status(results_df, confidence_threshold=0.7): """ Simple decision guidance based on test results. 
""" total_tests = len(results_df) positive_outcomes = (results_df['Total_Change'] > 0).sum() negative_outcomes = (results_df['Total_Change'] < 0).sum() positive_ratio = positive_outcomes / total_tests negative_ratio = negative_outcomes / total_tests mean_impact = results_df['Change_Percent'].mean() # Simple traffic light logic if positive_ratio >= confidence_threshold and mean_impact > 1.0: return "🟢", "✅ GO AHEAD - Your changes look great!", "go", "Most tests show good results. Your changes should help your page rankings." elif positive_ratio >= confidence_threshold and mean_impact > 0: return "🟡", "⚠️ PROCEED CAREFULLY - Small improvements expected", "caution", "Tests show some improvement, but it's modest. Consider if the effort is worth it." elif negative_ratio >= confidence_threshold and mean_impact < -1.0: return "🔴", "❌ STOP - Your changes may hurt your page rankings", "stop", "Most tests show negative results. Consider revising your changes before implementing." elif negative_ratio >= confidence_threshold and mean_impact < 0: return "🟡", "⚠️ PROCEED CAREFULLY - Some negative impact expected", "caution", "Tests show some negative impact. Monitor closely if you proceed." else: return "🟡", "🤷 MIXED RESULTS - Hard to predict", "caution", "Test results are mixed. Consider running more tests or getting expert advice." def create_simple_visualizations(results_df, all_comparisons_df, confidence_threshold=0.7): """ Create easy-to-understand visualizations. """ # Traffic Light Assessment traffic_emoji, traffic_status, traffic_level, explanation = get_traffic_light_status(results_df, confidence_threshold) st.markdown("## 🚦 **Should You Make These Changes?**") # Big, clear recommendation if traffic_level == "go": st.success(f"# {traffic_emoji}") st.success(f"## {traffic_status}") st.info(f"**Why:** {explanation}") elif traffic_level == "stop": st.error(f"# {traffic_emoji}") st.error(f"## {traffic_status}") st.warning(f"**Why:** {explanation}") else: st.warning(f"# {traffic_emoji}") st.warning(f"## {traffic_status}") st.info(f"**Why:** {explanation}") # Simple metrics in plain English st.markdown("### 📊 **Test Results Summary**") col1, col2, col3 = st.columns(3) with col1: positive_tests = (results_df['Total_Change'] > 0).sum() total_tests = len(results_df) st.metric("Tests Showing Improvement", f"{positive_tests} out of {total_tests}", delta=f"{positive_tests/total_tests:.0%} positive") with col2: mean_change = results_df['Change_Percent'].mean() st.metric("Average Impact on Rankings", f"{mean_change:.1f}%", delta="Higher is better") with col3: improved_sites = results_df['Pages_Improved'].mean() st.metric("Pages That Improved (avg)", f"{improved_sites:.0f}", delta="per test") def main(): st.title("🔗 Page Link Impact Analyzer (Powered by NetworKit)") st.markdown("**Find out if your page link changes will help or hurt your search rankings**") # Simple intro st.info(""" 👋 **Welcome!** This tool helps you test page link changes before you make them. **What it does:** Simulates how your link changes might affect your page rankings in search engines. **What you need:** Two CSV files - one with your current page links, one with your planned changes. ⚡ **Now powered by NetworKit** - A high-performance network analysis toolkit for faster and more efficient analysis of large-scale networks! """) # Sidebar - simplified st.sidebar.header("⚙️ Settings") # File uploads with better guidance st.sidebar.markdown("### 📁 **Step 1: Upload Your Files**") st.sidebar.markdown("*Need help with file format? 
def main():
    st.title("🔗 Page Link Impact Analyzer (Powered by NetworKit)")
    st.markdown("**Find out if your page link changes will help or hurt your search rankings**")

    # Simple intro
    st.info("""
    👋 **Welcome!** This tool helps you test page link changes before you make them.

    **What it does:** Simulates how your link changes might affect your page rankings in search engines.

    **What you need:** Two CSV files - one with your current page links, one with your planned changes.

    ⚡ **Now powered by NetworKit** - a high-performance network analysis toolkit for faster and more efficient analysis of large-scale networks!
    """)

    # Sidebar - simplified
    st.sidebar.header("⚙️ Settings")

    # File uploads with better guidance
    st.sidebar.markdown("### 📁 **Step 1: Upload Your Files**")
    st.sidebar.markdown("*Need help with file format? Check the 'File Format Help' section below.*")

    old_file = st.sidebar.file_uploader("Current Page Links (CSV)", type=['csv'], key="old",
                                        help="Upload a CSV file with your current page links")
    new_file = st.sidebar.file_uploader("Planned Page Links (CSV)", type=['csv'], key="new",
                                        help="Upload a CSV file with your planned page links")

    # Simplified settings
    st.sidebar.markdown("### 🎯 **Step 2: Test Settings**")

    num_tests = st.sidebar.select_slider(
        "How many tests to run?",
        options=[5, 10, 15, 20, 25, 30],
        value=10,
        help="More tests = more reliable results, but takes longer"
    )

    internet_size = st.sidebar.select_slider(
        "Internet simulation size",
        options=[
            "Large (100K sites)",
            "Very Large (250K sites)",
            "Huge (500K sites)",
            "Massive (750K sites)",
            "Ultra (1M sites)"
        ],
        value="Large (100K sites)",
        help="Larger = more realistic but much slower. WARNING: 500K+ may take several minutes per test!"
    )

    # Convert internet size to numbers
    size_map = {
        "Large (100K sites)": 100000,
        "Very Large (250K sites)": 250000,
        "Huge (500K sites)": 500000,
        "Massive (750K sites)": 750000,
        "Ultra (1M sites)": 1000000
    }
    www_nodes = size_map[internet_size]

    # Performance warnings
    if www_nodes >= 500000:
        st.sidebar.warning(f"""
        ⚠️ **Performance Warning**: {internet_size} with Barabási-Albert will be very slow!
        Expect 4-15 minutes per test. Consider using fewer tests.
        """)
    elif www_nodes >= 250000:
        st.sidebar.info(f"""
        ℹ️ **Note**: {internet_size} with Barabási-Albert may take 30-90 seconds per test.
        """)

    # Barabási-Albert info
    with st.sidebar.expander("🔬 About Barabási-Albert Model"):
        st.markdown("""
        **Why Barabási-Albert?**
        - Creates **scale-free networks** like the real web
        - **Preferential attachment**: popular pages get more links
        - **Power-law distribution**: the most realistic web simulation
        - Slower than other models, but much more accurate

        **Perfect for**: testing how link changes affect rankings in realistic web conditions.
        """)
""") # Advanced settings (hidden by default) with st.sidebar.expander("🔧 Advanced Settings (Optional)"): confidence_level = st.slider("Confidence level for recommendations", 60, 90, 70, 5, help="Higher = stricter requirements for green/red lights") show_details = st.checkbox("Show detailed results", False) auto_run = st.checkbox("Auto-run when files uploaded", False) confidence_threshold = confidence_level / 100 # Main content if old_file is not None and new_file is not None: # Load files old_content = old_file.getvalue().decode('utf-8') new_content = new_file.getvalue().decode('utf-8') # Show file status col1, col2 = st.columns(2) with col1: st.success(f"✅ **Current Page Links**: {old_file.name}") with col2: st.success(f"✅ **Planned Page Links**: {new_file.name}") # Load and validate files with st.spinner("Reading your files..."): kalicube_graph_old, kalicube_nodes_old, kalicube_url_mapping_old = \ load_graph_from_csv_networkit(old_content, old_file.name) kalicube_graph_new, kalicube_nodes_new, kalicube_url_mapping_new = \ load_graph_from_csv_networkit(new_content, new_file.name) if kalicube_graph_old is not None and kalicube_graph_new is not None: # Show what we found st.markdown("### 📈 **What We Found in Your Files**") info_col1, info_col2 = st.columns(2) with info_col1: st.info(f""" **Current Setup:** - {len(kalicube_nodes_old)} pages - {kalicube_graph_old.numberOfEdges()} links between them """) with info_col2: st.info(f""" **Planned Setup:** - {len(kalicube_nodes_new)} pages - {kalicube_graph_new.numberOfEdges()} links between them """) # Big, obvious run button st.markdown("### 🚀 **Step 3: Run the Test**") run_button = st.button("🔬 Test My Changes", type="primary", use_container_width=True) if run_button or auto_run: # Automatically reduce tests for very large simulations if www_nodes >= 750000 and num_tests > 10: st.warning(f"⚠️ Automatically reducing tests from {num_tests} to 10 for {internet_size} to prevent timeout.") num_tests = min(num_tests, 10) elif www_nodes >= 500000 and num_tests > 15: st.warning(f"⚠️ Automatically reducing tests from {num_tests} to 15 for {internet_size}.") num_tests = min(num_tests, 15) # Progress with encouraging messages progress_bar = st.progress(0) status_text = st.empty() # Dynamic messages based on simulation size if www_nodes >= 500000: encouraging_messages = [ f"🔬 Creating massive Barabási-Albert simulation ({www_nodes:,} sites)... This will take a while!", "🌐 Building scale-free network topology with preferential attachment...", "📊 Computing PageRank scores for massive scale-free network...", "🎯 Running test with millions of preferential connections...", "📈 Almost there! Processing final calculations..." ] else: encouraging_messages = [ f"🔬 Setting up Barabási-Albert simulation ({www_nodes:,} sites)...", "🌐 Creating scale-free network with preferential attachment...", "📊 Calculating page importance scores...", "🎯 Running tests with different scenarios...", "📈 Almost done! Analyzing results..." 
                all_results = []
                all_comparisons = []
                start_time = time.time()

                # Run tests with encouragement
                for i in range(num_tests):
                    msg_idx = min(i // max(1, num_tests // len(encouraging_messages)),
                                  len(encouraging_messages) - 1)
                    status_text.text(f"{encouraging_messages[msg_idx]} (Test {i + 1}/{num_tests})")
                    progress_bar.progress((i + 1) / num_tests)

                    result, comparison_df = run_single_simulation(
                        i, kalicube_graph_old, kalicube_graph_new,
                        kalicube_nodes_old, kalicube_nodes_new,
                        www_nodes, 2, 5, 25  # simplified parameters: www_edges, min_conn, max_conn
                    )

                    if result is not None:
                        all_results.append(result)
                        all_comparisons.append(comparison_df)

                end_time = time.time()

                # Clear progress
                progress_bar.empty()
                status_text.empty()

                if all_results:
                    results_df = pd.DataFrame(all_results)
                    all_comparisons_df = pd.concat(all_comparisons, ignore_index=True) if all_comparisons else pd.DataFrame()

                    # Show results
                    st.success(f"🎉 **Test Complete!** Ran {len(all_results)} tests in {end_time - start_time:.0f} seconds")

                    # Create simple visualizations
                    create_simple_visualizations(results_df, all_comparisons_df, confidence_threshold)

                    # Download section
                    st.markdown("### 💾 **Save Your Results**")
                    col1, col2 = st.columns(2)

                    with col1:
                        csv_summary = results_df.to_csv(index=False)
                        st.download_button(
                            label="📊 Download Summary Report",
                            data=csv_summary,
                            file_name=f"website_impact_summary_{int(time.time())}.csv",
                            mime="text/csv"
                        )

                    with col2:
                        if not all_comparisons_df.empty:
                            csv_detailed = all_comparisons_df.to_csv(index=False)
                            st.download_button(
                                label="📋 Download Detailed Results",
                                data=csv_detailed,
                                file_name=f"website_impact_detailed_{int(time.time())}.csv",
                                mime="text/csv"
                            )

                    # Show detailed results if requested
                    if show_details and not all_comparisons_df.empty:
                        st.markdown("### 🔍 **Detailed Results** (For the curious)")

                        # Simple filter
                        st.markdown("**Filter results:**")
                        filter_col1, filter_col2 = st.columns(2)
                        with filter_col1:
                            min_change = st.number_input("Show changes above (%)",
                                                         value=float(all_comparisons_df['Importance_Change_%'].min()),
                                                         step=0.1)

                        # Apply filter
                        filtered_df = all_comparisons_df[all_comparisons_df['Importance_Change_%'] >= min_change]

                        # Rename columns for clarity (the selection below uses the renamed labels)
                        display_df = filtered_df.copy()
                        display_df = display_df.rename(columns={
                            'Page_URL': 'Page URL',
                            'Importance_Change_%': 'Impact (%)',
                            'Rank_Change': 'Rank Change',
                            'Test_Number': 'Test #'
                        })

                        st.dataframe(
                            display_df[['Page URL', 'Impact (%)', 'Rank Change', 'Test #']]
                                .sort_values('Impact (%)', ascending=False),
                            use_container_width=True,
                            height=300
                        )
                else:
                    st.error("❌ No test results generated. Please check your files and try again.")
    else:
        # Help section when no files uploaded
        st.markdown("---")

        # File format help
        with st.expander("📋 **File Format Help** - How to prepare your CSV files"):
            st.markdown("""
            ### ✅ **Correct Format**
            Your CSV files need exactly these column names:
            - **FROM** = the page that has the link
            - **TO** = the page being linked to

            ### 📝 **Example:**
            ```
            FROM,TO
            mysite.com/about,mysite.com/contact
            mysite.com/blog/post1,partner.com/resource
            partner.com/page,mysite.com/services
            ```

            ### 💡 **Tips:**
            - Use any spreadsheet program (Excel, Google Sheets) to create these
            - Save as CSV format
            - Include full URLs or page paths
            - Make sure page URLs are consistent (mysite.com/page vs mysite.com/page/ are different!)
            - Each row represents one link from one page to another
            """)
        with st.expander("🤔 **What This Tool Actually Does** - Explained Simply"):
            st.markdown("""
            ### 🌐 **The Big Picture**
            When you change links between your pages, it affects how search engines see your site.
            But it's hard to predict the exact impact, because the internet is huge and constantly changing.

            ### 🧪 **Our Solution: Virtual Testing**
            1. **We simulate the internet** - create a virtual version with hundreds of thousands or millions of pages
            2. **We test your changes** - run your current page links vs. your planned links
            3. **We repeat many times** - each test uses slightly different internet conditions
            4. **We analyze the pattern** - look at whether your changes usually help or hurt

            ### 🚦 **The Traffic Light System**
            - **🟢 Green = Go ahead** - most tests show your changes help
            - **🟡 Yellow = Be careful** - mixed results or small impact
            - **🔴 Red = Stop** - most tests show your changes hurt

            ### 🎯 **Why This Works**
            Instead of guessing, you get data-driven confidence about your page link changes!

            ### ⚡ **Powered by NetworKit**
            This version uses NetworKit, a high-performance network analysis toolkit that's much faster
            than traditional tools for analyzing large networks. It uses the **Barabási-Albert model**
            to create realistic scale-free networks that mimic the actual structure of the web!

            ### 🔬 **Large-Scale Barabási-Albert Simulations**
            - **100K sites**: ~10-30 seconds per test
            - **250K sites**: ~30-90 seconds per test
            - **500K sites**: ~2-5 minutes per test
            - **750K sites**: ~4-8 minutes per test
            - **1M sites**: ~6-15 minutes per test

            **Note**: Barabási-Albert is more computationally intensive than other generators,
            but it produces the most realistic web-like structure, with power-law degree distributions.
            """)

        with st.expander("❓ **Common Questions**"):
            st.markdown("""
            **Q: How accurate is this?**
            A: The tool shows trends and probabilities, not exact predictions. It's like weather forecasting - very useful for planning!

            **Q: How long does it take?**
            A: From 30 seconds to 10 minutes per test, depending on simulation size. NetworKit makes it much faster than before!

            **Q: What if I get yellow results?**
            A: Yellow means proceed carefully. Consider running more tests, getting expert advice, or monitoring closely if you implement.

            **Q: Can I test multiple scenarios?**
            A: Yes! Just upload different "planned changes" files to compare options.

            **Q: What are the file size limits?**
            A: The tool works best with up to 50,000 page links. Larger files may be slow.

            **Q: What's the difference between pages and websites?**
            A: Pages are specific URLs (like mysite.com/about), while websites are domains (like mysite.com). This tool analyzes individual page links.

            **Q: What's NetworKit?**
            A: NetworKit is a high-performance network analysis toolkit with optimized C++ algorithms. This tool specifically uses the **Barabási-Albert model** to generate scale-free networks that accurately represent real web topology.

            **Q: Why Barabási-Albert specifically?**
            A: The Barabási-Albert model creates "scale-free" networks with preferential attachment - meaning popular pages get more links, just like the real web. This produces the most realistic simulation of how link changes affect rankings.

            **Q: Which simulation size should I choose?**
            A: Start with 100K for testing. Use 250K-500K for realistic results. Only use 750K+ if you have time and want maximum realism. Larger = more realistic but much slower.
            **Q: Why does Barabási-Albert take longer than other generators?**
            A: Barabási-Albert builds networks step-by-step with preferential attachment, which is more computationally intensive but produces much more realistic web-like structures than faster alternatives.
            """)


if __name__ == "__main__":
    main()
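# ---------------------------------------------------------------------------
# Running the app (assuming this file is saved as app.py and the streamlit,
# pandas, numpy, and networkit packages are installed):
#
#   streamlit run app.py
#
# The _demo_* functions above are illustrative sketches only; they are not
# wired into the app, but can be called from a Python shell to experiment.
# ---------------------------------------------------------------------------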