import streamlit as st
import pandas as pd
import numpy as np
import grape
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import time
import gc
from io import StringIO
import random
from collections import defaultdict
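# Pipeline overview (as implemented below):
#   1. load_graph_from_csv_grape() turns each uploaded FROM/TO CSV into a directed Grape graph.
#   2. create_www_graph_grape() builds a simulated "internet" via preferential attachment.
#   3. process_configuration_grape() merges the uploaded pages into that simulation and scores
#      every page with PageRank (falling back to degree centrality if PageRank fails).
#   4. run_single_simulation() / create_comparison_dataframe() compare the "before" and "after"
#      link structures across several randomized runs, and the Streamlit UI summarizes the
#      outcome as a traffic-light recommendation.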
# Set page config
st.set_page_config(
page_title="Website Link Impact Analyzer",
page_icon="πŸ”—",
layout="wide",
initial_sidebar_state="expanded"
)
# Global cache for WWW graph
if 'www_graph_cache' not in st.session_state:
st.session_state.www_graph_cache = None
def load_graph_from_csv_grape(file_content, file_name):
"""
Load page links from CSV file using Grape.
"""
try:
# Read CSV content
df = pd.read_csv(StringIO(file_content))
# Check required columns with user-friendly names
required_cols = ['FROM', 'TO']
if not all(col in df.columns for col in required_cols):
st.error(f"""
❌ **File Format Error**
Your CSV file needs these column names:
- **FROM** (the page that has the link)
- **TO** (the page being linked to)
Your file has: {', '.join(df.columns)}
""")
return None, None, None
# Clean data
df = df.dropna(subset=['FROM', 'TO'])
df['FROM'] = df['FROM'].astype(str)
df['TO'] = df['TO'].astype(str)
if len(df) == 0:
st.error(f"❌ No valid page links found in {file_name}")
return None, None, None
# Get unique nodes and create mapping
all_nodes = list(set(df['FROM'].tolist() + df['TO'].tolist()))
node_to_idx = {node: i for i, node in enumerate(all_nodes)}
# Create edge list with indices
edge_list = []
for _, row in df.iterrows():
source_idx = node_to_idx[row['FROM']]
target_idx = node_to_idx[row['TO']]
edge_list.append((source_idx, target_idx))
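        # Note: the Grape graph below is built with node names "0".."N-1" (the positions in
        # all_nodes) rather than the URLs themselves; downstream code relies on this to map
        # Grape node IDs back to URLs via all_nodes / node_to_idx.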
# Create Grape graph
G = grape.Graph.from_edge_list(
edge_list=edge_list,
directed=True,
node_names=[str(i) for i in range(len(all_nodes))],
name=f"graph_{file_name}"
)
return G, all_nodes, node_to_idx
except Exception as e:
st.error(f"❌ **Error reading file**: {str(e)}")
st.info("πŸ’‘ **Tip**: Make sure your file is a valid CSV with FROM and TO columns for page links")
return None, None, None
def create_www_graph_grape(n_nodes, m_edges, seed=42):
"""
Create a realistic internet simulation using Grape.
"""
cache_key = (n_nodes, m_edges, seed)
if (st.session_state.www_graph_cache is not None and
st.session_state.www_graph_cache[0] == cache_key):
return st.session_state.www_graph_cache[1]
# Set random seed
random.seed(seed)
np.random.seed(seed)
# Create BarabΓ‘si-Albert graph manually since Grape doesn't have this built-in
# Start with a complete graph of m_edges nodes
edges = []
for i in range(m_edges):
for j in range(i + 1, m_edges):
edges.append((i, j))
edges.append((j, i)) # Make it directed
# Add remaining nodes with preferential attachment
    degrees = [2 * (m_edges - 1)] * m_edges  # each seed node has in+out degree 2*(m_edges-1) in the initial complete digraph
for new_node in range(m_edges, n_nodes):
# Select m_edges nodes to connect to based on preferential attachment
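        # Roulette-wheel selection: an existing site is picked with probability proportional
        # to its current degree, which is what gives the simulated web its scale-free shape
        # (a few heavily linked hubs, many lightly linked pages).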
total_degree = sum(degrees)
targets = set()
while len(targets) < min(m_edges, new_node):
# Probability proportional to degree
rand_val = random.random() * total_degree
cumsum = 0
for i, degree in enumerate(degrees):
cumsum += degree
if cumsum >= rand_val and i not in targets:
targets.add(i)
break
# Add edges
for target in targets:
edges.append((new_node, target))
edges.append((target, new_node)) # Bidirectional
# Update degrees
degrees.append(2 * len(targets))
for target in targets:
degrees[target] += 2
# Create Grape graph
www_graph = grape.Graph.from_edge_list(
edge_list=edges,
directed=True,
node_names=[str(i) for i in range(n_nodes)],
name="www_simulation"
)
# Cache the result
st.session_state.www_graph_cache = (cache_key, www_graph)
return www_graph
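# Quick local sanity check (not executed by the app) — a sketch, assuming the Grape graph
# exposes get_number_of_nodes()/get_number_of_edges() as used elsewhere in this file:
#   g = create_www_graph_grape(n_nodes=1_000, m_edges=2, seed=0)
#   print(g.get_number_of_nodes(), g.get_number_of_edges())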
def process_configuration_grape(www_graph, kalicube_graph, kalicube_nodes,
min_connections=5, max_connections=50):
"""
Test how your page network performs in the real internet using Grape.
"""
# Get WWW graph info
www_node_count = www_graph.get_number_of_nodes()
kalicube_node_count = len(kalicube_nodes)
# Create node mapping for kalicube nodes
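    # The simulated WWW occupies node IDs 0..www_node_count-1, so the uploaded pages are
    # appended after it at IDs www_node_count, www_node_count+1, ...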
kalicube_offset = www_node_count
kalicube_node_mapping = {}
for i, node in enumerate(kalicube_nodes):
new_node_id = kalicube_offset + i
kalicube_node_mapping[node] = new_node_id
# Get edges from both graphs
www_edges = www_graph.get_edge_list()
kalicube_edges = kalicube_graph.get_edge_list()
# Convert kalicube edges to use new node IDs
kalicube_mapped_edges = []
    # The Kalicube graph was built with node names "0".."N-1" that index into kalicube_nodes
    # (see load_graph_from_csv_grape), so map each edge endpoint back to its original URL by
    # position before shifting it into the merged graph's ID space.
    kalicube_idx_to_name = {i: name for name, i in kalicube_graph.get_node_name_to_node_id_map().items()}
    for source_idx, target_idx in kalicube_edges:
        source_node = kalicube_nodes[int(kalicube_idx_to_name[source_idx])]
        target_node = kalicube_nodes[int(kalicube_idx_to_name[target_idx])]
        new_source_id = kalicube_node_mapping[source_node]
        new_target_id = kalicube_node_mapping[target_node]
        kalicube_mapped_edges.append((new_source_id, new_target_id))
# Randomly connect kalicube pages to WWW
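    # These cross-links act as the "backlinks" that feed PageRank from the simulated web into
    # the uploaded pages. Only min_connections is used to size the sample; max_connections is
    # accepted by this function but not currently applied.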
n_connections = min(min_connections, www_node_count, kalicube_node_count)
www_sample = random.sample(range(www_node_count), n_connections)
kalicube_sample = random.sample(list(kalicube_node_mapping.values()), n_connections)
connection_edges = []
for www_node, kalicube_node in zip(www_sample, kalicube_sample):
connection_edges.append((www_node, kalicube_node))
# Combine all edges
all_edges = list(www_edges) + kalicube_mapped_edges + connection_edges
total_nodes = www_node_count + kalicube_node_count
# Create merged graph
merged_graph = grape.Graph.from_edge_list(
edge_list=all_edges,
directed=True,
node_names=[str(i) for i in range(total_nodes)],
name="merged_simulation"
)
# Calculate PageRank
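    # PageRank with damping d = 0.85: PR(v) = (1 - d)/N + d * sum(PR(u)/outdeg(u)) over all
    # pages u linking to v, iterated up to 100 times or until changes fall below 1e-6.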
try:
pagerank_values = merged_graph.pagerank(
damping_factor=0.85,
maximum_iterations=100,
tolerance=1e-6
)
except Exception as e:
st.warning(f"PageRank calculation failed: {e}. Using degree centrality instead.")
# Fallback to degree centrality
degrees = merged_graph.get_node_degrees()
total_degree = sum(degrees)
pagerank_values = [deg / total_degree if total_degree > 0 else 0 for deg in degrees]
# Extract PageRank values for kalicube nodes
pagerank_dict = {}
for node, node_id in kalicube_node_mapping.items():
pagerank_dict[node] = pagerank_values[node_id] if node_id < len(pagerank_values) else 0.0
return pagerank_dict
def create_comparison_dataframe(pagerank_old_dict, pagerank_new_dict, simulation_id):
"""
Compare before and after results.
"""
# Find pages that appear in both tests
old_urls = set(pagerank_old_dict.keys())
new_urls = set(pagerank_new_dict.keys())
common_urls = old_urls & new_urls
if not common_urls:
return pd.DataFrame()
# Create comparison data
comparison_data = []
# Sort pages by importance for ranking
old_sorted = sorted(pagerank_old_dict.items(), key=lambda x: x[1], reverse=True)
new_sorted = sorted(pagerank_new_dict.items(), key=lambda x: x[1], reverse=True)
# Create ranking mappings
old_ranks = {url: rank + 1 for rank, (url, _) in enumerate(old_sorted)}
new_ranks = {url: rank + 1 for rank, (url, _) in enumerate(new_sorted)}
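    # Rank 1 is the most important page, so a negative Rank_Change below means the page
    # moved up the ranking after the change.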
for url in common_urls:
importance_before = pagerank_old_dict[url]
importance_after = pagerank_new_dict[url]
rank_before = old_ranks[url]
rank_after = new_ranks[url]
importance_change = importance_after - importance_before
importance_change_pct = (importance_change / importance_before) * 100 if importance_before > 0 else 0
rank_change = rank_after - rank_before
rank_change_pct = (rank_change / rank_before) * 100 if rank_before > 0 else 0
comparison_data.append({
'Page_URL': url,
'Importance_Before': importance_before,
'Importance_After': importance_after,
'Rank_Before': rank_before,
'Rank_After': rank_after,
'Importance_Change': importance_change,
'Importance_Change_%': importance_change_pct,
'Rank_Change': rank_change,
'Rank_Change_%': rank_change_pct,
'Test_Number': simulation_id
})
return pd.DataFrame(comparison_data)
def run_single_simulation(simulation_id, kalicube_graph_old, kalicube_graph_new,
kalicube_nodes_old, kalicube_nodes_new,
www_nodes, www_edges, min_conn, max_conn):
"""
Run one test comparing before and after.
"""
sim_seed = 42 + simulation_id
random.seed(sim_seed)
np.random.seed(sim_seed)
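    # Each test uses its own seed, so both the simulated internet and the random backlink
    # placement differ between runs; averaging over many such runs is what makes the final
    # recommendation robust rather than a single lucky (or unlucky) draw.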
# Create internet simulation
www_graph = create_www_graph_grape(www_nodes, www_edges, sim_seed)
# Test original setup
importance_old_dict = process_configuration_grape(
www_graph, kalicube_graph_old, kalicube_nodes_old, min_conn, max_conn
)
# Test new setup
importance_new_dict = process_configuration_grape(
www_graph, kalicube_graph_new, kalicube_nodes_new, min_conn, max_conn
)
# Compare results
comparison_df = create_comparison_dataframe(
importance_old_dict, importance_new_dict, simulation_id
)
if comparison_df.empty:
return None, None
# Calculate summary
total_before = comparison_df['Importance_Before'].sum()
total_after = comparison_df['Importance_After'].sum()
total_change = total_after - total_before
change_pct = (total_change / total_before) * 100 if total_before > 0 else 0
rank_changes = comparison_df['Rank_Change'].values
rank_improvements = np.sum(rank_changes < 0) # Lower rank number = better
rank_drops = np.sum(rank_changes > 0)
rank_unchanged = np.sum(rank_changes == 0)
avg_rank_change = np.mean(rank_changes)
result = {
'Test_Number': simulation_id + 1,
'Total_Before': total_before,
'Total_After': total_after,
'Total_Change': total_change,
'Change_Percent': change_pct,
'Pages_Improved': rank_improvements,
'Pages_Dropped': rank_drops,
'Pages_Unchanged': rank_unchanged,
'Avg_Rank_Change': avg_rank_change
}
return result, comparison_df
def get_traffic_light_status(results_df, confidence_threshold=0.7):
"""
Simple decision guidance based on test results.
"""
total_tests = len(results_df)
positive_outcomes = (results_df['Total_Change'] > 0).sum()
negative_outcomes = (results_df['Total_Change'] < 0).sum()
positive_ratio = positive_outcomes / total_tests
negative_ratio = negative_outcomes / total_tests
mean_impact = results_df['Change_Percent'].mean()
# Simple traffic light logic
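    # "Go" and "Stop" both require consistency (at least confidence_threshold of tests agreeing
    # on the sign) plus magnitude (mean impact beyond +/-1%); anything weaker or mixed is
    # reported as caution.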
if positive_ratio >= confidence_threshold and mean_impact > 1.0:
return "🟒", "βœ… GO AHEAD - Your changes look great!", "go", "Most tests show good results. Your changes should help your page rankings."
elif positive_ratio >= confidence_threshold and mean_impact > 0:
return "🟑", "⚠️ PROCEED CAREFULLY - Small improvements expected", "caution", "Tests show some improvement, but it's modest. Consider if the effort is worth it."
elif negative_ratio >= confidence_threshold and mean_impact < -1.0:
return "πŸ”΄", "❌ STOP - Your changes may hurt your page rankings", "stop", "Most tests show negative results. Consider revising your changes before implementing."
elif negative_ratio >= confidence_threshold and mean_impact < 0:
return "🟑", "⚠️ PROCEED CAREFULLY - Some negative impact expected", "caution", "Tests show some negative impact. Monitor closely if you proceed."
else:
return "🟑", "🀷 MIXED RESULTS - Hard to predict", "caution", "Test results are mixed. Consider running more tests or getting expert advice."
def create_simple_visualizations(results_df, all_comparisons_df, confidence_threshold=0.7):
"""
Create easy-to-understand visualizations.
"""
# Traffic Light Assessment
traffic_emoji, traffic_status, traffic_level, explanation = get_traffic_light_status(results_df, confidence_threshold)
st.markdown("## 🚦 **Should You Make These Changes?**")
# Big, clear recommendation
if traffic_level == "go":
st.success(f"# {traffic_emoji}")
st.success(f"## {traffic_status}")
st.info(f"**Why:** {explanation}")
elif traffic_level == "stop":
st.error(f"# {traffic_emoji}")
st.error(f"## {traffic_status}")
st.warning(f"**Why:** {explanation}")
else:
st.warning(f"# {traffic_emoji}")
st.warning(f"## {traffic_status}")
st.info(f"**Why:** {explanation}")
# Simple metrics in plain English
st.markdown("### πŸ“Š **Test Results Summary**")
col1, col2, col3 = st.columns(3)
with col1:
positive_tests = (results_df['Total_Change'] > 0).sum()
total_tests = len(results_df)
st.metric("Tests Showing Improvement", f"{positive_tests} out of {total_tests}",
delta=f"{positive_tests/total_tests:.0%} positive")
with col2:
mean_change = results_df['Change_Percent'].mean()
st.metric("Average Impact on Rankings", f"{mean_change:.1f}%",
delta="Higher is better")
with col3:
improved_sites = results_df['Pages_Improved'].mean()
st.metric("Pages That Improved (avg)", f"{improved_sites:.0f}",
delta="per test")
def main():
st.title("πŸ”— Page Link Impact Analyzer (Powered by Grape)")
st.markdown("**Find out if your page link changes will help or hurt your search rankings**")
# Simple intro
st.info("""
πŸ‘‹ **Welcome!** This tool helps you test page link changes before you make them.
**What it does:** Simulates how your link changes might affect your page rankings in search engines.
**What you need:** Two CSV files - one with your current page links, one with your planned changes.
πŸ‡ **Now powered by Grape** - A high-performance graph library for faster and more efficient analysis!
""")
# Sidebar - simplified
st.sidebar.header("βš™οΈ Settings")
# File uploads with better guidance
st.sidebar.markdown("### πŸ“ **Step 1: Upload Your Files**")
st.sidebar.markdown("*Need help with file format? Check the 'File Format Help' section below.*")
old_file = st.sidebar.file_uploader("Current Page Links (CSV)", type=['csv'], key="old",
help="Upload a CSV file with your current page links")
new_file = st.sidebar.file_uploader("Planned Page Links (CSV)", type=['csv'], key="new",
help="Upload a CSV file with your planned page links")
# Simplified settings
st.sidebar.markdown("### 🎯 **Step 2: Test Settings**")
num_tests = st.sidebar.select_slider(
"How many tests to run?",
options=[5, 10, 15, 20, 25, 30],
value=10,
help="More tests = more reliable results, but takes longer"
)
internet_size = st.sidebar.select_slider(
"Internet simulation size",
options=["Small (5K sites)", "Medium (10K sites)", "Large (25K sites)", "Huge (50K sites)"],
value="Medium (10K sites)",
help="Larger = more realistic but slower"
)
# Convert internet size to numbers
size_map = {
"Small (5K sites)": 5000,
"Medium (10K sites)": 10000,
"Large (25K sites)": 25000,
"Huge (50K sites)": 50000
}
www_nodes = size_map[internet_size]
# Advanced settings (hidden by default)
with st.sidebar.expander("πŸ”§ Advanced Settings (Optional)"):
confidence_level = st.slider("Confidence level for recommendations", 60, 90, 70, 5,
help="Higher = stricter requirements for green/red lights")
show_details = st.checkbox("Show detailed results", False)
auto_run = st.checkbox("Auto-run when files uploaded", False)
confidence_threshold = confidence_level / 100
# Main content
if old_file is not None and new_file is not None:
# Load files
old_content = old_file.getvalue().decode('utf-8')
new_content = new_file.getvalue().decode('utf-8')
# Show file status
col1, col2 = st.columns(2)
with col1:
st.success(f"βœ… **Current Page Links**: {old_file.name}")
with col2:
st.success(f"βœ… **Planned Page Links**: {new_file.name}")
# Load and validate files
with st.spinner("Reading your files..."):
kalicube_graph_old, kalicube_nodes_old, kalicube_url_mapping_old = \
load_graph_from_csv_grape(old_content, old_file.name)
kalicube_graph_new, kalicube_nodes_new, kalicube_url_mapping_new = \
load_graph_from_csv_grape(new_content, new_file.name)
if kalicube_graph_old is not None and kalicube_graph_new is not None:
# Show what we found
st.markdown("### πŸ“ˆ **What We Found in Your Files**")
info_col1, info_col2 = st.columns(2)
with info_col1:
st.info(f"""
**Current Setup:**
- {len(kalicube_nodes_old)} pages
- {kalicube_graph_old.get_number_of_edges()} links between them
""")
with info_col2:
st.info(f"""
**Planned Setup:**
- {len(kalicube_nodes_new)} pages
- {kalicube_graph_new.get_number_of_edges()} links between them
""")
# Big, obvious run button
st.markdown("### πŸš€ **Step 3: Run the Test**")
run_button = st.button("πŸ”¬ Test My Changes", type="primary", use_container_width=True)
if run_button or auto_run:
# Progress with encouraging messages
progress_bar = st.progress(0)
status_text = st.empty()
encouraging_messages = [
"πŸ”¬ Setting up internet simulation...",
"🌐 Connecting your pages to the web...",
"πŸ“Š Calculating page importance scores...",
"🎯 Running tests with different scenarios...",
"πŸ“ˆ Almost done! Analyzing results..."
]
all_results = []
all_comparisons = []
start_time = time.time()
# Run tests with encouragement
for i in range(num_tests):
msg_idx = min(i // max(1, num_tests // len(encouraging_messages)), len(encouraging_messages) - 1)
status_text.text(f"{encouraging_messages[msg_idx]} (Test {i+1}/{num_tests})")
progress_bar.progress((i + 1) / num_tests)
result, comparison_df = run_single_simulation(
i, kalicube_graph_old, kalicube_graph_new,
kalicube_nodes_old, kalicube_nodes_new,
                        www_nodes, 2, 5, 25  # www_edges=2, min_conn=5, max_conn=25
)
if result is not None:
all_results.append(result)
all_comparisons.append(comparison_df)
end_time = time.time()
# Clear progress
progress_bar.empty()
status_text.empty()
if all_results:
results_df = pd.DataFrame(all_results)
all_comparisons_df = pd.concat(all_comparisons, ignore_index=True) if all_comparisons else pd.DataFrame()
# Show results
st.success(f"πŸŽ‰ **Test Complete!** Ran {len(all_results)} tests in {end_time - start_time:.0f} seconds")
# Create simple visualizations
create_simple_visualizations(results_df, all_comparisons_df, confidence_threshold)
# Download section
st.markdown("### πŸ’Ύ **Save Your Results**")
col1, col2 = st.columns(2)
with col1:
csv_summary = results_df.to_csv(index=False)
st.download_button(
label="πŸ“Š Download Summary Report",
data=csv_summary,
file_name=f"website_impact_summary_{int(time.time())}.csv",
mime="text/csv"
)
with col2:
if not all_comparisons_df.empty:
csv_detailed = all_comparisons_df.to_csv(index=False)
st.download_button(
label="πŸ“‹ Download Detailed Results",
data=csv_detailed,
file_name=f"website_impact_detailed_{int(time.time())}.csv",
mime="text/csv"
)
# Show detailed results if requested
if show_details and not all_comparisons_df.empty:
st.markdown("### πŸ” **Detailed Results** (For the curious)")
# Simple filter
st.markdown("**Filter results:**")
filter_col1, filter_col2 = st.columns(2)
with filter_col1:
min_change = st.number_input("Show changes above (%)",
value=float(all_comparisons_df['Importance_Change_%'].min()),
step=0.1)
# Apply filter and show
filtered_df = all_comparisons_df[all_comparisons_df['Importance_Change_%'] >= min_change]
# Rename columns for clarity
display_df = filtered_df.copy()
display_df = display_df.rename(columns={
'Page_URL': 'Page URL',
'Importance_Change_%': 'Impact (%)',
'Rank_Change': 'Rank Change',
'Test_Number': 'Test #'
})
st.dataframe(
                            display_df[['Page URL', 'Impact (%)', 'Rank Change', 'Test #']].sort_values('Impact (%)', ascending=False),
use_container_width=True,
height=300
)
else:
st.error("❌ No test results generated. Please check your files and try again.")
else:
# Help section when no files uploaded
st.markdown("---")
# File format help
with st.expander("πŸ“‹ **File Format Help** - How to prepare your CSV files"):
st.markdown("""
### βœ… **Correct Format**
Your CSV files need exactly these column names:
- **FROM** = the page that has the link
- **TO** = the page being linked to
### πŸ“ **Example:**
```
FROM,TO
mysite.com/about,mysite.com/contact
mysite.com/blog/post1,partner.com/resource
partner.com/page,mysite.com/services
```
### πŸ’‘ **Tips:**
- Use any spreadsheet program (Excel, Google Sheets) to create these
- Save as CSV format
- Include full URLs or page paths
- Make sure page URLs are consistent (mysite.com/page vs mysite.com/page/ are different!)
- Each row represents one link from one page to another
""")
with st.expander("πŸ€” **What This Tool Actually Does** - Explained Simply"):
st.markdown("""
### 🌐 **The Big Picture**
When you change links between your pages, it affects how search engines see your site. But it's hard to predict the exact impact because the internet is huge and constantly changing.
### πŸ§ͺ **Our Solution: Virtual Testing**
1. **We simulate the internet** - Create a virtual version with thousands of pages
2. **We test your changes** - Run your current page links vs. your planned links
3. **We repeat many times** - Each test uses slightly different internet conditions
4. **We analyze the pattern** - Look at whether your changes usually help or hurt
### 🚦 **The Traffic Light System**
- **🟒 Green = Go ahead** - Most tests show your changes help
- **🟑 Yellow = Be careful** - Mixed results or small impact
- **πŸ”΄ Red = Stop** - Most tests show your changes hurt
### 🎯 **Why This Works**
Instead of guessing, you get data-driven confidence about your page link changes!
### πŸ‡ **Powered by Grape**
This version uses Grape, a high-performance graph library that's much faster than traditional tools for analyzing large networks.
""")
with st.expander("❓ **Common Questions**"):
st.markdown("""
**Q: How accurate is this?**
A: The tool shows trends and probabilities, not exact predictions. It's like weather forecasting - very useful for planning!
**Q: How long does it take?**
A: Usually 30 seconds to 2 minutes, depending on your settings. Grape makes it faster than before!
**Q: What if I get yellow results?**
A: Yellow means proceed carefully. Consider running more tests, getting expert advice, or monitoring closely if you implement.
**Q: Can I test multiple scenarios?**
A: Yes! Just upload different "planned changes" files to compare options.
**Q: What file size limits?**
A: Works best with up to 10,000 page links. Larger files may be slow.
**Q: What's the difference between pages and websites?**
A: Pages are specific URLs (like mysite.com/about), while websites are domains (like mysite.com). This tool analyzes individual page links.
**Q: What's new with Grape?**
A: Grape is a high-performance graph library that makes calculations much faster and can handle larger datasets more efficiently than NetworkX.
""")
if __name__ == "__main__":
main()