import streamlit as st
from dotenv import load_dotenv
import streamlit.components.v1 as components
from analyzer import CodeAnalyzer
from typing import Any, Iterable, List
import os
import html
# Force reload environment variables
# override=True makes .env values win over anything already in the process env.
load_dotenv(override=True)
# Page config must be the first Streamlit call in the script.
st.set_page_config(
    page_title="Matrix Code Analyzer",
    page_icon="🧠",
    layout="wide"
)
# Debug sidebar removed for cleaner UI
# NOTE(review): the CSS payload below is empty in this copy of the file —
# confirm the stylesheet was not stripped during export.
CUSTOM_CSS = """
"""
st.markdown(CUSTOM_CSS, unsafe_allow_html=True)
# NOTE(review): this markdown body is also empty here — presumably held
# header/layout HTML originally; verify against the canonical source.
st.markdown(
    """
""",
    unsafe_allow_html=True,
)
@st.cache_resource
def get_analyzer():
    """Build the shared CodeAnalyzer once per server process.

    The cache directory depends on where we run: Docker / HF Spaces images
    contain an /app directory and only /tmp is guaranteed writable there,
    while local development uses a project-relative folder.
    """
    if os.path.exists("/app"):
        cache_dir = "/tmp/.analyzer_cache"
    else:
        cache_dir = ".analyzer_cache"
    return CodeAnalyzer(cache_dir=cache_dir)
# Instantiate (or fetch the process-cached) analyzer at import time.
analyzer = get_analyzer()
# Mapping of model key -> display label (keys are used as selectbox options,
# values as their labels further below).
AVAILABLE_MODELS = analyzer.available_models
if not AVAILABLE_MODELS:
    # Without at least one configured model the app cannot do anything useful.
    st.error("No AI models configured. Add API keys to your .env file and restart the app.")
    st.stop()
# Internal language code -> human-readable label shown in the language picker.
# "auto" delegates detection to the analyzer.
LANGUAGE_DISPLAY: dict[str, str] = {
    "auto": "Auto Detect",
    "python": "Python",
    "javascript": "JavaScript",
    "typescript": "TypeScript",
    "html": "HTML",
    "css": "CSS",
    "java": "Java",
    "cpp": "C++",
    "c": "C",
    "csharp": "C#",
    "go": "Go",
    "rust": "Rust",
    "php": "PHP",
    "ruby": "Ruby",
    "swift": "Swift",
    "kotlin": "Kotlin",
}
# Ordered option list for the selectbox (dicts preserve insertion order).
LANGUAGE_OPTIONS: List[str] = list(LANGUAGE_DISPLAY.keys())
def ensure_list(items: Any) -> List[str]:
    """Normalize arbitrary analyzer output into a list of non-empty strings.

    Accepts None/empty (-> []), a single string (stripped), a dict
    (-> "key: value" lines for non-blank values), any other iterable
    (entries stringified and stripped, blanks and Nones dropped), or a
    scalar (-> single-item list of its str()).
    """
    if not items:
        return []
    if isinstance(items, str):
        stripped = items.strip()
        return [stripped] if stripped else []
    if isinstance(items, dict):
        return [
            f"{key}: {value}"
            for key, value in items.items()
            if str(value).strip()
        ]
    if isinstance(items, Iterable):
        cleaned = (str(entry).strip() for entry in items if entry is not None)
        return [text for text in cleaned if text]
    return [str(items)]
def parse_score(raw: Any) -> float:
    """Coerce *raw* to float; malformed or missing values become 0.0."""
    try:
        value = float(raw)
    except (TypeError, ValueError):
        value = 0.0
    return value
def score_badge(score: float) -> tuple[str, str]:
    """Map a 0-100 quality score to a (status label, hex color) pair."""
    bands = (
        (80, ("Excellent", "#00fba4")),
        (60, ("Review Suggested", "#ffd76a")),
    )
    for floor, badge in bands:
        if score >= floor:
            return badge
    return "Needs Attention", "#ff6b6b"
def render_list_section(title: str, icon: str, content: Any, fallback: str | None = None) -> None:
    """Render up to six entries from *content* as a card; when *content* is
    empty and *fallback* is provided, render the fallback card instead.

    Args:
        title: Section heading text.
        icon: Emoji/prefix shown next to the title.
        content: Anything ensure_list() can normalize.
        fallback: Optional message for the empty case; if None, nothing
            is rendered when *content* is empty.
    """
    entries = ensure_list(content)
    if entries:
        # Cap at six entries to keep the card compact; each is HTML-escaped.
        items_html = "".join(f"{html.escape(entry)}" for entry in entries[:6])
        # NOTE(review): items_html is never interpolated into the markup
        # below — the HTML template appears stripped in this copy; confirm
        # against the canonical file.
        st.markdown(
            f"""
""",
            unsafe_allow_html=True,
        )
    elif fallback:
        st.markdown(
            f"""
{icon} {title}
{html.escape(fallback)}
""",
            unsafe_allow_html=True,
        )
def render_code_result(result: dict[str, Any], model_label: str) -> None:
    """Render a snippet-analysis result: metric row, summary, and sections.

    Args:
        result: Analyzer output; an "error" key short-circuits rendering.
        model_label: Display name of the model. Not referenced in this body —
            presumably used by markup that was stripped; confirm.
    """
    if result.get("error"):
        st.error(f"Analysis failed: {result['error']}")
        return
    # Normalize loosely-typed fields coming back from the analyzer/model.
    score = parse_score(result.get("quality_score", 0))
    status_label, status_color = score_badge(score)
    language = (result.get("language") or "auto").upper()
    line_count = result.get("line_count", "-")
    exec_time = parse_score(result.get("execution_time", 0.0))
    cached_text = "Hit" if result.get("cached") else "Fresh"
    # One dict per metric card; "color" is only set for the score card.
    stats = [
        {"label": "Quality Score", "value": f"{int(round(score))}/100", "sub": status_label, "color": status_color},
        {"label": "Language", "value": language, "sub": "Detected" if language != "AUTO" else "Auto"},
        {"label": "Lines", "value": line_count, "sub": "Analyzed"},
        {"label": "Latency", "value": f"{exec_time:.1f}s", "sub": "Runtime"},
        {"label": "Cache", "value": cached_text, "sub": "Result Store"},
    ]
    # Render stats using native Streamlit components to avoid raw HTML showing
    st.markdown(
        f"""
""",
        unsafe_allow_html=True,
    )
    cols = st.columns(len(stats))
    for idx, stat in enumerate(stats):
        with cols[idx]:
            st.metric(label=stat["label"], value=stat["value"], delta=stat["sub"])
    summary = result.get("summary")
    if summary:
        st.markdown(
            f"""
📋 Overview
{html.escape(summary)}
""",
            unsafe_allow_html=True,
        )
    # Sections with a fallback show a reassuring message when empty; the
    # others (Highlights, Suggestions) simply hide when empty.
    render_list_section("Highlights", "✨", result.get("strengths"))
    render_list_section("Bug Detection", "🐞", result.get("bugs") or result.get("issues"), "No critical bugs were flagged.")
    render_list_section("Security", "🔒", result.get("security_vulnerabilities") or result.get("security_concerns"), "No security vulnerabilities detected.")
    render_list_section("Code Quality", "🧩", result.get("quality_issues"), "Structure looks solid and maintainable.")
    render_list_section("Quick Fixes", "⚡", result.get("quick_fixes"), "No urgent fixes suggested.")
    render_list_section("Suggestions", "💡", result.get("suggestions"))
    raw = result.get("raw_response")
    if raw:
        with st.expander("View full model response", expanded=False):
            st.code(raw, language="text")
def render_repo_result(result: dict[str, Any], model_label: str) -> None:
    """Render a GitHub repository analysis: header card, metrics, sections.

    Args:
        result: Analyzer output; an "error" key short-circuits rendering.
        model_label: Display name of the model that produced *result*.
    """
    if result.get("error"):
        st.error(f"Repository analysis failed: {result['error']}")
        return
    # `or {}` also guards against repository_info being present but None.
    info = result.get("repository_info") or {}
    repo_name = info.get("name", "Repository")
    repo_desc = info.get("description") or "No description provided."
    repo_url = result.get("repo_url") or st.session_state.get("repo_analysis_url")
    repo_stats = [
        {"label": "Primary Language", "value": info.get("language", "Unknown"), "sub": "Detected"},
        {"label": "Stars", "value": info.get("stars", 0), "sub": "Community"},
        {"label": "Forks", "value": info.get("forks", 0), "sub": "Collaboration"},
        {"label": "Size", "value": f"{info.get('size', 0)} KB", "sub": "Repo Size"},
        {"label": "Latency", "value": f"{parse_score(result.get('execution_time', 0.0)):.1f}s", "sub": "Runtime"},
    ]
    # FIX: this assignment was an unterminated single-quoted f-string spanning
    # two physical lines — a SyntaxError that prevented the module from
    # loading at all. Rebuilt as a valid one-line conditional. The anchor
    # markup was lost in that copy; restore the <a href="{repo_url}"> wrapper
    # here if the original link card is wanted.
    link_html = "View repository ↗" if repo_url else ""
    # Header card
    st.markdown(
        f"""
🤖 {html.escape(model_label)}
{html.escape(repo_name)}
{html.escape(repo_desc)}
{link_html}
""",
        unsafe_allow_html=True,
    )
    # Metrics row
    cols = st.columns(len(repo_stats))
    for idx, stat in enumerate(repo_stats):
        with cols[idx]:
            st.metric(label=stat["label"], value=stat["value"], delta=stat["sub"])
    overview = result.get("project_overview")
    if overview:
        st.markdown(
            f"""
📋 Project Overview
{html.escape(overview)}
""",
            unsafe_allow_html=True,
        )
    # Each section shows its fallback message when the model returned nothing.
    render_list_section("Architecture Quality", "🏗️", result.get("architecture_quality"), "Project structure looks well organized.")
    render_list_section("Critical Issues", "🚨", result.get("critical_issues"), "No critical issues were highlighted.")
    render_list_section("Priority Improvements", "🎯", result.get("improvement_priorities"), "No immediate improvements suggested.")
    render_list_section("Onboarding Guide", "🚀", result.get("onboarding_guide"), "No specific onboarding steps identified.")
    render_list_section("Tech Stack", "🛠️", result.get("tech_stack_rationale"), "Tech stack details were not identified.")
    render_list_section("API Endpoints", "🔌", result.get("api_endpoint_summary"), "No API endpoints were identified.")
    raw = result.get("raw_response")
    if raw:
        with st.expander("View full model response", expanded=False):
            st.code(raw, language="text")
if "code_input" not in st.session_state:
st.session_state.code_input = ""
if "code_file_meta" not in st.session_state:
st.session_state.code_file_meta = None
if "code_analysis_result" not in st.session_state:
st.session_state.code_analysis_result = None
if "code_analysis_model" not in st.session_state:
st.session_state.code_analysis_model = ""
if "repo_analysis_result" not in st.session_state:
st.session_state.repo_analysis_result = None
if "repo_analysis_model" not in st.session_state:
st.session_state.repo_analysis_model = ""
if "repo_analysis_url" not in st.session_state:
st.session_state.repo_analysis_url = ""
st.markdown(
"""
CODE ANALYZER
AI Code Analyzer
Inspect bugs, surface security gaps, and review repositories with instant feedback.
🧠 Multi-model
🔍 Bug & Security Scan
⚡ Instant Results
📦 GitHub Ready
""",
unsafe_allow_html=True,
)
# Two top-level views: single-snippet analysis and whole-repository insights.
code_tab, repo_tab = st.tabs(["Code Analysis", "Repository Insights"])
with code_tab:
    model_keys = list(AVAILABLE_MODELS.keys())
    selected_model_code = st.selectbox(
        "AI Model",
        options=model_keys,
        format_func=lambda key: AVAILABLE_MODELS[key],
        key="code_model_select",
    )
    selected_language = st.selectbox(
        "Language",
        LANGUAGE_OPTIONS,
        format_func=lambda code: LANGUAGE_DISPLAY[code],
        key="language_select",
    )
    uploaded_file = st.file_uploader(
        "Upload a code file",
        type=["py", "js", "java", "cpp", "c", "cs", "go", "rs", "php", "rb", "swift", "kt", "txt"],
        key="code_file_uploader",
    )
    if uploaded_file is not None:
        raw_bytes = uploaded_file.read()
        try:
            decoded = raw_bytes.decode("utf-8")
        except UnicodeDecodeError:
            st.error("Only UTF-8 encoded files are supported.")
        else:
            # Mirror the uploaded file into the text area's session key so the
            # editor below shows its contents, and record display metadata.
            st.session_state.code_input = decoded
            st.session_state.code_file_meta = {
                "name": uploaded_file.name,
                "size": len(raw_bytes),
                "lines": len(decoded.splitlines()),
            }
    # Bound to the same "code_input" session key the uploader writes to.
    st.text_area(
        "Or paste code below",
        key="code_input",
        height=320,
        placeholder="Paste any code snippet to inspect bugs, security gaps, and quality issues.",
    )
    meta = st.session_state.get("code_file_meta")
    if meta:
        # Small card summarizing the currently loaded file.
        st.markdown(
            f"""
Active File
{meta['name']}
{meta['lines']} lines
{meta['size']} bytes
""",
            unsafe_allow_html=True,
        )
    analyze_code_clicked = st.button("Run Code Analysis", key="code_analyze_button", use_container_width=True)
    if analyze_code_clicked:
        snippet = st.session_state.get("code_input", "").strip()
        if not snippet:
            st.error("Please upload a file or paste some code to analyze.")
        else:
            with st.spinner("Analyzing code..."):
                # "auto" means let the analyzer detect the language itself.
                language_arg = None if selected_language == "auto" else selected_language
                result = analyzer.analyze_code(snippet, selected_model_code, language_arg)
                # Persist results so they survive Streamlit reruns.
                st.session_state.code_analysis_result = result
                st.session_state.code_analysis_model = AVAILABLE_MODELS[selected_model_code]
    if st.session_state.get("code_analysis_result"):
        render_code_result(st.session_state.code_analysis_result, st.session_state.get("code_analysis_model", ""))
    else:
        st.info("Upload a file or paste code to generate an analysis.")
with repo_tab:
    model_keys = list(AVAILABLE_MODELS.keys())
    selected_model_repo = st.selectbox(
        "AI Model",
        options=model_keys,
        format_func=lambda key: AVAILABLE_MODELS[key],
        key="repo_model_select",
    )
    # Value lives in session state under "repo_analysis_url".
    st.text_input(
        "GitHub repository URL",
        placeholder="https://github.com/owner/repo",
        key="repo_analysis_url",
    )
    analyze_repo_clicked = st.button("Analyze Repository", key="repo_analyze_button", use_container_width=True)
    if analyze_repo_clicked:
        repo_url = st.session_state.get("repo_analysis_url", "").strip()
        if not repo_url:
            st.error("Enter a GitHub repository URL.")
        else:
            with st.spinner("Inspecting repository..."):
                result = analyzer.analyze_github_repo(repo_url, selected_model_repo)
                # Persist results so they survive Streamlit reruns.
                st.session_state.repo_analysis_result = result
                st.session_state.repo_analysis_model = AVAILABLE_MODELS[selected_model_repo]
    if st.session_state.get("repo_analysis_result"):
        render_repo_result(
            st.session_state.repo_analysis_result,
            st.session_state.get("repo_analysis_model", ""),
        )
    else:
        st.info("Provide a public GitHub repository URL to review its structure, issues, and improvements.")