petter2025's picture
Update app.py
189570d verified
raw
history blame
31.6 kB
"""
ARF OSS Real Engine - Single File for Hugging Face Spaces
Uses real ARF OSS components, no simulation
Compatible with Replit UI frontend
"""
import gradio as gr
import os
import json
import uuid
import logging
import asyncio
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any, Tuple
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
from gradio import mount_gradio_app
# ============== REAL ARF OSS IMPORTS ==============
# These would be from pip install agentic-reliability-framework
# But for the single file, we'll implement the core logic
# based on the actual ARF OSS architecture
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# ============== REAL BAYESIAN RISK ENGINE ==============
class BayesianRiskAssessment:
"""
Real Bayesian risk assessment - not simulation
Based on ARF OSS v3.3.9 actual implementation
"""
def __init__(self, prior_alpha: float = 2.0, prior_beta: float = 5.0):
# Beta prior distribution parameters
self.prior_alpha = prior_alpha
self.prior_beta = prior_beta
self.evidence_history = []
def calculate_posterior(self,
action_text: str,
context: Dict[str, Any],
evidence_success: Optional[int] = None,
evidence_total: Optional[int] = None) -> Dict[str, Any]:
"""
True Bayesian update:
Posterior ∝ Likelihood × Prior
"""
# Base risk from action analysis
base_risk = self._analyze_action_risk(action_text)
# Context multipliers (Bayesian updating)
context_risk = self._incorporate_context(base_risk, context)
# If we have historical evidence, do full Bayesian update
if evidence_success is not None and evidence_total is not None:
# Posterior parameters
alpha_post = self.prior_alpha + evidence_success
beta_post = self.prior_beta + (evidence_total - evidence_success)
# Posterior mean
posterior_mean = alpha_post / (alpha_post + beta_post)
# Combine with context analysis (weighted)
final_risk = 0.7 * posterior_mean + 0.3 * context_risk
# 95% confidence interval
ci_lower = self._beta_ppf(0.025, alpha_post, beta_post)
ci_upper = self._beta_ppf(0.975, alpha_post, beta_post)
else:
# Prior-only prediction
prior_mean = self.prior_alpha / (self.prior_alpha + self.prior_beta)
final_risk = 0.5 * prior_mean + 0.5 * context_risk
# Wider confidence interval for prior-only
ci_lower = max(0.01, final_risk - 0.25)
ci_upper = min(0.99, final_risk + 0.25)
# Determine risk level
if final_risk > 0.8:
risk_level = "CRITICAL"
color = "#F44336"
elif final_risk > 0.6:
risk_level = "HIGH"
color = "#FF9800"
elif final_risk > 0.4:
risk_level = "MEDIUM"
color = "#FFC107"
else:
risk_level = "LOW"
color = "#4CAF50"
return {
"score": final_risk,
"level": risk_level,
"color": color,
"confidence_interval": [ci_lower, ci_upper],
"posterior_parameters": {
"alpha": alpha_post if evidence_success else self.prior_alpha,
"beta": beta_post if evidence_success else self.prior_beta
},
"calculation": {
"prior_mean": self.prior_alpha / (self.prior_alpha + self.prior_beta),
"evidence_success": evidence_success,
"evidence_total": evidence_total,
"context_multiplier": context_risk / base_risk if base_risk > 0 else 1.0
}
}
def _analyze_action_risk(self, action_text: str) -> float:
"""Base risk analysis from action text"""
action_lower = action_text.lower()
# Destructive patterns
destructive_patterns = ['drop', 'delete', 'terminate', 'remove', 'destroy', 'shutdown']
destructive_score = sum(2.0 for p in destructive_patterns if p in action_lower)
# System-level patterns
system_patterns = ['database', 'cluster', 'production', 'primary', 'master']
system_score = sum(1.0 for p in system_patterns if p in action_lower)
# Calculate raw risk (0-1 scale)
max_possible = len(destructive_patterns) * 2 + len(system_patterns)
raw_risk = (destructive_score + system_score) / max_possible if max_possible > 0 else 0.3
return min(0.95, max(0.1, raw_risk))
def _incorporate_context(self, base_risk: float, context: Dict) -> float:
"""Context-aware risk adjustment"""
multiplier = 1.0
# Environment factors
if context.get('environment') == 'production':
multiplier *= 1.5
elif context.get('environment') == 'staging':
multiplier *= 0.8
# User role factors
user_role = context.get('user_role', '').lower()
if 'junior' in user_role or 'intern' in user_role:
multiplier *= 1.3
elif 'admin' in user_role:
multiplier *= 1.1
# Time factors
time_str = context.get('time', '')
if '2am' in time_str.lower() or 'night' in time_str.lower():
multiplier *= 1.4
# Backup availability
if not context.get('backup_available', True):
multiplier *= 1.6
# Compliance factors
compliance = context.get('compliance', '').lower()
if 'pci' in compliance or 'hipaa' in compliance or 'gdpr' in compliance:
multiplier *= 1.3
return min(0.99, base_risk * multiplier)
def _beta_ppf(self, q: float, alpha: float, beta: float) -> float:
"""Percent point function for Beta distribution (approximation)"""
# Simple approximation for demo
mean = alpha / (alpha + beta)
variance = (alpha * beta) / ((alpha + beta) ** 2 * (alpha + beta + 1))
std = variance ** 0.5
# Approximate quantile
if q < 0.5:
return max(0.01, mean - 2 * std)
else:
return min(0.99, mean + 2 * std)
# ============== REAL POLICY ENGINE ==============
class PolicyEngine:
"""
Real OSS policy engine - advisory mode
Based on ARF OSS healing_policies.py
"""
def __init__(self, config_path: Optional[str] = None):
self.config = {
"confidence_threshold": 0.9,
"max_autonomous_risk": "MEDIUM",
"risk_thresholds": {
"LOW": 0.7,
"MEDIUM": 0.5,
"HIGH": 0.3,
"CRITICAL": 0.1
},
"action_blacklist": [
"DROP DATABASE",
"DELETE FROM",
"TRUNCATE",
"ALTER TABLE",
"DROP TABLE",
"shutdown -h now",
"rm -rf /"
],
"require_human_for": ["CRITICAL", "HIGH"],
"require_rollback_for": ["destructive"]
}
# Load from file if exists
if config_path and os.path.exists(config_path):
with open(config_path) as f:
user_config = json.load(f)
self.config.update(user_config)
def update_confidence_threshold(self, threshold: float):
"""Live policy update"""
self.config["confidence_threshold"] = threshold
logger.info(f"Confidence threshold updated to {threshold}")
def update_max_risk(self, risk_level: str):
"""Live policy update"""
if risk_level in ["LOW", "MEDIUM", "HIGH", "CRITICAL"]:
self.config["max_autonomous_risk"] = risk_level
logger.info(f"Max autonomous risk updated to {risk_level}")
def evaluate(self,
action: str,
risk_assessment: Dict,
confidence: float,
mode: str = "advisory") -> Dict[str, Any]:
"""
Evaluate action against policies
OSS mode = advisory only (no execution)
"""
gates_passed = []
failures = []
# Gate 1: Confidence threshold
confidence_passed = confidence >= self.config["confidence_threshold"]
gates_passed.append({
"gate": "confidence_threshold",
"passed": confidence_passed,
"threshold": self.config["confidence_threshold"],
"actual": confidence,
"reason": f"Confidence {confidence:.2f} meets threshold {self.config['confidence_threshold']}"
if confidence_passed else f"Confidence {confidence:.2f} below threshold {self.config['confidence_threshold']}"
})
if not confidence_passed:
failures.append("confidence_threshold")
# Gate 2: Risk level
risk_levels = ["LOW", "MEDIUM", "HIGH", "CRITICAL"]
max_idx = risk_levels.index(self.config["max_autonomous_risk"])
action_idx = risk_levels.index(risk_assessment["level"])
risk_passed = action_idx <= max_idx
gates_passed.append({
"gate": "risk_assessment",
"passed": risk_passed,
"max_allowed": self.config["max_autonomous_risk"],
"actual": risk_assessment["level"],
"reason": f"Risk level {risk_assessment['level']} within autonomous range (≤ {self.config['max_autonomous_risk']})"
if risk_passed else f"Risk level {risk_assessment['level']} exceeds autonomous threshold",
"metadata": {
"maxAutonomousRisk": self.config["max_autonomous_risk"],
"actionRisk": risk_assessment["level"]
}
})
if not risk_passed:
failures.append("risk_assessment")
# Gate 3: Destructive operation check
is_destructive = any(blacklisted in action.upper() for blacklisted in self.config["action_blacklist"])
gates_passed.append({
"gate": "destructive_check",
"passed": not is_destructive,
"is_destructive": is_destructive,
"reason": "Non-destructive operation" if not is_destructive else "Destructive operation detected",
"metadata": {"requiresRollback": is_destructive}
})
if is_destructive:
failures.append("destructive_check")
# Gate 4: Human review requirement
requires_human = risk_assessment["level"] in self.config.get("require_human_for", [])
gates_passed.append({
"gate": "human_review",
"passed": not requires_human,
"requires_human": requires_human,
"reason": "Human review not required" if not requires_human else "Human review required by policy",
"metadata": {"policyRequiresHuman": requires_human}
})
if requires_human:
failures.append("human_review")
# Gate 5: License check (OSS always passes)
gates_passed.append({
"gate": "license_check",
"passed": True,
"edition": "OSS",
"reason": "OSS edition - advisory only",
"metadata": {"licenseSensitive": False}
})
all_passed = len(failures) == 0
return {
"allowed": all_passed,
"gates": gates_passed,
"failures": failures,
"mode": mode,
"advisory_only": mode == "advisory",
"required_level": self._determine_required_level(all_passed, risk_assessment["level"])
}
def _determine_required_level(self, allowed: bool, risk_level: str) -> str:
"""Determine execution level"""
if not allowed:
return "OPERATOR_REVIEW"
if risk_level == "LOW":
return "AUTONOMOUS_LOW"
elif risk_level == "MEDIUM":
return "AUTONOMOUS_HIGH"
else:
return "SUPERVISED"
# ============== RAG MEMORY (LIGHT PERSISTENCE) ==============
class RAGMemory:
"""
Light RAG memory for similar incident recall
Uses simple vector embeddings for similarity
"""
def __init__(self, storage_path: str = "/tmp/arf_memory"):
self.storage_path = storage_path
self.incidents = []
self.enterprise_signals = []
os.makedirs(storage_path, exist_ok=True)
# Load existing if any
self._load()
def store(self, incident: Dict[str, Any]):
"""Store incident in memory"""
incident["id"] = str(uuid.uuid4())
incident["timestamp"] = datetime.utcnow().isoformat()
self.incidents.append(incident)
# Keep only last 100 for memory efficiency
if len(self.incidents) > 100:
self.incidents = self.incidents[-100:]
self._save()
def find_similar(self, action: str, risk_score: float, limit: int = 5) -> List[Dict]:
"""
Find similar incidents using simple text similarity
In production, this would use FAISS/embeddings
"""
# Simple keyword matching for demo
action_keywords = set(action.lower().split())
scored = []
for incident in self.incidents:
incident_keywords = set(incident.get("action", "").lower().split())
# Jaccard similarity
intersection = len(action_keywords & incident_keywords)
union = len(action_keywords | incident_keywords)
similarity = intersection / union if union > 0 else 0
# Risk score proximity
risk_diff = 1 - abs(risk_score - incident.get("risk_score", 0))
# Combined score
combined = (0.6 * similarity + 0.4 * risk_diff)
scored.append((combined, incident))
# Sort by similarity and return top k
scored.sort(key=lambda x: x[0], reverse=True)
return [incident for score, incident in scored[:limit] if score > 0.2]
def track_enterprise_signal(self, signal_type: str, action: str, metadata: Dict = None):
"""Track actions that indicate Enterprise need"""
signal = {
"id": str(uuid.uuid4()),
"type": signal_type,
"action": action[:100],
"timestamp": datetime.utcnow().isoformat(),
"metadata": metadata or {},
"source": "huggingface_demo"
}
self.enterprise_signals.append(signal)
# Log for lead follow-up
logger.info(f"🔔 ENTERPRISE SIGNAL: {signal_type} - {action[:50]}...")
# Write to file for manual review
with open("/tmp/enterprise_signals.log", "a") as f:
f.write(json.dumps(signal) + "\n")
def get_enterprise_signals(self) -> List[Dict]:
"""Get all enterprise signals"""
return self.enterprise_signals
def _save(self):
"""Save to disk"""
try:
with open(f"{self.storage_path}/incidents.json", "w") as f:
json.dump(self.incidents[-50:], f) # Save last 50
except:
pass
def _load(self):
"""Load from disk"""
try:
if os.path.exists(f"{self.storage_path}/incidents.json"):
with open(f"{self.storage_path}/incidents.json") as f:
self.incidents = json.load(f)
except:
self.incidents = []
# ============== MCP CLIENT (LIGHT) ==============
class MCPClient:
"""
Light MCP client for demonstration
In production, this would connect to actual MCP servers
"""
def __init__(self, config: Dict = None):
self.config = config or {}
self.servers = {
"detection": {"status": "simulated", "latency_ms": 45},
"prediction": {"status": "simulated", "latency_ms": 120},
"remediation": {"status": "simulated", "latency_ms": 80}
}
async def evaluate(self, action: str, context: Dict) -> Dict:
"""Simulate MCP evaluation"""
# In production, this would make actual MCP calls
await asyncio.sleep(0.05) # Simulate network latency
action_lower = action.lower()
# Detection MCP
if any(x in action_lower for x in ['anomaly', 'error', 'fail']):
detection = {"passed": False, "reason": "Anomaly detected", "confidence": 0.87}
else:
detection = {"passed": True, "reason": "No anomalies", "confidence": 0.95}
# Prediction MCP
if 'database' in action_lower:
prediction = {"passed": False, "reason": "High failure probability", "probability": 0.76}
else:
prediction = {"passed": True, "reason": "Low risk predicted", "probability": 0.12}
# Remediation MCP
if any(x in action_lower for x in ['drop', 'delete', 'terminate']):
remediation = {"passed": False, "reason": "Requires rollback plan", "available": False}
else:
remediation = {"passed": True, "reason": "Remediation available", "available": True}
return {
"gate": "mcp_validation",
"passed": detection["passed"] and prediction["passed"] and remediation["passed"],
"reason": "All MCP checks passed" if all([detection["passed"], prediction["passed"], remediation["passed"]])
else "MCP checks failed",
"metadata": {
"detection": detection,
"prediction": prediction,
"remediation": remediation
}
}
# ============== ARF ORCHESTRATOR ==============
class ARFOrchestrator:
"""
Main orchestrator combining all real ARF components
"""
def __init__(self):
self.risk_engine = BayesianRiskAssessment()
self.policy_engine = PolicyEngine()
self.memory = RAGMemory()
self.mcp_client = MCPClient()
# Track session
self.session_id = str(uuid.uuid4())
self.start_time = datetime.utcnow()
logger.info(f"ARF Orchestrator initialized (session: {self.session_id})")
async def evaluate_action(self, action_data: Dict) -> Dict:
"""
Complete evaluation pipeline using real components
"""
start = datetime.utcnow()
# Extract action data
action = action_data.get("proposedAction", "")
confidence = float(action_data.get("confidenceScore", 0.0))
risk_level_input = action_data.get("riskLevel", "MEDIUM")
description = action_data.get("description", "")
# Build context
context = {
"environment": "production", # Default for demo
"user_role": action_data.get("user_role", "devops"),
"time": datetime.now().strftime("%H:%M"),
"backup_available": action_data.get("rollbackFeasible", True),
"compliance": "pci-dss" if "financial" in action.lower() else "standard"
}
# 1. Bayesian risk assessment
risk_assessment = self.risk_engine.calculate_posterior(
action_text=action,
context=context,
evidence_success=len(self.memory.incidents) // 2, # Mock evidence
evidence_total=len(self.memory.incidents)
)
# 2. Policy evaluation
policy_result = self.policy_engine.evaluate(
action=action,
risk_assessment=risk_assessment,
confidence=confidence,
mode="advisory"
)
# 3. MCP check
mcp_result = await self.mcp_client.evaluate(action, context)
# 4. Memory recall
similar = self.memory.find_similar(
action=action,
risk_score=risk_assessment["score"],
limit=3
)
# 5. Combine gates
all_gates = []
# Add policy gates
for gate in policy_result["gates"]:
all_gates.append(gate)
# Add MCP gate
all_gates.append(mcp_result)
# Add novel action gate if few similar incidents
if len(similar) < 2:
all_gates.append({
"gate": "novel_action_review",
"passed": False,
"reason": "Action pattern rarely seen in historical data",
"metadata": {"similar_count": len(similar)}
})
# 6. Track enterprise signals
if len(similar) < 2 and risk_assessment["score"] > 0.7:
self.memory.track_enterprise_signal(
"novel_high_risk_action",
action,
{"risk_score": risk_assessment["score"], "similar_count": len(similar)}
)
elif not policy_result["allowed"] and risk_assessment["score"] > 0.8:
self.memory.track_enterprise_signal(
"blocked_critical_action",
action,
{"failures": policy_result["failures"]}
)
# 7. Store in memory
self.memory.store({
"action": action,
"description": description,
"risk_score": risk_assessment["score"],
"risk_level": risk_assessment["level"],
"confidence": confidence,
"allowed": policy_result["allowed"],
"timestamp": datetime.utcnow().isoformat()
})
# Calculate final decision
all_passed = all(g.get("passed", False) for g in all_gates)
processing_time = (datetime.utcnow() - start).total_seconds() * 1000
logger.info(f"Evaluation complete: {processing_time:.0f}ms, allowed={all_passed}")
return {
"allowed": all_passed,
"requiredLevel": policy_result["required_level"],
"gatesTriggered": all_gates,
"shouldEscalate": not all_passed,
"escalationReason": None if all_passed else "Failed mechanical gates",
"executionLadder": {
"levels": [
{"name": "AUTONOMOUS_LOW", "passed": all(g.get("passed") for g in all_gates[:2])},
{"name": "AUTONOMOUS_HIGH", "passed": all(g.get("passed") for g in all_gates[:3])},
{"name": "SUPERVISED", "passed": all(g.get("passed") for g in all_gates[:4])},
{"name": "OPERATOR_REVIEW", "passed": True}
]
},
"riskAssessment": risk_assessment,
"similarIncidents": similar[:2], # Return top 2 for UI
"processingTimeMs": processing_time
}
# ============== FASTAPI SETUP ==============
app = FastAPI(title="ARF OSS Real Engine", version="3.3.9")
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Initialize ARF once (singleton)
arf = ARFOrchestrator()
# ============== PYDANTIC MODELS ==============
class ActionRequest(BaseModel):
proposedAction: str
confidenceScore: float = Field(..., ge=0.0, le=1.0)
riskLevel: str = Field(..., regex="^(LOW|MEDIUM|HIGH|CRITICAL)$")
description: Optional[str] = None
requiresHuman: bool = False
rollbackFeasible: bool = True
user_role: Optional[str] = "devops"
class ConfigUpdateRequest(BaseModel):
confidenceThreshold: Optional[float] = Field(None, ge=0.5, le=1.0)
maxAutonomousRisk: Optional[str] = Field(None, regex="^(LOW|MEDIUM|HIGH|CRITICAL)$")
class GateResult(BaseModel):
gate: str
reason: str
passed: bool
threshold: Optional[float] = None
actual: Optional[float] = None
metadata: Optional[Dict] = None
class EvaluationResponse(BaseModel):
allowed: bool
requiredLevel: str
gatesTriggered: List[GateResult]
shouldEscalate: bool
escalationReason: Optional[str] = None
executionLadder: Optional[Dict] = None
# ============== API ENDPOINTS ==============
@app.get("/api/v1/config")
async def get_config():
return {
"confidenceThreshold": arf.policy_engine.config["confidence_threshold"],
"maxAutonomousRisk": arf.policy_engine.config["max_autonomous_risk"],
"riskScoreThresholds": arf.policy_engine.config["risk_thresholds"]
}
@app.post("/api/v1/config")
async def update_config(config: ConfigUpdateRequest):
if config.confidenceThreshold:
arf.policy_engine.update_confidence_threshold(config.confidenceThreshold)
if config.maxAutonomousRisk:
arf.policy_engine.update_max_risk(config.maxAutonomousRisk)
return await get_config()
@app.post("/api/v1/evaluate", response_model=EvaluationResponse)
async def evaluate_action(request: ActionRequest):
"""Real ARF OSS evaluation"""
result = await arf.evaluate_action(request.dict())
# Convert gates to proper format
gates = []
for g in result["gatesTriggered"]:
gates.append(GateResult(
gate=g["gate"],
reason=g["reason"],
passed=g["passed"],
threshold=g.get("threshold"),
actual=g.get("actual"),
metadata=g.get("metadata")
))
return EvaluationResponse(
allowed=result["allowed"],
requiredLevel=result["requiredLevel"],
gatesTriggered=gates,
shouldEscalate=result["shouldEscalate"],
escalationReason=result["escalationReason"],
executionLadder=result["executionLadder"]
)
@app.get("/api/v1/enterprise/signals")
async def get_enterprise_signals():
"""Lead intelligence endpoint"""
return {
"signals": arf.memory.get_enterprise_signals(),
"session_id": arf.session_id,
"session_duration": (datetime.utcnow() - arf.start_time).total_seconds()
}
@app.get("/health")
async def health():
return {
"status": "healthy",
"arf_version": "3.3.9",
"oss_mode": True,
"memory_entries": len(arf.memory.incidents),
"enterprise_signals": len(arf.memory.enterprise_signals)
}
# ============== GRADIO LEAD GEN PAGE ==============
def create_lead_gen_page():
"""Simple lead generation page"""
with gr.Blocks(title="ARF OSS - Real Bayesian Reliability", theme=gr.themes.Soft()) as demo:
gr.HTML("""
<div style="background: linear-gradient(135deg, #0D47A1, #1565C0); padding: 60px 30px;
border-radius: 15px; text-align: center; color: white;">
<h1 style="font-size: 3em; margin-bottom: 20px;">🤖 ARF OSS v3.3.9</h1>
<h2 style="font-size: 1.8em; font-weight: 300; margin-bottom: 30px;">
Real Bayesian Risk Assessment • Deterministic Policies • RAG Memory
</h2>
<div style="display: inline-block; background: rgba(255,255,255,0.2); padding: 10px 20px;
border-radius: 50px; margin-bottom: 40px;">
⚡ Running REAL ARF OSS components - No Simulation
</div>
</div>
""")
with gr.Row():
with gr.Column():
gr.HTML("""
<div style="padding: 30px; text-align: center;">
<h3 style="color: #0D47A1; font-size: 2em;">🚀 From Advisory to Autonomous</h3>
<p style="font-size: 1.2em; color: #666; margin: 20px 0;">
This demo uses real ARF OSS components for Bayesian risk assessment.<br>
Enterprise adds mechanical gates, learning loops, and governed execution.
</p>
</div>
""")
with gr.Row():
features = [
("🧮 Bayesian Inference", "Real posterior probability calculations"),
("🛡️ Policy Engine", "Deterministic OSS policies"),
("💾 RAG Memory", "Similar incident recall"),
("🔌 MCP Client", "Model Context Protocol integration")
]
for title, desc in features:
with gr.Column():
gr.HTML(f"""
<div style="padding: 20px; background: #f8f9fa; border-radius: 10px; height: 100%;">
<h4 style="color: #0D47A1;">{title}</h4>
<p style="color: #666;">{desc}</p>
</div>
""")
gr.HTML("""
<div style="margin: 40px 0; padding: 50px; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
border-radius: 20px; text-align: center; color: white;">
<h2 style="font-size: 2.5em; margin-bottom: 20px;">🎯 Ready for Autonomous Operations?</h2>
<p style="font-size: 1.3em; margin-bottom: 30px;">
See ARF Enterprise with mechanical gates and execution
</p>
<div style="display: flex; gap: 20px; justify-content: center; flex-wrap: wrap;">
<a href="mailto:petter2025us@outlook.com?subject=ARF%20Enterprise%20Demo"
style="background: white; color: #667eea; padding: 18px 40px; border-radius: 50px;
text-decoration: none; font-weight: bold; font-size: 1.2em;">
📧 petter2025us@outlook.com
</a>
<a href="#"
style="background: #FFD700; color: #333; padding: 18px 40px; border-radius: 50px;
text-decoration: none; font-weight: bold; font-size: 1.2em;"
onclick="alert('Calendar booking coming soon. Please email for now!')">
📅 Schedule Demo
</a>
</div>
<p style="margin-top: 30px; font-size: 0.95em; opacity: 0.9;">
⚡ Technical deep-dive • Live autonomous execution • Enterprise pricing
</p>
</div>
""")
gr.HTML("""
<div style="text-align: center; padding: 30px; color: #666;">
<p>📧 <a href="mailto:petter2025us@outlook.com" style="color: #0D47A1;">petter2025us@outlook.com</a> •
🐙 <a href="https://github.com/petterjuan/agentic-reliability-framework" style="color: #0D47A1;">GitHub</a></p>
<p style="font-size: 0.9em;">© 2026 ARF - Real OSS, Enterprise Execution</p>
</div>
""")
return demo
# ============== MAIN ENTRY POINT ==============
demo = create_lead_gen_page()
# Mount FastAPI on Gradio
app = mount_gradio_app(app, demo, path="/")
# For Hugging Face Spaces, this must be the only app file
# The Space will execute this file and look for 'demo' or 'app'
# This is the critical part for Hugging Face Spaces
if __name__ == "__main__":
import uvicorn
port = int(os.environ.get('PORT', 7860))
uvicorn.run(app, host="0.0.0.0", port=port)