"""
ARF OSS v3.3.9 - Enterprise Lead Generation Engine
Compatible with Gradio 4.44.1 and Pydantic V2
"""
import os
# 🔥 CRITICAL: Force Gradio to use port 7860 for Hugging Face Spaces
os.environ['GRADIO_SERVER_PORT'] = '7860'
os.environ['GRADIO_SERVER_NAME'] = '0.0.0.0'
# 🔥 Disable Gradio analytics/telemetry
os.environ['GRADIO_ANALYTICS_ENABLED'] = 'False'
import json
import uuid
import hmac
import hashlib
import logging
import asyncio
import sqlite3
import requests
import fcntl # <-- NEW: for file locking
import sys
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any, Tuple
from contextlib import contextmanager
from dataclasses import dataclass, asdict
from enum import Enum
import gradio as gr
# 🔥 Close any existing Gradio instances immediately after import
gr.close_all()
from fastapi import FastAPI, HTTPException, Depends, status
from fastapi.middleware.cors import CORSMiddleware
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from pydantic import BaseModel, Field, field_validator, ConfigDict # <-- NEW: ConfigDict
from pydantic_settings import BaseSettings, SettingsConfigDict # <-- NEW: SettingsConfigDict
from gradio import mount_gradio_app
# ============== SINGLE INSTANCE LOCK ==============
LOCK_FILE = '/tmp/arf_app.lock'
try:
lock_fd = open(LOCK_FILE, 'w')
fcntl.flock(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
except (IOError, OSError):
print("Another instance is already running. Exiting.")
sys.exit(1)
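# Note: LOCK_EX | LOCK_NB requests an exclusive lock without blocking, so a second
# process started while the lock file is held fails the flock() call above and exits.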
# ==================================================
# ============== CONFIGURATION (Pydantic V2) ==============
class Settings(BaseSettings):
"""Centralized configuration using Pydantic Settings V2"""
# Hugging Face settings (aliased to match expected env vars)
hf_space_id: str = Field(default='local', alias='SPACE_ID')
hf_token: str = Field(default='', alias='HF_TOKEN')
# Persistence - HF persistent storage
data_dir: str = Field(
default='/data' if os.path.exists('/data') else './data',
alias='DATA_DIR'
)
# Lead generation
lead_email: str = "petter2025us@outlook.com"
calendly_url: str = "https://calendly.com/petter2025us/arf-demo"
# Webhook for lead alerts (set in HF secrets)
slack_webhook: str = Field(default='', alias='SLACK_WEBHOOK')
sendgrid_api_key: str = Field(default='', alias='SENDGRID_API_KEY')
# Security
api_key: str = Field(
default_factory=lambda: str(uuid.uuid4()),
alias='ARF_API_KEY'
)
# ARF defaults
default_confidence_threshold: float = 0.9
default_max_risk: str = "MEDIUM"
# Pydantic V2 configuration
model_config = SettingsConfigDict(
populate_by_name=True, # allows use of field names or aliases
extra='ignore', # ignore extra env vars
env_prefix='', # no prefix
case_sensitive=False # case-insensitive matching
)
def __init__(self, **kwargs):
super().__init__(**kwargs)
# Ensure data directory exists
os.makedirs(self.data_dir, exist_ok=True)
settings = Settings()
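# Illustrative example of the environment variables / HF Space secrets this Settings
# class reads through its field aliases (values are placeholders, not real credentials):
#
#   SPACE_ID=your-username/arf-oss-demo
#   HF_TOKEN=hf_xxxxxxxxxxxxxxxx
#   DATA_DIR=/data
#   SLACK_WEBHOOK=https://hooks.slack.com/services/T000/B000/XXXXXXXX
#   SENDGRID_API_KEY=SG.xxxxxxxxxxxxxxxx
#   ARF_API_KEY=choose-a-long-random-string   # falls back to a random UUID if unset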
# ============== LOGGING ==============
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
handlers=[
logging.FileHandler(f'{settings.data_dir}/arf.log'),
logging.StreamHandler()
]
)
logger = logging.getLogger('arf.oss')
# ============== ENUMS & TYPES ==============
class RiskLevel(str, Enum):
LOW = "LOW"
MEDIUM = "MEDIUM"
HIGH = "HIGH"
CRITICAL = "CRITICAL"
class ExecutionLevel(str, Enum):
AUTONOMOUS_LOW = "AUTONOMOUS_LOW"
AUTONOMOUS_HIGH = "AUTONOMOUS_HIGH"
SUPERVISED = "SUPERVISED"
OPERATOR_REVIEW = "OPERATOR_REVIEW"
class LeadSignal(str, Enum):
HIGH_RISK_BLOCKED = "high_risk_blocked"
NOVEL_ACTION = "novel_action"
POLICY_VIOLATION = "policy_violation"
CONFIDENCE_LOW = "confidence_low"
REPEATED_FAILURE = "repeated_failure"
# ============== REAL ARF BAYESIAN ENGINE ==============
class BayesianRiskEngine:
"""
True Bayesian inference with conjugate priors
Matches ARF OSS production implementation
"""
def __init__(self):
# Beta-Binomial conjugate prior
self.prior_alpha = 2.0
self.prior_beta = 5.0
self.action_priors = {
'database': {'alpha': 1.5, 'beta': 8.0},
'network': {'alpha': 3.0, 'beta': 4.0},
'compute': {'alpha': 4.0, 'beta': 3.0},
'security': {'alpha': 2.0, 'beta': 6.0},
'default': {'alpha': 2.0, 'beta': 5.0}
}
self.evidence_db = f"{settings.data_dir}/evidence.db"
self._init_db()
def _init_db(self):
"""Initialize SQLite DB for evidence storage"""
try:
with self._get_db() as conn:
conn.execute('''
CREATE TABLE IF NOT EXISTS evidence (
id TEXT PRIMARY KEY,
action_type TEXT,
action_hash TEXT,
success INTEGER,
total INTEGER,
timestamp TEXT,
metadata TEXT
)
''')
conn.execute('''
CREATE INDEX IF NOT EXISTS idx_action_hash
ON evidence(action_hash)
''')
except sqlite3.Error as e:
logger.error(f"Failed to initialize evidence database: {e}")
raise RuntimeError("Could not initialize evidence storage") from e
@contextmanager
def _get_db(self):
conn = None
try:
conn = sqlite3.connect(self.evidence_db)
yield conn
except sqlite3.Error as e:
logger.error(f"Database error: {e}")
raise
finally:
if conn:
conn.close()
def classify_action(self, action_text: str) -> str:
action_lower = action_text.lower()
if any(word in action_lower for word in ['database', 'db', 'sql', 'table', 'drop', 'delete']):
return 'database'
elif any(word in action_lower for word in ['network', 'firewall', 'load balancer']):
return 'network'
elif any(word in action_lower for word in ['pod', 'container', 'deploy', 'scale']):
return 'compute'
elif any(word in action_lower for word in ['security', 'cert', 'key', 'access']):
return 'security'
else:
return 'default'
def get_prior(self, action_type: str) -> Tuple[float, float]:
prior = self.action_priors.get(action_type, self.action_priors['default'])
return prior['alpha'], prior['beta']
def get_evidence(self, action_hash: str) -> Tuple[int, int]:
try:
with self._get_db() as conn:
cursor = conn.execute(
'SELECT SUM(success), SUM(total) FROM evidence WHERE action_hash = ?',
(action_hash[:50],)
)
row = cursor.fetchone()
return (row[0] or 0, row[1] or 0) if row else (0, 0)
except sqlite3.Error as e:
logger.error(f"Failed to retrieve evidence: {e}")
return (0, 0)
def calculate_posterior(self,
action_text: str,
context: Dict[str, Any]) -> Dict[str, Any]:
action_type = self.classify_action(action_text)
alpha0, beta0 = self.get_prior(action_type)
action_hash = hashlib.sha256(action_text.encode()).hexdigest()
successes, trials = self.get_evidence(action_hash)
alpha_n = alpha0 + successes
beta_n = beta0 + (trials - successes)
posterior_mean = alpha_n / (alpha_n + beta_n)
context_multiplier = self._context_likelihood(context)
risk_score = posterior_mean * context_multiplier
risk_score = min(0.99, max(0.01, risk_score))
variance = (alpha_n * beta_n) / ((alpha_n + beta_n)**2 * (alpha_n + beta_n + 1))
std_dev = variance ** 0.5
ci_lower = max(0.01, posterior_mean - 1.96 * std_dev)
ci_upper = min(0.99, posterior_mean + 1.96 * std_dev)
if risk_score > 0.8:
risk_level = RiskLevel.CRITICAL
elif risk_score > 0.6:
risk_level = RiskLevel.HIGH
elif risk_score > 0.4:
risk_level = RiskLevel.MEDIUM
else:
risk_level = RiskLevel.LOW
return {
"score": risk_score,
"level": risk_level,
"credible_interval": [ci_lower, ci_upper],
"posterior_parameters": {"alpha": alpha_n, "beta": beta_n},
"prior_used": {"alpha": alpha0, "beta": beta0, "type": action_type},
"evidence_used": {"successes": successes, "trials": trials},
"context_multiplier": context_multiplier,
"calculation": f"""
Posterior = Beta(ฮฑ={alpha_n:.1f}, ฮฒ={beta_n:.1f})
Mean = {alpha_n:.1f} / ({alpha_n:.1f} + {beta_n:.1f}) = {posterior_mean:.3f}
ร— Context multiplier {context_multiplier:.2f} = {risk_score:.3f}
"""
}
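    # Worked example with illustrative numbers: a 'database' action with 3 recorded
    # successes in 10 trials updates the Beta(1.5, 8.0) prior to
    # Beta(alpha=1.5+3, beta=8.0+7) = Beta(4.5, 15.0), mean = 4.5 / 19.5 ≈ 0.231.
    # A production context multiplier of 1.5 then gives risk_score ≈ 0.346,
    # which maps to RiskLevel.LOW under the score thresholds in calculate_posterior above.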
def _context_likelihood(self, context: Dict) -> float:
multiplier = 1.0
if context.get('environment') == 'production':
multiplier *= 1.5
elif context.get('environment') == 'staging':
multiplier *= 0.8
hour = datetime.now().hour
if hour < 6 or hour > 22:
multiplier *= 1.3
if context.get('user_role') == 'junior':
multiplier *= 1.4
elif context.get('user_role') == 'senior':
multiplier *= 0.9
if not context.get('backup_available', True):
multiplier *= 1.6
return multiplier
def record_outcome(self, action_text: str, success: bool):
action_hash = hashlib.sha256(action_text.encode()).hexdigest()
action_type = self.classify_action(action_text)
try:
with self._get_db() as conn:
conn.execute('''
INSERT INTO evidence (id, action_type, action_hash, success, total, timestamp)
VALUES (?, ?, ?, ?, ?, ?)
''', (
str(uuid.uuid4()),
action_type,
action_hash[:50],
1 if success else 0,
1,
datetime.utcnow().isoformat()
))
conn.commit()
logger.info(f"Recorded outcome for {action_type}: success={success}")
except sqlite3.Error as e:
logger.error(f"Failed to record outcome: {e}")
# ============== POLICY ENGINE ==============
class PolicyEngine:
def __init__(self):
self.config = {
"confidence_threshold": settings.default_confidence_threshold,
"max_autonomous_risk": settings.default_max_risk,
"risk_thresholds": {
RiskLevel.LOW: 0.7,
RiskLevel.MEDIUM: 0.5,
RiskLevel.HIGH: 0.3,
RiskLevel.CRITICAL: 0.1
},
"destructive_patterns": [
r'\bdrop\s+database\b',
r'\bdelete\s+from\b',
r'\btruncate\b',
r'\balter\s+table\b',
r'\bdrop\s+table\b',
r'\bshutdown\b',
r'\bterminate\b',
r'\brm\s+-rf\b'
],
"require_human": [RiskLevel.CRITICAL, RiskLevel.HIGH],
"require_rollback": True
}
def evaluate(self,
action: str,
risk: Dict[str, Any],
confidence: float) -> Dict[str, Any]:
gates = []
# Gate 1: Confidence threshold
confidence_passed = confidence >= self.config["confidence_threshold"]
gates.append({
"gate": "confidence_threshold",
"passed": confidence_passed,
"threshold": self.config["confidence_threshold"],
"actual": confidence,
"reason": f"Confidence {confidence:.2f} {'โ‰ฅ' if confidence_passed else '<'} threshold {self.config['confidence_threshold']}",
"type": "numerical"
})
# Gate 2: Risk level
risk_levels = list(RiskLevel)
max_idx = risk_levels.index(RiskLevel(self.config["max_autonomous_risk"]))
action_idx = risk_levels.index(risk["level"])
risk_passed = action_idx <= max_idx
gates.append({
"gate": "risk_assessment",
"passed": risk_passed,
"max_allowed": self.config["max_autonomous_risk"],
"actual": risk["level"].value,
"reason": f"Risk level {risk['level'].value} {'โ‰ค' if risk_passed else '>'} max autonomous {self.config['max_autonomous_risk']}",
"type": "categorical",
"metadata": {
"risk_score": risk["score"],
"credible_interval": risk["credible_interval"]
}
})
# Gate 3: Destructive check
import re
is_destructive = any(
re.search(pattern, action.lower())
for pattern in self.config["destructive_patterns"]
)
gates.append({
"gate": "destructive_check",
"passed": not is_destructive,
"is_destructive": is_destructive,
"reason": "Non-destructive operation" if not is_destructive else "Destructive operation detected",
"type": "boolean",
"metadata": {"requires_rollback": is_destructive}
})
# Gate 4: Human review requirement
requires_human = risk["level"] in self.config["require_human"]
gates.append({
"gate": "human_review",
"passed": not requires_human,
"requires_human": requires_human,
"reason": "Human review not required" if not requires_human else f"Human review required for {risk['level'].value} risk",
"type": "boolean"
})
# Gate 5: OSS license (always passes in OSS)
gates.append({
"gate": "license_check",
"passed": True,
"edition": "OSS",
"reason": "OSS edition - advisory only",
"type": "license"
})
all_passed = all(g["passed"] for g in gates)
if not all_passed:
required_level = ExecutionLevel.OPERATOR_REVIEW
elif risk["level"] == RiskLevel.LOW:
required_level = ExecutionLevel.AUTONOMOUS_LOW
elif risk["level"] == RiskLevel.MEDIUM:
required_level = ExecutionLevel.AUTONOMOUS_HIGH
else:
required_level = ExecutionLevel.SUPERVISED
return {
"allowed": all_passed,
"required_level": required_level.value,
"gates": gates,
"advisory_only": True,
"oss_disclaimer": "OSS edition provides advisory only. Enterprise adds execution."
}
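    # Illustrative example: for the action "drop table users" with confidence 0.95,
    # the destructive_check gate fails (r'\bdrop\s+table\b' matches), so all_passed
    # is False and the required level falls back to OPERATOR_REVIEW even though the
    # confidence gate passes.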
def update_config(self, key: str, value: Any):
if key in self.config:
self.config[key] = value
logger.info(f"Policy updated: {key} = {value}")
return True
return False
# ============== RAG MEMORY WITH PERSISTENCE ==============
class RAGMemory:
def __init__(self):
self.db_path = f"{settings.data_dir}/memory.db"
self._init_db()
self.embedding_cache = {}
def _init_db(self):
try:
with self._get_db() as conn:
conn.execute('''
CREATE TABLE IF NOT EXISTS incidents (
id TEXT PRIMARY KEY,
action TEXT,
action_hash TEXT,
risk_score REAL,
risk_level TEXT,
confidence REAL,
allowed BOOLEAN,
gates TEXT,
timestamp TEXT,
embedding TEXT
)
''')
conn.execute('''
CREATE TABLE IF NOT EXISTS signals (
id TEXT PRIMARY KEY,
signal_type TEXT,
action TEXT,
risk_score REAL,
metadata TEXT,
timestamp TEXT,
contacted BOOLEAN DEFAULT 0
)
''')
conn.execute('CREATE INDEX IF NOT EXISTS idx_action_hash ON incidents(action_hash)')
conn.execute('CREATE INDEX IF NOT EXISTS idx_signal_type ON signals(signal_type)')
conn.execute('CREATE INDEX IF NOT EXISTS idx_signal_contacted ON signals(contacted)')
except sqlite3.Error as e:
logger.error(f"Failed to initialize memory database: {e}")
raise RuntimeError("Could not initialize memory storage") from e
@contextmanager
def _get_db(self):
conn = None
try:
conn = sqlite3.connect(self.db_path)
conn.row_factory = sqlite3.Row
yield conn
except sqlite3.Error as e:
logger.error(f"Database error in memory: {e}")
raise
finally:
if conn:
conn.close()
def _simple_embedding(self, text: str) -> List[float]:
if text in self.embedding_cache:
return self.embedding_cache[text]
words = text.lower().split()
trigrams = set()
for word in words:
for i in range(len(word) - 2):
trigrams.add(word[i:i+3])
        # Stable hash (the built-in hash() is salted per process), so embeddings
        # persisted in SQLite stay comparable across restarts
        vector = [int(hashlib.sha256(t.encode()).hexdigest(), 16) % 1000 / 1000.0 for t in sorted(trigrams)[:100]]
while len(vector) < 100:
vector.append(0.0)
vector = vector[:100]
self.embedding_cache[text] = vector
return vector
def store_incident(self,
action: str,
risk_score: float,
risk_level: RiskLevel,
confidence: float,
allowed: bool,
gates: List[Dict]):
action_hash = hashlib.sha256(action.encode()).hexdigest()[:50]
embedding = json.dumps(self._simple_embedding(action))
try:
with self._get_db() as conn:
conn.execute('''
INSERT INTO incidents
(id, action, action_hash, risk_score, risk_level, confidence, allowed, gates, timestamp, embedding)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
''', (
str(uuid.uuid4()),
action[:500],
action_hash,
risk_score,
risk_level.value,
confidence,
1 if allowed else 0,
json.dumps(gates),
datetime.utcnow().isoformat(),
embedding
))
conn.commit()
except sqlite3.Error as e:
logger.error(f"Failed to store incident: {e}")
def find_similar(self, action: str, limit: int = 5) -> List[Dict]:
query_embedding = self._simple_embedding(action)
try:
with self._get_db() as conn:
cursor = conn.execute('''
SELECT * FROM incidents
ORDER BY timestamp DESC
LIMIT 100
''')
incidents = []
for row in cursor.fetchall():
stored_embedding = json.loads(row['embedding'])
dot = sum(q * s for q, s in zip(query_embedding, stored_embedding))
norm_q = sum(q*q for q in query_embedding) ** 0.5
norm_s = sum(s*s for s in stored_embedding) ** 0.5
similarity = dot / (norm_q * norm_s) if (norm_q > 0 and norm_s > 0) else 0
incidents.append({
'id': row['id'],
'action': row['action'],
'risk_score': row['risk_score'],
'risk_level': row['risk_level'],
'confidence': row['confidence'],
'allowed': bool(row['allowed']),
'timestamp': row['timestamp'],
'similarity': similarity
})
incidents.sort(key=lambda x: x['similarity'], reverse=True)
return incidents[:limit]
except sqlite3.Error as e:
logger.error(f"Failed to find similar incidents: {e}")
return []
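    # Note: the similarity above is plain cosine similarity over the 100-dim trigram
    # vectors, sim(q, s) = (q · s) / (||q|| * ||s||), defined as 0 when either norm is 0.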
def track_enterprise_signal(self,
signal_type: LeadSignal,
action: str,
risk_score: float,
metadata: Dict = None):
signal = {
'id': str(uuid.uuid4()),
'signal_type': signal_type.value,
'action': action[:200],
'risk_score': risk_score,
'metadata': json.dumps(metadata or {}),
'timestamp': datetime.utcnow().isoformat(),
'contacted': 0
}
try:
with self._get_db() as conn:
conn.execute('''
INSERT INTO signals
(id, signal_type, action, risk_score, metadata, timestamp, contacted)
VALUES (?, ?, ?, ?, ?, ?, ?)
''', (
signal['id'],
signal['signal_type'],
signal['action'],
signal['risk_score'],
signal['metadata'],
signal['timestamp'],
signal['contacted']
))
conn.commit()
except sqlite3.Error as e:
logger.error(f"Failed to track signal: {e}")
return None
logger.info(f"๐Ÿ”” Enterprise signal: {signal_type.value} - {action[:50]}...")
if signal_type in [LeadSignal.HIGH_RISK_BLOCKED, LeadSignal.NOVEL_ACTION]:
self._notify_sales_team(signal)
return signal
def _notify_sales_team(self, signal: Dict):
if settings.slack_webhook:
try:
requests.post(settings.slack_webhook, json={
"text": f"๐Ÿšจ *Enterprise Lead Signal*\n"
f"Type: {signal['signal_type']}\n"
f"Action: {signal['action']}\n"
f"Risk Score: {signal['risk_score']:.2f}\n"
f"Time: {signal['timestamp']}\n"
f"Contact: {settings.lead_email}"
}, timeout=5)
except requests.RequestException as e:
logger.error(f"Slack notification failed: {e}")
def get_uncontacted_signals(self) -> List[Dict]:
try:
with self._get_db() as conn:
cursor = conn.execute('''
SELECT * FROM signals
WHERE contacted = 0
ORDER BY timestamp DESC
''')
signals = []
for row in cursor.fetchall():
signals.append({
'id': row['id'],
'signal_type': row['signal_type'],
'action': row['action'],
'risk_score': row['risk_score'],
'metadata': json.loads(row['metadata']),
'timestamp': row['timestamp']
})
return signals
except sqlite3.Error as e:
logger.error(f"Failed to get uncontacted signals: {e}")
return []
def mark_contacted(self, signal_id: str):
try:
with self._get_db() as conn:
conn.execute('UPDATE signals SET contacted = 1 WHERE id = ?', (signal_id,))
conn.commit()
except sqlite3.Error as e:
logger.error(f"Failed to mark signal as contacted: {e}")
# ============== AUTHENTICATION ==============
security = HTTPBearer()
async def verify_api_key(credentials: HTTPAuthorizationCredentials = Depends(security)):
    # Constant-time comparison so the API key cannot be probed via response timing
    if not hmac.compare_digest(credentials.credentials.encode(), settings.api_key.encode()):
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Invalid API key"
        )
    return credentials.credentials
# ============== PYDANTIC MODELS ==============
class ActionRequest(BaseModel):
proposedAction: str = Field(..., min_length=1, max_length=1000)
confidenceScore: float = Field(..., ge=0.0, le=1.0)
riskLevel: RiskLevel
description: Optional[str] = None
requiresHuman: bool = False
rollbackFeasible: bool = True
user_role: str = "devops"
session_id: Optional[str] = None
@field_validator('proposedAction')
@classmethod
def validate_action(cls, v: str) -> str:
if len(v.strip()) == 0:
raise ValueError('Action cannot be empty')
return v
class ConfigUpdateRequest(BaseModel):
confidenceThreshold: Optional[float] = Field(None, ge=0.5, le=1.0)
maxAutonomousRisk: Optional[RiskLevel] = None
class GateResult(BaseModel):
gate: str
reason: str
passed: bool
threshold: Optional[float] = None
    actual: Optional[Any] = None  # may be numeric (confidence) or categorical (risk level)
type: str = "boolean"
metadata: Optional[Dict] = None
class EvaluationResponse(BaseModel):
allowed: bool
requiredLevel: str
gatesTriggered: List[GateResult]
shouldEscalate: bool
escalationReason: Optional[str] = None
executionLadder: Optional[Dict] = None
oss_disclaimer: str = "OSS edition provides advisory only. Enterprise adds mechanical gates and execution."
class LeadSignalResponse(BaseModel):
id: str
signal_type: str
action: str
risk_score: float
timestamp: str
metadata: Dict
# ============== FASTAPI SETUP ==============
app = FastAPI(
title="ARF OSS Real Engine",
version="3.3.9",
description="Real ARF OSS components for enterprise lead generation",
contact={
"name": "ARF Sales",
"email": settings.lead_email,
}
)
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Initialize ARF components
risk_engine = BayesianRiskEngine()
policy_engine = PolicyEngine()
memory = RAGMemory()
# ============== API ENDPOINTS (with authentication) ==============
@app.get("/health")
async def health_check():
"""Public health check endpoint (no auth required)"""
return {
"status": "healthy",
"version": "3.3.9",
"edition": "OSS",
"memory_entries": len(memory.get_uncontacted_signals()),
"timestamp": datetime.utcnow().isoformat()
}
@app.get("/api/v1/config", dependencies=[Depends(verify_api_key)])
async def get_config():
"""Get current ARF configuration (protected)"""
return {
"confidenceThreshold": policy_engine.config["confidence_threshold"],
"maxAutonomousRisk": policy_engine.config["max_autonomous_risk"],
"riskScoreThresholds": policy_engine.config["risk_thresholds"],
"version": "3.3.9",
"edition": "OSS"
}
@app.post("/api/v1/config", dependencies=[Depends(verify_api_key)])
async def update_config(config: ConfigUpdateRequest):
"""Update ARF configuration (protected)"""
    if config.confidenceThreshold is not None:
        policy_engine.update_config("confidence_threshold", config.confidenceThreshold)
    if config.maxAutonomousRisk is not None:
        policy_engine.update_config("max_autonomous_risk", config.maxAutonomousRisk.value)
return await get_config()
@app.post("/api/v1/evaluate", dependencies=[Depends(verify_api_key)], response_model=EvaluationResponse)
async def evaluate_action(request: ActionRequest):
"""
Real ARF OSS evaluation pipeline (protected)
"""
try:
context = {
"environment": "production",
"user_role": request.user_role,
"backup_available": request.rollbackFeasible,
"requires_human": request.requiresHuman
}
risk = risk_engine.calculate_posterior(
action_text=request.proposedAction,
context=context
)
policy = policy_engine.evaluate(
action=request.proposedAction,
risk=risk,
confidence=request.confidenceScore
)
similar = memory.find_similar(request.proposedAction, limit=3)
if not policy["allowed"] and risk["score"] > 0.7:
memory.track_enterprise_signal(
signal_type=LeadSignal.HIGH_RISK_BLOCKED,
action=request.proposedAction,
risk_score=risk["score"],
metadata={
"confidence": request.confidenceScore,
"risk_level": risk["level"].value,
"failed_gates": [g["gate"] for g in policy["gates"] if not g["passed"]]
}
)
if len(similar) < 2 and risk["score"] > 0.6:
memory.track_enterprise_signal(
signal_type=LeadSignal.NOVEL_ACTION,
action=request.proposedAction,
risk_score=risk["score"],
metadata={"similar_count": len(similar)}
)
memory.store_incident(
action=request.proposedAction,
risk_score=risk["score"],
risk_level=risk["level"],
confidence=request.confidenceScore,
allowed=policy["allowed"],
gates=policy["gates"]
)
gates = []
for g in policy["gates"]:
gates.append(GateResult(
gate=g["gate"],
reason=g["reason"],
passed=g["passed"],
threshold=g.get("threshold"),
actual=g.get("actual"),
type=g.get("type", "boolean"),
metadata=g.get("metadata")
))
execution_ladder = {
"levels": [
{"name": "AUTONOMOUS_LOW", "required": gates[0].passed and gates[1].passed},
{"name": "AUTONOMOUS_HIGH", "required": all(g.passed for g in gates[:3])},
{"name": "SUPERVISED", "required": all(g.passed for g in gates[:4])},
{"name": "OPERATOR_REVIEW", "required": True}
],
"current": policy["required_level"]
}
return EvaluationResponse(
allowed=policy["allowed"],
requiredLevel=policy["required_level"],
gatesTriggered=gates,
shouldEscalate=not policy["allowed"],
escalationReason=None if policy["allowed"] else "Failed mechanical gates",
executionLadder=execution_ladder
)
except Exception as e:
logger.error(f"Evaluation failed: {e}", exc_info=True)
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail="Internal server error during evaluation"
)
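# Illustrative client call for the protected evaluation endpoint; the host/port assume a
# local run on 7860, and the key is whatever ARF_API_KEY is set to in HF secrets:
#
#   import requests
#   resp = requests.post(
#       "http://localhost:7860/api/v1/evaluate",
#       headers={"Authorization": f"Bearer {os.environ['ARF_API_KEY']}"},
#       json={"proposedAction": "scale checkout pods to 10",
#             "confidenceScore": 0.92, "riskLevel": "LOW"},
#       timeout=10,
#   )
#   print(resp.json()["requiredLevel"])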
@app.get("/api/v1/enterprise/signals", dependencies=[Depends(verify_api_key)])
async def get_enterprise_signals(contacted: bool = False):
"""
Get enterprise lead signals (protected endpoint)
"""
try:
        if not contacted:
            signals = memory.get_uncontacted_signals()
        else:
with memory._get_db() as conn:
cursor = conn.execute('''
SELECT * FROM signals
WHERE datetime(timestamp) > datetime('now', '-30 days')
ORDER BY timestamp DESC
''')
signals = []
for row in cursor.fetchall():
signals.append({
'id': row['id'],
'signal_type': row['signal_type'],
'action': row['action'],
'risk_score': row['risk_score'],
'metadata': json.loads(row['metadata']),
'timestamp': row['timestamp'],
'contacted': bool(row['contacted'])
})
return {"signals": signals, "count": len(signals)}
except Exception as e:
logger.error(f"Failed to retrieve signals: {e}")
raise HTTPException(status_code=500, detail="Could not retrieve signals")
@app.post("/api/v1/enterprise/signals/{signal_id}/contact", dependencies=[Depends(verify_api_key)])
async def mark_signal_contacted(signal_id: str):
memory.mark_contacted(signal_id)
return {"status": "success", "message": "Signal marked as contacted"}
@app.get("/api/v1/memory/similar", dependencies=[Depends(verify_api_key)])
async def get_similar_actions(action: str, limit: int = 5):
similar = memory.find_similar(action, limit=limit)
return {"similar": similar, "count": len(similar)}
@app.post("/api/v1/feedback", dependencies=[Depends(verify_api_key)])
async def record_outcome(action: str, success: bool):
risk_engine.record_outcome(action, success)
return {"status": "success", "message": "Outcome recorded"}
# ============== GRADIO LEAD GENERATION UI ==============
def create_lead_gen_ui():
"""Professional lead generation interface (no auth needed for UI)"""
with gr.Blocks(title="ARF OSS - Enterprise Reliability Intelligence") as ui:
gr.HTML(f"""
<div style="padding: 2rem; border-radius: 1rem; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; text-align: center;">
            <h1 style="font-size: 3em; margin-bottom: 0.5rem;">🤖 ARF OSS v3.3.9</h1>
<h2 style="font-size: 1.5em; font-weight: 300; margin-bottom: 2rem;">
Real Bayesian Reliability Intelligence
</h2>
<div style="display: inline-block; background: rgba(255,255,255,0.2); padding: 0.5rem 1rem;
border-radius: 2rem; margin-bottom: 2rem;">
                ⚡ Running REAL ARF OSS Components • No Simulation
</div>
</div>
""")
with gr.Row():
with gr.Column():
gr.HTML("""
<div style="text-align: center; padding: 2rem;">
<h3 style="color: #333; font-size: 2em;">From Bayesian Analysis to Autonomous Execution</h3>
<p style="color: #666; font-size: 1.2em; max-width: 800px; margin: 1rem auto;">
This demo uses real ARF OSS components for risk assessment.
Enterprise adds mechanical gates, learning loops, and governed execution.
</p>
</div>
""")
with gr.Row():
with gr.Column():
gr.HTML("""
<div style="padding: 1.5rem; border-radius: 0.5rem; background: #f8f9fa; border-left: 4px solid #667eea; height: 100%;">
                    <h4>🧮 True Bayesian Inference</h4>
<p>Beta-Binomial conjugate priors with evidence updates</p>
</div>
""")
with gr.Column():
gr.HTML("""
<div style="padding: 1.5rem; border-radius: 0.5rem; background: #f8f9fa; border-left: 4px solid #667eea; height: 100%;">
                    <h4>🛡️ Deterministic Policies</h4>
<p>5 mechanical gates with live configuration</p>
</div>
""")
with gr.Row():
with gr.Column():
gr.HTML("""
<div style="padding: 1.5rem; border-radius: 0.5rem; background: #f8f9fa; border-left: 4px solid #667eea; height: 100%;">
                    <h4>💾 Persistent RAG Memory</h4>
<p>SQLite + vector embeddings for incident recall</p>
</div>
""")
with gr.Column():
gr.HTML("""
<div style="padding: 1.5rem; border-radius: 0.5rem; background: #f8f9fa; border-left: 4px solid #667eea; height: 100%;">
                    <h4>📊 Lead Intelligence</h4>
<p>Automatic enterprise signal detection</p>
</div>
""")
demo_stats = gr.JSON(
label="๐Ÿ“Š Live Demo Statistics",
value={
"active_since": datetime.utcnow().strftime("%Y-%m-%d %H:%M"),
"bayesian_prior": "Beta(2.0, 5.0)",
"memory_size": len(memory.get_uncontacted_signals()),
"enterprise_signals": len(memory.get_uncontacted_signals())
}
)
gr.HTML(f"""
<div style="margin: 3rem 0; padding: 3rem; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
border-radius: 1rem; text-align: center; color: white;">
                <h2 style="font-size: 2.5em; margin-bottom: 1rem;">🚀 Ready for Autonomous Operations?</h2>
<p style="font-size: 1.3em; margin-bottom: 2rem;">
See ARF Enterprise with mechanical gates and execution
</p>
<div style="display: flex; gap: 1rem; justify-content: center; flex-wrap: wrap;">
<a href="mailto:{settings.lead_email}?subject=ARF%20Enterprise%20Demo%20Request&body=I%20saw%20the%20real%20ARF%20OSS%20demo%20and%20would%20like%20to%20discuss%20Enterprise%20capabilities."
style="background: white; color: #667eea; padding: 1rem 2rem; border-radius: 2rem; font-weight: bold; text-decoration: none; display: inline-block; margin: 0.5rem;">
                    📧 {settings.lead_email}
</a>
<a href="{settings.calendly_url}" target="_blank"
style="background: #FFD700; color: #333; padding: 1rem 2rem; border-radius: 2rem; font-weight: bold; text-decoration: none; display: inline-block; margin: 0.5rem;">
                    📅 Schedule Technical Demo
</a>
</div>
<p style="margin-top: 2rem; font-size: 0.9em; opacity: 0.9;">
                ⚡ 30-min technical deep-dive • Live autonomous execution • Enterprise pricing<br>
                🔒 All demos confidential and tailored to your infrastructure
</p>
</div>
""")
gr.HTML(f"""
<div style="text-align: center; padding: 2rem; color: #666; border-top: 1px solid #eee;">
<p>
                📧 <a href="mailto:{settings.lead_email}" style="color: #667eea;">{settings.lead_email}</a> •
                🐙 <a href="https://github.com/petterjuan/agentic-reliability-framework" style="color: #667eea;">GitHub</a>
</p>
<p style="font-size: 0.9rem;">
                © 2026 ARF - Open Source Intelligence, Enterprise Execution<br>
                <span style="font-size: 0.8rem; color: #999;">
                    v3.3.9 • Real Bayesian Inference • Persistent RAG • Lead Intelligence
</span>
</p>
</div>
""")
return ui
# ============== MOUNT GRADIO ON FASTAPI ==============
gradio_ui = create_lead_gen_ui()
app = mount_gradio_app(app, gradio_ui, path="/")
# ============== MAIN ENTRY POINT ==============
if __name__ == "__main__":
import uvicorn
port = int(os.environ.get('PORT', 7860))
    # 🔥 Ensure any lingering Gradio servers are closed before starting
try:
gr.close_all()
    except Exception:
pass
logger.info("="*60)
logger.info("๐Ÿš€ ARF OSS v3.3.9 Starting")
logger.info(f"๐Ÿ“Š Data directory: {settings.data_dir}")
logger.info(f"๐Ÿ“ง Lead email: {settings.lead_email}")
logger.info(f"๐Ÿ”‘ API Key: {settings.api_key[:8]}... (set in HF secrets)")
logger.info(f"๐ŸŒ Serving at: http://0.0.0.0:{port}")
logger.info("="*60)
uvicorn.run(
"hf_demo:app",
host="0.0.0.0",
port=port,
log_level="info",
reload=False
)