# prof-demo / src/api/main.py
# Source: Hugging Face Space upload by sbicy — commit deff797 ("Upload 17 files", verified)
from fastapi import FastAPI
from fastapi.responses import HTMLResponse
from pathlib import Path
from .routes import app
@app.get("/", response_class=HTMLResponse)
async def root():
    """Serve the interactive demo page with visualizations.

    Reads ``enhanced_ui.html`` from this package's directory on every
    request, so edits to the page take effect without a server restart.

    Returns:
        str: the raw HTML document, sent as ``text/html`` via HTMLResponse.
    """
    ui_path = Path(__file__).parent / "enhanced_ui.html"
    # Explicit encoding: the original bare open(..., 'r') decoded with the
    # platform locale's default codec, which corrupts non-ASCII content
    # (e.g. emoji in the UI) on non-UTF-8 systems.
    return ui_path.read_text(encoding="utf-8")
@app.get("/simple", response_class=HTMLResponse)
async def simple_demo():
    """Serve the original simple demo page.

    Returns a self-contained HTML page (markup, CSS and JS inline) that
    POSTs to ``/analyze`` and renders the result. All server-returned and
    user-entered values are HTML-escaped client-side before being placed
    into ``innerHTML`` — the original page interpolated them raw, which
    allowed reflected XSS via the analyzed text.

    Returns:
        str: the demo page HTML, sent as ``text/html`` via HTMLResponse.
    """
    return """
<!DOCTYPE html>
<html>
<head>
    <title>Context-Aware Profanity Handler - Interactive Demo</title>
    <style>
        body {
            font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
            max-width: 900px;
            margin: 40px auto;
            padding: 20px;
            background: #f5f5f5;
        }
        .container {
            background: white;
            padding: 30px;
            border-radius: 8px;
            box-shadow: 0 2px 4px rgba(0,0,0,0.1);
        }
        h1 {
            color: #2c3e50;
            border-bottom: 3px solid #3498db;
            padding-bottom: 10px;
        }
        .input-group {
            margin: 20px 0;
        }
        label {
            display: block;
            margin-bottom: 5px;
            font-weight: 600;
            color: #34495e;
        }
        textarea, select {
            width: 100%;
            padding: 10px;
            border: 1px solid #ddd;
            border-radius: 4px;
            font-size: 14px;
            box-sizing: border-box;
        }
        textarea {
            min-height: 100px;
            resize: vertical;
        }
        button {
            background: #3498db;
            color: white;
            padding: 12px 30px;
            border: none;
            border-radius: 4px;
            cursor: pointer;
            font-size: 16px;
            font-weight: 600;
        }
        button:hover {
            background: #2980b9;
        }
        .result {
            margin-top: 20px;
            padding: 20px;
            border-radius: 4px;
            display: none;
        }
        .result.safe {
            background: #d4edda;
            border: 1px solid #c3e6cb;
            color: #155724;
        }
        .result.warning {
            background: #fff3cd;
            border: 1px solid #ffeeba;
            color: #856404;
        }
        .result.error {
            background: #f8d7da;
            border: 1px solid #f5c6cb;
            color: #721c24;
        }
        .example {
            background: #e8f4f8;
            padding: 15px;
            border-radius: 4px;
            margin: 20px 0;
            border-left: 4px solid #3498db;
        }
        .example h3 {
            margin-top: 0;
            color: #2c3e50;
        }
        .badge {
            display: inline-block;
            padding: 4px 8px;
            border-radius: 3px;
            font-size: 12px;
            font-weight: 600;
            margin-right: 5px;
        }
        .badge.safe { background: #d4edda; color: #155724; }
        .badge.mild { background: #fff3cd; color: #856404; }
        .badge.explicit { background: #f8d7da; color: #721c24; }
        .badge.slur { background: #f5c6cb; color: #721c24; }
        .badge.threat { background: #f5c6cb; color: #721c24; }
    </style>
</head>
<body>
    <div class="container">
        <h1>🧩 Context-Aware Profanity Handler</h1>
        <p>A demonstration of context-aware profanity detection and handling for AI-assisted reporting.</p>
        <div class="example">
            <h3>💡 Example Use Case</h3>
            <p><strong>Input:</strong> "Report on asset: <em>Do You Want to F*** Me Tonight</em>"</p>
            <p><strong>Context:</strong> Song Title (Entity Name)</p>
            <p><strong>Expected Result:</strong> Detected but allowed, with transparent feedback about safe rendering.</p>
        </div>
        <div class="input-group">
            <label for="text">Text to Analyze:</label>
            <textarea id="text" placeholder="Enter text here, e.g., a song title, brand name, or user input...">Report on asset: Do You Want to Fuck Me Tonight</textarea>
        </div>
        <div class="input-group">
            <label for="context">Content Category:</label>
            <select id="context">
                <option value="song_title">Song Title</option>
                <option value="entity_name">Entity Name</option>
                <option value="brand_name">Brand Name</option>
                <option value="user_input">User Input</option>
            </select>
        </div>
        <div class="input-group">
            <label>
                <input type="checkbox" id="strict_mode"> Strict Mode
            </label>
        </div>
        <button onclick="analyzeText()">Analyze Text</button>
        <div id="result" class="result"></div>
    </div>
    <script>
        // Escape a value for safe interpolation into innerHTML. The analyzed
        // text and the API response are untrusted; injecting them raw allowed
        // reflected XSS (e.g. entering <img onerror=...> as the "song title").
        function escapeHtml(value) {
            return String(value)
                .replace(/&/g, '&amp;')
                .replace(/</g, '&lt;')
                .replace(/>/g, '&gt;')
                .replace(/"/g, '&quot;');
        }
        async function analyzeText() {
            const text = document.getElementById('text').value;
            const context = document.getElementById('context').value;
            const strict_mode = document.getElementById('strict_mode').checked;
            const resultDiv = document.getElementById('result');
            resultDiv.style.display = 'none';
            try {
                const response = await fetch('/analyze', {
                    method: 'POST',
                    headers: {
                        'Content-Type': 'application/json',
                    },
                    body: JSON.stringify({ text, context, strict_mode })
                });
                const data = await response.json();
                let className = 'safe';
                if (data.toxicity_level === 'explicit' || data.toxicity_level === 'slur' || data.toxicity_level === 'threat') {
                    className = 'error';
                } else if (data.toxicity_level === 'mild') {
                    className = 'warning';
                }
                const level = escapeHtml(data.toxicity_level);
                resultDiv.className = 'result ' + className;
                resultDiv.innerHTML = `
                    <h3>Analysis Results</h3>
                    <p><strong>Profanity Detected:</strong> ${data.contains_profanity ? 'Yes' : 'No'}</p>
                    <p><strong>Toxicity Level:</strong> <span class="badge ${level}">${level.toUpperCase()}</span></p>
                    <p><strong>Message:</strong> ${escapeHtml(data.message)}</p>
                    <hr>
                    <p><strong>Original Text:</strong><br><code>${escapeHtml(text)}</code></p>
                    <p><strong>Safe Text:</strong><br><code>${escapeHtml(data.safe_text)}</code></p>
                `;
                resultDiv.style.display = 'block';
            } catch (error) {
                resultDiv.className = 'result error';
                resultDiv.innerHTML = `<p>Error: ${escapeHtml(error.message)}</p>`;
                resultDiv.style.display = 'block';
            }
        }
    </script>
</body>
</html>
"""
if __name__ == "__main__":
    # Dev entry point: run the app with an embedded uvicorn server.
    # Import is local so merely importing this module never requires uvicorn.
    import uvicorn
    # NOTE(review): binds to all interfaces (0.0.0.0) — appropriate for a
    # container/demo Space; confirm before running on a shared host.
    uvicorn.run(app, host="0.0.0.0", port=8000)