from fastapi import FastAPI
from fastapi.responses import HTMLResponse
from pathlib import Path
from .routes import app
@app.get("/", response_class=HTMLResponse)
async def root():
    """Serve the interactive demo page with visualizations.

    Reads ``enhanced_ui.html`` from the package directory on every request,
    so edits to that file show up without restarting the server.

    Returns:
        str: The HTML document, rendered to the client via ``HTMLResponse``.
    """
    ui_path = Path(__file__).parent / "enhanced_ui.html"
    # Explicit UTF-8: the demo pages contain emoji/non-ASCII characters, and
    # the platform default encoding (e.g. cp1252 on Windows) would choke on
    # them. read_text also closes the file for us.
    return ui_path.read_text(encoding="utf-8")
@app.get("/simple", response_class=HTMLResponse)
async def simple_demo():
    """Serve the original simple demo page.

    Returns a single self-contained HTML document (inline CSS and
    JavaScript, no external assets).  The embedded script POSTs the form
    fields as JSON to the ``/analyze`` endpoint and renders the returned
    analysis in place, mapping toxicity levels to colour-coded result
    boxes and badges.
    """
    # NOTE: plain triple-quoted string (not an f-string), so the JS template
    # literals (${...}) below are passed through to the browser untouched.
    return """
    <!DOCTYPE html>
    <html>
    <head>
        <title>Context-Aware Profanity Handler - Interactive Demo</title>
        <style>
            body {
                font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
                max-width: 900px;
                margin: 40px auto;
                padding: 20px;
                background: #f5f5f5;
            }
            .container {
                background: white;
                padding: 30px;
                border-radius: 8px;
                box-shadow: 0 2px 4px rgba(0,0,0,0.1);
            }
            h1 {
                color: #2c3e50;
                border-bottom: 3px solid #3498db;
                padding-bottom: 10px;
            }
            .input-group {
                margin: 20px 0;
            }
            label {
                display: block;
                margin-bottom: 5px;
                font-weight: 600;
                color: #34495e;
            }
            textarea, select {
                width: 100%;
                padding: 10px;
                border: 1px solid #ddd;
                border-radius: 4px;
                font-size: 14px;
                box-sizing: border-box;
            }
            textarea {
                min-height: 100px;
                resize: vertical;
            }
            button {
                background: #3498db;
                color: white;
                padding: 12px 30px;
                border: none;
                border-radius: 4px;
                cursor: pointer;
                font-size: 16px;
                font-weight: 600;
            }
            button:hover {
                background: #2980b9;
            }
            .result {
                margin-top: 20px;
                padding: 20px;
                border-radius: 4px;
                display: none;
            }
            .result.safe {
                background: #d4edda;
                border: 1px solid #c3e6cb;
                color: #155724;
            }
            .result.warning {
                background: #fff3cd;
                border: 1px solid #ffeeba;
                color: #856404;
            }
            .result.error {
                background: #f8d7da;
                border: 1px solid #f5c6cb;
                color: #721c24;
            }
            .example {
                background: #e8f4f8;
                padding: 15px;
                border-radius: 4px;
                margin: 20px 0;
                border-left: 4px solid #3498db;
            }
            .example h3 {
                margin-top: 0;
                color: #2c3e50;
            }
            .badge {
                display: inline-block;
                padding: 4px 8px;
                border-radius: 3px;
                font-size: 12px;
                font-weight: 600;
                margin-right: 5px;
            }
            .badge.safe { background: #d4edda; color: #155724; }
            .badge.mild { background: #fff3cd; color: #856404; }
            .badge.explicit { background: #f8d7da; color: #721c24; }
            .badge.slur { background: #f5c6cb; color: #721c24; }
            .badge.threat { background: #f5c6cb; color: #721c24; }
        </style>
    </head>
    <body>
        <div class="container">
            <h1>🧩 Context-Aware Profanity Handler</h1>
            <p>A demonstration of context-aware profanity detection and handling for AI-assisted reporting.</p>
            <div class="example">
                <h3>💡 Example Use Case</h3>
                <p><strong>Input:</strong> "Report on asset: <em>Do You Want to F*** Me Tonight</em>"</p>
                <p><strong>Context:</strong> Song Title (Entity Name)</p>
                <p><strong>Expected Result:</strong> Detected but allowed, with transparent feedback about safe rendering.</p>
            </div>
            <div class="input-group">
                <label for="text">Text to Analyze:</label>
                <textarea id="text" placeholder="Enter text here, e.g., a song title, brand name, or user input...">Report on asset: Do You Want to Fuck Me Tonight</textarea>
            </div>
            <div class="input-group">
                <label for="context">Content Category:</label>
                <select id="context">
                    <option value="song_title">Song Title</option>
                    <option value="entity_name">Entity Name</option>
                    <option value="brand_name">Brand Name</option>
                    <option value="user_input">User Input</option>
                </select>
            </div>
            <div class="input-group">
                <label>
                    <input type="checkbox" id="strict_mode"> Strict Mode
                </label>
            </div>
            <button onclick="analyzeText()">Analyze Text</button>
            <div id="result" class="result"></div>
        </div>
        <script>
            async function analyzeText() {
                const text = document.getElementById('text').value;
                const context = document.getElementById('context').value;
                const strict_mode = document.getElementById('strict_mode').checked;
                const resultDiv = document.getElementById('result');
                resultDiv.style.display = 'none';
                try {
                    const response = await fetch('/analyze', {
                        method: 'POST',
                        headers: {
                            'Content-Type': 'application/json',
                        },
                        body: JSON.stringify({ text, context, strict_mode })
                    });
                    const data = await response.json();
                    let className = 'safe';
                    if (data.toxicity_level === 'explicit' || data.toxicity_level === 'slur' || data.toxicity_level === 'threat') {
                        className = 'error';
                    } else if (data.toxicity_level === 'mild') {
                        className = 'warning';
                    }
                    resultDiv.className = 'result ' + className;
                    resultDiv.innerHTML = `
                        <h3>Analysis Results</h3>
                        <p><strong>Profanity Detected:</strong> ${data.contains_profanity ? 'Yes' : 'No'}</p>
                        <p><strong>Toxicity Level:</strong> <span class="badge ${data.toxicity_level}">${data.toxicity_level.toUpperCase()}</span></p>
                        <p><strong>Message:</strong> ${data.message}</p>
                        <hr>
                        <p><strong>Original Text:</strong><br><code>${text}</code></p>
                        <p><strong>Safe Text:</strong><br><code>${data.safe_text}</code></p>
                    `;
                    resultDiv.style.display = 'block';
                } catch (error) {
                    resultDiv.className = 'result error';
                    resultDiv.innerHTML = `<p>Error: ${error.message}</p>`;
                    resultDiv.style.display = 'block';
                }
            }
        </script>
    </body>
    </html>
    """
if __name__ == "__main__":
    # Allow running this module directly as a development server.
    # uvicorn is imported lazily so merely importing the module for its
    # routes does not require the server package.
    import uvicorn

    uvicorn.run(app, port=8000, host="0.0.0.0")