"""Main application entry point with Gradio interface."""
import threading
import gradio as gr
from prometheus_client import start_http_server
from writing_studio.core.analyzer import WritingAnalyzer
from writing_studio.core.config import settings
from writing_studio.core.exceptions import WritingStudioException
from writing_studio.utils.logging import logger
from writing_studio.utils.monitoring import health_check
def create_interface() -> gr.Blocks:
    """
    Build the production-grade Gradio interface.

    Returns:
        A configured Gradio Blocks app, ready to be launched.
    """
    analyzer = WritingAnalyzer()

    def analyze_wrapper(user_input: str, model_name: str, prompt_pack: str) -> tuple:
        """
        Run the analyzer and adapt its output for the UI, converting any
        exception into an on-screen error message instead of crashing.

        Args:
            user_input: User's text input
            model_name: Model to use
            prompt_pack: Prompt pack to use

        Returns:
            Tuple of outputs for Gradio interface
        """
        # Guard clause: nothing to analyze.
        if not user_input or not user_input.strip():
            return (
                "",
                "",
                "Error: Please provide some text to analyze.",
                "",
            )
        try:
            original, revision, feedback, diff_html, metadata = analyzer.analyze_and_compare(
                user_input, model_name, prompt_pack
            )
            # Append processing metadata below the rubric feedback.
            feedback_with_meta = f"{feedback}\n\n---\nProcessing time: {metadata['duration']:.2f}s\nModel: {metadata['model']}"
            return original, revision, feedback_with_meta, diff_html
        except WritingStudioException as exc:
            # Domain errors carry a structured message plus optional details.
            message = f"Error: {exc.message}"
            if exc.details:
                message += f"\nDetails: {exc.details}"
            logger.error(f"Analysis failed: {message}")
            return "", "", message, ""
        except Exception as exc:
            # Last-resort boundary: surface the error text, keep the UI alive.
            message = f"Unexpected error: {str(exc)}"
            logger.error(f"Unexpected error in analysis: {exc}", exc_info=True)
            return "", "", message, ""

    # Assemble the Blocks layout; component creation order defines the UI.
    with gr.Blocks(
        title=settings.app_name,
        theme=gr.themes.Soft(),
    ) as demo:
        gr.Markdown(
            f"""
    # {settings.app_name}
    Compare drafts, get rubric-based feedback, and reflect on revisions.
    **Version:** {settings.app_version} | **Environment:** {settings.environment}
    """
        )

        with gr.Row():
            with gr.Column(scale=2):
                draft_box = gr.Textbox(
                    lines=10,
                    placeholder="Paste your draft here...",
                    label="Your Draft",
                    info=f"Maximum {settings.max_text_length} characters",
                )
            with gr.Column(scale=1):
                model_box = gr.Textbox(
                    value=settings.default_model,
                    label="Model (HuggingFace ID)",
                    info="e.g., distilgpt2, gpt2",
                )
                pack_dropdown = gr.Dropdown(
                    choices=analyzer.get_available_prompt_packs(),
                    value="General",
                    label="Prompt Pack",
                    info="Select the writing context",
                )

        analyze_btn = gr.Button("Analyze & Compare", variant="primary", size="lg")

        gr.Markdown("## Results")

        with gr.Row():
            original_box = gr.Textbox(
                lines=12,
                label="Original Draft",
                interactive=False,
            )
            revision_box = gr.Textbox(
                lines=12,
                label="AI Suggested Revision",
                interactive=False,
            )

        feedback_box = gr.Textbox(
            lines=8,
            label="Rubric Feedback",
            info="Detailed analysis based on writing criteria",
            interactive=False,
        )

        # Diff pane is always created so the click handler has four outputs,
        # but it is hidden when highlighting is disabled.
        if settings.enable_diff_highlighting:
            diff_pane = gr.HTML(label="Highlighted Differences")
        else:
            diff_pane = gr.HTML(visible=False)

        # Wire the button to the analysis wrapper.
        analyze_btn.click(
            fn=analyze_wrapper,
            inputs=[draft_box, model_box, pack_dropdown],
            outputs=[original_box, revision_box, feedback_box, diff_pane],
        )

        # Footer with usage tips.
        gr.Markdown(
            """
    ---
    **Tips:**
    - Start with shorter texts for faster results
    - Try different prompt packs for specialized feedback
    - Review the rubric feedback to understand strengths and areas for improvement
    """
        )

    return demo
def start_metrics_server() -> None:
    """Start the Prometheus metrics HTTP server; no-op when metrics are disabled."""
    if not settings.enable_metrics:
        return
    try:
        logger.info(f"Starting metrics server on port {settings.metrics_port}")
        start_http_server(settings.metrics_port)
        logger.info("Metrics server started successfully")
    except Exception as exc:
        # A failed metrics server should never take down the app itself.
        logger.error(f"Failed to start metrics server: {exc}")
def main() -> None:
    """Application entry point: log startup, start metrics, health-check, launch UI."""
    logger.info(f"Starting {settings.app_name} v{settings.app_version}")
    logger.info(f"Environment: {settings.environment}")
    logger.info(f"Debug mode: {settings.debug}")

    # Run the Prometheus endpoint on a daemon thread so it never blocks
    # process shutdown.
    if settings.enable_metrics:
        threading.Thread(target=start_metrics_server, daemon=True).start()

    # Report health before launch; an unhealthy status is logged but not fatal.
    status = health_check.check_health()
    logger.info(f"Health check: {status['status']}")
    if status["status"] == "unhealthy":
        logger.error("Application is unhealthy, but starting anyway...")

    # Build the UI and serve it on the configured host/port.
    interface = create_interface()
    logger.info(f"Launching Gradio interface on {settings.host}:{settings.port}")
    interface.launch(
        server_name=settings.host,
        server_port=settings.port,
        share=False,
        show_error=settings.debug,
    )
# Run the application only when executed as a script, not on import.
if __name__ == "__main__":
    main()