# WritingStudio — HuggingFace Spaces entry point (app.py)
"""
HuggingFace Spaces Entry Point
This file is the entry point for HuggingFace Spaces deployment.
It imports and launches the production-grade Writing Studio application.
For local development or self-hosted deployment, you can also use:
python -m writing_studio.main
"""
import os
import sys

# Make the bundled package importable: the project uses a src/ layout,
# so prepend <this file's dir>/src to the module search path.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "src"))

# HuggingFace-Spaces-friendly defaults. setdefault means any variable the
# operator has already exported wins over these values.
_SPACES_DEFAULTS = {
    "ENVIRONMENT": "production",
    "DEBUG": "false",
    "LOG_LEVEL": "INFO",
    "ENABLE_METRICS": "false",  # Disable metrics server on HF Spaces
    "LOG_FORMAT": "text",  # Text logs are easier to read on HF Spaces
}
for _name, _value in _SPACES_DEFAULTS.items():
    os.environ.setdefault(_name, _value)
try:
    # Preferred path: the full production application shipped under src/.
    from writing_studio.core.analyzer import WritingAnalyzer
    from writing_studio.core.config import settings
    from writing_studio.core.exceptions import WritingStudioException
    from writing_studio.utils.logging import logger

    import gradio as gr

    logger.info(f"Starting {settings.app_name} v{settings.app_version}")
    logger.info(f"Environment: {settings.environment}")

    def create_interface() -> gr.Blocks:
        """Create production-grade Gradio interface for HuggingFace Spaces."""
        analyzer = WritingAnalyzer()

        def analyze_wrapper(user_input: str, model_name: str, prompt_pack: str) -> tuple:
            """Wrapper for analysis with error handling.

            Returns a 4-tuple (original, revision, feedback, diff_html).
            On any failure the feedback slot carries the error message and
            the other slots are empty strings, so the UI never crashes.
            """
            try:
                # Guard clause: nothing to analyze.
                if not user_input or not user_input.strip():
                    return (
                        "",
                        "",
                        "⚠️ Please provide some text to analyze.",
                        "",
                    )
                original, revision, feedback, diff_html, metadata = analyzer.analyze_and_compare(
                    user_input, model_name, prompt_pack
                )
                # Append run metadata so users see timing/model at a glance.
                feedback_with_meta = (
                    f"{feedback}\n\n---\n"
                    f"⏱️ Processing time: {metadata['duration']:.2f}s\n"
                    f"🤖 Model: {metadata['model']}"
                )
                return original, revision, feedback_with_meta, diff_html
            except WritingStudioException as e:
                # Known, user-presentable failures raised by the app itself.
                error_msg = f"❌ Error: {e.message}"
                if e.details:
                    error_msg += f"\n\nℹ️ Details: {e.details}"
                logger.error(f"Analysis failed: {error_msg}")
                return "", "", error_msg, ""
            except Exception as e:
                # Last-resort guard so the UI surfaces the error instead of 500ing.
                error_msg = f"❌ Unexpected error: {str(e)}"
                logger.error(f"Unexpected error in analysis: {e}", exc_info=True)
                return "", "", error_msg, ""

        # Create Gradio interface
        with gr.Blocks(
            title=settings.app_name,
            theme=gr.themes.Soft(),
        ) as demo:
            # Markdown is kept at column 0 inside the string so it renders
            # as markdown (indented lines would become code blocks).
            gr.Markdown(
                f"""
# ✍️ {settings.app_name}

**AI-Powered Writing Revision + Comprehensive Rubric Analysis**

Get your text professionally revised by AI and receive detailed feedback across multiple criteria.

**Features:**
- 🤖 **AI-Powered Revision** using FLAN-T5 (instruction-tuned model)
- 🎯 **Real Rubric Scoring** (Clarity, Conciseness, Organization, Evidence, Grammar)
- 📊 **Visual Diff** highlighting all changes
- 📝 **5 Specialized Modes** (General, Literature, Tech Comm, Academic, Creative)
- 💡 **Actionable Feedback** to understand improvements

**Version:** {settings.app_version} | **Model:** FLAN-T5 (instruction-following)
"""
            )

            with gr.Row():
                with gr.Column(scale=2):
                    user_input = gr.Textbox(
                        lines=10,
                        placeholder="Paste your draft here...",
                        label="Your Draft",
                        info=f"Maximum {settings.max_text_length:,} characters",
                    )
                with gr.Column(scale=1):
                    model_name = gr.Textbox(
                        value=settings.default_model,
                        label="AI Model",
                        info="FLAN-T5 (instruction-tuned for revision)",
                    )
                    prompt_pack = gr.Dropdown(
                        choices=analyzer.get_available_prompt_packs(),
                        value="General",
                        label="Revision Mode",
                        info="Select writing context",
                    )

            run_btn = gr.Button("✨ Revise & Analyze", variant="primary", size="lg")

            gr.Markdown("## 📊 Results")
            with gr.Row():
                original = gr.Textbox(
                    lines=12,
                    label="📄 Original Text",
                    interactive=False,
                )
                revision = gr.Textbox(
                    lines=12,
                    label="🤖 AI-Revised Text",
                    interactive=False,
                )
            feedback = gr.Textbox(
                lines=10,
                label="📊 Rubric Analysis",
                info="Detailed scoring across 5 writing criteria",
                interactive=False,
            )
            diff_html = gr.HTML(label="🔍 Changes Highlighted")

            # Wire up the button
            run_btn.click(
                fn=analyze_wrapper,
                inputs=[user_input, model_name, prompt_pack],
                outputs=[original, revision, feedback, diff_html],
            )

            # Add footer
            gr.Markdown(
                """
---
### 💡 How to Use
1. **Paste your text** in the input box
2. **Choose a revision mode** (General, Literature, Tech Comm, Academic, or Creative)
3. **Click "Revise & Analyze"**
4. **Review the AI revision** - see what improved
5. **Check the rubric scores** - understand the analysis
6. **View the diff** - see exactly what changed

### 🤖 About the AI Model
**FLAN-T5** is an instruction-tuned model specifically trained to follow revision instructions.
Unlike GPT-2 (text continuation), FLAN-T5 actually understands and executes revision tasks.

**First analysis takes ~60s** (model loading), subsequent analyses are much faster!

### 📊 Revision Modes
- **General** - Improve clarity and readability
- **Literature** - Strengthen literary analysis
- **Tech Comm** - Enhance technical precision
- **Academic** - Improve formal scholarly tone
- **Creative** - Enhance imagery and engagement

### 📚 Documentation
- [GitHub Repository](https://github.com/yourusername/writing-studio)
- [User Guide](https://github.com/yourusername/writing-studio/blob/main/docs/USER_GUIDE.md)

---
Built with [Gradio](https://gradio.app/) • Powered by FLAN-T5 + Custom Rubric Algorithms
"""
            )

        return demo

    # Create and launch the interface
    demo = create_interface()

    # Launch with HuggingFace Spaces friendly settings
    if __name__ == "__main__":
        demo.launch(
            server_name="0.0.0.0",
            server_port=7860,
            share=False,
            show_error=True,
        )

except ImportError as e:
    # Fallback to simple version if production code not available
    print(f"Warning: Could not import production code: {e}")
    print("Falling back to simple version...")

    import difflib

    import gradio as gr
    from transformers import pipeline

    # Emergency fallback: a tiny causal LM, no rubric scoring.
    generator = pipeline("text-generation", model="distilgpt2")

    def simple_analyze(user_text, model_name="distilgpt2"):
        """Simple analysis function.

        Returns (original, revision, feedback, diff_html); model_name is
        accepted for interface compatibility but the fallback pipeline is
        fixed to distilgpt2.
        """
        if not user_text:
            return "", "", "Please provide some text.", ""
        try:
            prompt = f"Revise this text for clarity:\n{user_text}"
            # BUGFIX: max_length counted prompt + generation and broke on long
            # inputs; max_new_tokens bounds only the generated continuation.
            generated = generator(
                prompt, max_new_tokens=200, num_return_sequences=1, do_sample=True
            )[0]["generated_text"]
            # BUGFIX: causal LMs echo the prompt in generated_text; strip it so
            # the "Revision" column shows only the continuation, not the
            # literal instruction text.
            if generated.startswith(prompt):
                revision = generated[len(prompt):].strip()
            else:
                revision = generated
            feedback = "⚠️ Running in fallback mode. Install full version for rubric scoring."
            diff = difflib.HtmlDiff().make_table(
                user_text.splitlines(), revision.splitlines(),
                fromdesc="Original", todesc="AI Revision",
            )
            return user_text, revision, feedback, diff
        except Exception as e:
            # Surface the error in the feedback slot rather than crashing the UI.
            return "", "", f"Error: {str(e)}", ""

    with gr.Blocks(title="AI Writing Studio") as demo:
        gr.Markdown("# ✍️ AI Writing Studio (Simplified)")
        gr.Markdown("⚠️ Running in fallback mode. Some features may be limited.")
        with gr.Row():
            user_input = gr.Textbox(lines=10, placeholder="Paste your draft here...")
            model_name = gr.Textbox(value="distilgpt2", label="Model")
        with gr.Row():
            original = gr.Textbox(lines=12, label="Original")
            revision = gr.Textbox(lines=12, label="Revision")
        feedback = gr.Textbox(lines=8, label="Feedback")
        diff_html = gr.HTML(label="Diff")
        run_btn = gr.Button("Analyze")
        run_btn.click(
            fn=simple_analyze,
            inputs=[user_input, model_name],
            outputs=[original, revision, feedback, diff_html],
        )

    if __name__ == "__main__":
        demo.launch()