File size: 10,103 Bytes
d7f7508
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ead4c16
d7f7508
ead4c16
2d59fd0
ead4c16
 
 
 
 
 
d7f7508
ead4c16
d7f7508
 
 
 
 
 
 
 
 
 
 
 
 
ead4c16
 
 
 
 
 
 
 
 
 
 
 
d7f7508
 
 
 
 
 
ead4c16
d7f7508
 
 
ead4c16
 
d7f7508
 
 
 
ead4c16
 
 
d7f7508
 
 
ead4c16
d7f7508
ead4c16
d7f7508
ead4c16
 
d7f7508
 
 
 
 
 
 
 
ead4c16
2d59fd0
 
ead4c16
 
 
 
 
2d59fd0
ead4c16
2d59fd0
ead4c16
 
2d59fd0
ead4c16
2d59fd0
ead4c16
d7f7508
ead4c16
 
 
 
 
d7f7508
ead4c16
d7f7508
 
ead4c16
d7f7508
 
 
ead4c16
d7f7508
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
970c13f
d7f7508
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
"""

HuggingFace Spaces Entry Point



This file is the entry point for HuggingFace Spaces deployment.

It imports and launches the production-grade Writing Studio application.



For local development or self-hosted deployment, you can also use:

    python -m writing_studio.main

"""

import os
import sys

# Add src to path for imports
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "src"))

# Set HuggingFace Spaces friendly defaults.
# setdefault only applies when the variable is not already set, so an
# explicitly configured environment always wins over these defaults.
os.environ.setdefault("ENVIRONMENT", "production")
os.environ.setdefault("DEBUG", "false")
os.environ.setdefault("LOG_LEVEL", "INFO")
os.environ.setdefault("ENABLE_METRICS", "false")  # Disable metrics server on HF Spaces
os.environ.setdefault("LOG_FORMAT", "text")  # Text logs are easier to read on HF Spaces

try:
    # Try to import the production application.
    # If any of these imports fails, control falls through to the
    # `except ImportError` branch below, which serves a simplified app.
    from writing_studio.core.analyzer import WritingAnalyzer
    from writing_studio.core.config import settings
    from writing_studio.core.exceptions import WritingStudioException
    from writing_studio.utils.logging import logger
    import gradio as gr

    # Startup breadcrumbs so the Space's logs show which build/config is live.
    logger.info(f"Starting {settings.app_name} v{settings.app_version}")
    logger.info(f"Environment: {settings.environment}")

    def create_interface() -> gr.Blocks:
        """Create production-grade Gradio interface for HuggingFace Spaces.

        Builds the full UI (header, input/settings columns, results panels,
        footer) and wires the run button to an error-handling wrapper around
        the WritingAnalyzer.

        Returns:
            gr.Blocks: The assembled, not-yet-launched Gradio application.
        """
        analyzer = WritingAnalyzer()

        def analyze_wrapper(user_input: str, model_name: str, prompt_pack: str) -> tuple:
            """Wrapper for analysis with error handling.

            Args:
                user_input: Draft text to revise and score.
                model_name: Model identifier forwarded to the analyzer.
                prompt_pack: Selected revision mode / prompt-pack name.

            Returns:
                A 4-tuple (original, revision, feedback, diff_html). On any
                failure the first two entries are empty strings and the
                feedback slot carries the error message.
            """
            try:
                # Guard: reject empty or whitespace-only drafts up front.
                if not user_input or not user_input.strip():
                    return (
                        "",
                        "",
                        "⚠️ Please provide some text to analyze.",
                        "",
                    )

                original, revision, feedback, diff_html, metadata = analyzer.analyze_and_compare(
                    user_input, model_name, prompt_pack
                )

                # Format feedback with metadata
                feedback_with_meta = f"{feedback}\n\n---\n⏱️ Processing time: {metadata['duration']:.2f}s\n🤖 Model: {metadata['model']}"

                return original, revision, feedback_with_meta, diff_html

            except WritingStudioException as e:
                # Known application errors: show the message (plus details, if any).
                error_msg = f"❌ Error: {e.message}"
                if e.details:
                    error_msg += f"\n\nℹ️ Details: {e.details}"
                logger.error(f"Analysis failed: {error_msg}")
                return "", "", error_msg, ""

            except Exception as e:
                # Anything unexpected: log the full traceback, show a short summary.
                error_msg = f"❌ Unexpected error: {str(e)}"
                logger.error(f"Unexpected error in analysis: {e}", exc_info=True)
                return "", "", error_msg, ""

        # Create Gradio interface
        with gr.Blocks(
            title=settings.app_name,
            theme=gr.themes.Soft(),
        ) as demo:
            # Header / feature overview banner.
            gr.Markdown(
                f"""

                # ✍️ {settings.app_name}



                **AI-Powered Writing Revision + Comprehensive Rubric Analysis**



                Get your text professionally revised by AI and receive detailed feedback across multiple criteria.



                **Features:**

                - 🤖 **AI-Powered Revision** using FLAN-T5 (instruction-tuned model)

                - 🎯 **Real Rubric Scoring** (Clarity, Conciseness, Organization, Evidence, Grammar)

                - 📊 **Visual Diff** highlighting all changes

                - 📝 **5 Specialized Modes** (General, Literature, Tech Comm, Academic, Creative)

                - 💡 **Actionable Feedback** to understand improvements



                **Version:** {settings.app_version} | **Model:** FLAN-T5 (instruction-following)

                """
            )

            # Input area: draft on the left (wider), settings + run button on the right.
            with gr.Row():
                with gr.Column(scale=2):
                    user_input = gr.Textbox(
                        lines=10,
                        placeholder="Paste your draft here...",
                        label="Your Draft",
                        info=f"Maximum {settings.max_text_length:,} characters",
                    )

                with gr.Column(scale=1):
                    model_name = gr.Textbox(
                        value=settings.default_model,
                        label="AI Model",
                        info="FLAN-T5 (instruction-tuned for revision)",
                    )
                    prompt_pack = gr.Dropdown(
                        choices=analyzer.get_available_prompt_packs(),
                        value="General",
                        label="Revision Mode",
                        info="Select writing context",
                    )
                    run_btn = gr.Button("✨ Revise & Analyze", variant="primary", size="lg")

            gr.Markdown("## 📊 Results")

            # Results: original and AI revision side by side.
            with gr.Row():
                original = gr.Textbox(
                    lines=12,
                    label="📄 Original Text",
                    interactive=False,
                )
                revision = gr.Textbox(
                    lines=12,
                    label="🤖 AI-Revised Text",
                    interactive=False,
                )

            feedback = gr.Textbox(
                lines=10,
                label="📊 Rubric Analysis",
                info="Detailed scoring across 5 writing criteria",
                interactive=False,
            )

            diff_html = gr.HTML(label="🔍 Changes Highlighted")

            # Wire up the button
            run_btn.click(
                fn=analyze_wrapper,
                inputs=[user_input, model_name, prompt_pack],
                outputs=[original, revision, feedback, diff_html],
            )

            # Add footer
            gr.Markdown(
                """

                ---



                ### 💡 How to Use



                1. **Paste your text** in the input box

                2. **Choose a revision mode** (General, Literature, Tech Comm, Academic, or Creative)

                3. **Click "Revise & Analyze"**

                4. **Review the AI revision** - see what improved

                5. **Check the rubric scores** - understand the analysis

                6. **View the diff** - see exactly what changed



                ### 🤖 About the AI Model



                **FLAN-T5** is an instruction-tuned model specifically trained to follow revision instructions.

                Unlike GPT-2 (text continuation), FLAN-T5 actually understands and executes revision tasks.



                **First analysis takes ~60s** (model loading), subsequent analyses are much faster!



                ### 📊 Revision Modes



                - **General** - Improve clarity and readability

                - **Literature** - Strengthen literary analysis

                - **Tech Comm** - Enhance technical precision

                - **Academic** - Improve formal scholarly tone

                - **Creative** - Enhance imagery and engagement



                ### 📚 Documentation



                - [GitHub Repository](https://github.com/yourusername/writing-studio)

                - [User Guide](https://github.com/yourusername/writing-studio/blob/main/docs/USER_GUIDE.md)



                ---



                Built with [Gradio](https://gradio.app/) • Powered by FLAN-T5 + Custom Rubric Algorithms

                """
            )

        return demo

    # Create and launch the interface.
    # NOTE(review): `demo` is deliberately built at import time — the hosting
    # platform appears to import this module and serve the module-level
    # `demo` object; confirm against the Space's runtime docs before moving
    # this under the __main__ guard.
    demo = create_interface()

    # Launch with HuggingFace Spaces friendly settings
    if __name__ == "__main__":
        demo.launch(
            server_name="0.0.0.0",  # bind all interfaces so the container is reachable
            server_port=7860,  # fixed port expected by the hosting environment
            share=False,
            show_error=True,  # surface errors in the UI instead of hiding them
        )

except ImportError as e:
    # Fallback to simple version if production code not available.
    # print (not logger) here because the production logging module may be
    # exactly what failed to import.
    print(f"Warning: Could not import production code: {e}")
    print("Falling back to simple version...")

    import gradio as gr
    from transformers import pipeline
    import difflib

    # Simple version for emergency fallback.
    # Model is loaded eagerly at import time; distilgpt2 keeps this cheap.
    generator = pipeline("text-generation", model="distilgpt2")

    def simple_analyze(user_text, model_name="distilgpt2"):
        """Run a minimal revise-and-diff pass for fallback mode.

        Args:
            user_text: Draft text to revise.
            model_name: Kept for interface compatibility with the UI wiring;
                the module-level ``generator`` pipeline is used regardless.

        Returns:
            A 4-tuple (original, revision, feedback, diff_html) of strings.
            On error the first two entries are empty and feedback carries
            the message.
        """
        # Guard: reject empty or whitespace-only input, mirroring the
        # production wrapper's check (previously "   " slipped through).
        if not user_text or not user_text.strip():
            return "", "", "Please provide some text.", ""

        try:
            prompt = f"Revise this text for clarity:\n{user_text}"
            generated = generator(prompt, max_length=300, num_return_sequences=1, do_sample=True)[0]["generated_text"]

            # text-generation pipelines echo the prompt at the start of
            # `generated_text`; strip it so the "revision" column shows only
            # the newly generated continuation, not the instruction + draft.
            if generated.startswith(prompt):
                revision = generated[len(prompt):].strip()
            else:
                revision = generated

            feedback = "⚠️ Running in fallback mode. Install full version for rubric scoring."

            diff = difflib.HtmlDiff().make_table(
                user_text.splitlines(), revision.splitlines(),
                fromdesc="Original", todesc="AI Revision"
            )

            return user_text, revision, feedback, diff
        except Exception as e:
            # Best-effort fallback UI: report the failure instead of crashing.
            return "", "", f"Error: {str(e)}", ""

    # Minimal fallback UI: same four output slots as the production app.
    with gr.Blocks(title="AI Writing Studio") as demo:
        gr.Markdown("# ✍️ AI Writing Studio (Simplified)")
        gr.Markdown("⚠️ Running in fallback mode. Some features may be limited.")

        # Input row: draft text plus the (display-only) model name.
        with gr.Row():
            user_input = gr.Textbox(lines=10, placeholder="Paste your draft here...")
            model_name = gr.Textbox(value="distilgpt2", label="Model")

        # Output row: original next to the generated revision.
        with gr.Row():
            original = gr.Textbox(lines=12, label="Original")
            revision = gr.Textbox(lines=12, label="Revision")

        feedback = gr.Textbox(lines=8, label="Feedback")
        diff_html = gr.HTML(label="Diff")

        # Wire the button to the fallback analysis function.
        run_btn = gr.Button("Analyze")
        run_btn.click(
            fn=simple_analyze,
            inputs=[user_input, model_name],
            outputs=[original, revision, feedback, diff_html]
        )

    if __name__ == "__main__":
        demo.launch()