Spaces:
Running
Running
#!/usr/bin/env python3
"""
app.py -- Gradio web UI for HF Spaces
--------------------------------------
Provides a browser-based interface to the 3-agent humanizer
pipeline. Designed for deployment on huggingface.co/spaces.
"""
import os
import sys
import logging
import json
import gradio as gr

# Make this file's directory importable so `from main import ...` resolves
# regardless of the working directory the Space is launched from.
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from main import process_text  # updated function name

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("app")
# -- the gradio callback --------------------------------------------------
def humanize_text(input_text):
    """Callback for the 'Humanize' button.

    Args:
        input_text: Raw text pasted by the user; may be None or whitespace.

    Returns:
        A 3-tuple matching the wired outputs (output_box, label_out, conf_out):
        (humanized_text_or_error_message, detection_label, ai_confidence_percent).
    """
    intensity = 1.0  # Force maximum intensity for Pattern Erasure
    if not input_text or not input_text.strip():
        return (
            "Please paste some text first!",
            "N/A",
            0.0,
        )
    try:
        # process_text returns (plan_str, draft, humanized_text, ver_str).
        # Only the final text and the verification string are shown in the UI,
        # so the plan and intermediate draft are deliberately discarded.
        _plan_str, _draft, final, ver_str = process_text(
            input_text.strip(),
            intensity=intensity,
        )
        # Parse the verification string (simple),
        # e.g. "Label: Real\nConfidence: 2.1%".
        # Catch only the errors a malformed string can raise here -- a bare
        # `except:` would also swallow KeyboardInterrupt/SystemExit.
        try:
            lines = ver_str.split("\n")
            label = lines[0].split(":")[1].strip()
            conf = float(lines[1].split(":")[1].strip().replace("%", ""))
        except (AttributeError, IndexError, ValueError):
            label = "Unknown"
            conf = 0.0
        return (
            final,
            label,
            conf,
        )
    except Exception as exc:
        # Top-level UI boundary: log the full traceback, surface a short error.
        logger.exception("pipeline error")
        return (
            f"Error: {exc}",
            "Error",
            0.0,
        )
# -- build the UI ----------------------------------------------------------
# Stylesheet injected into gr.Blocks: caps the app width, centers it, and
# centers the header banner text.
CUSTOM_CSS = """
.gradio-container {
    max-width: 1000px !important;
    margin: auto !important;
}
.header-text {
    text-align: center;
    margin-bottom: 0.5rem;
}
"""

with gr.Blocks(css=CUSTOM_CSS, title="AI Text Humanizer") as demo:
    # Header banner describing the 3-stage pipeline.
    gr.Markdown(
        """
# AI Text Humanizer (Re-Authoring Mode)
### Defeat Detection via fresh content generation
This pipeline doesn't just paraphrase; it **re-authors**.
1. **Planner** extracts core ideas into a wording-free JSON outline.
2. **Writer** builds a fresh draft based *only* on the outline (is blind to original wording).
3. **Humanizer** applies stylometric refinement and iterative evasion checks.
""",
        elem_classes="header-text",
    )
    with gr.Row():
        # Left column: user input plus the action button.
        with gr.Column(scale=1):
            input_box = gr.Textbox(
                label="Paste AI-Generated Text",
                placeholder="Paste the robotic AI text here...",
                lines=10,
            )
            run_btn = gr.Button("Humanize", variant="primary", size="lg")
        # Right column: read-only result textbox.
        with gr.Column(scale=1):
            output_box = gr.Textbox(
                label="Humanized Output",
                lines=10,
                interactive=False,
            )
    with gr.Row():
        # Detector verdict and its confidence for the humanized output.
        label_out = gr.Textbox(label="Detection Result", interactive=False)
        conf_out = gr.Number(label="AI Confidence (%)", interactive=False)
    # wire it up -- humanize_text must return exactly these three outputs,
    # in this order.
    run_btn.click(
        fn=humanize_text,
        inputs=[input_box],
        outputs=[output_box, label_out, conf_out],
    )
    # Footer note explaining the design rationale.
    gr.Markdown(
        """
---
**Why Re-Authoring?**
Standard 'humanizers' often trigger 'Paraphrased AI' flags because they keep the original n-gram structure.
By stripping everything down to a JSON idea-map first, we break the detectable surface patterns entirely.
"""
    )
# -- launch ----------------------------------------------------------------
if __name__ == "__main__":
    # 0.0.0.0:7860 is the host/port Hugging Face Spaces expects a Gradio
    # app to bind to.
    demo.launch(server_name="0.0.0.0", server_port=7860)