# ==============================================================================
# Shakespeare Authenticator - Standalone Gradio Dashboard
# ==============================================================================
import gradio as gr
import torch
import numpy as np
from transformers import AutoModelForSequenceClassification, AutoTokenizer
import time
import os

print("🚀 Starting Shakespeare Authenticator...")
print(f"📦 PyTorch version: {torch.__version__}")
print(f"🔧 CUDA available: {torch.cuda.is_available()}")

# Configuration
MODEL_NAME = "lanretto/shakespeare-authenticator"  # Your model on the HF Hub
TITLE = "🎭 Shakespeare Authenticator"
DESCRIPTION = """
Distinguish authentic Shakespearean text from modern imitations using AI.
This model analyzes linguistic patterns, vocabulary, and stylistic elements
to determine whether a passage was written by William Shakespeare or is a
modern creation.
"""

# Global variables for model caching
model = None
tokenizer = None
device = None


def load_model():
    """Load the model and tokenizer once, with caching and error handling."""
    global model, tokenizer, device

    if model is not None:
        return model, tokenizer, device

    print("🔄 Loading model from Hugging Face Hub...")
    start_time = time.time()

    try:
        # Load the model, explicitly trusting remote code from the repository
        model = AutoModelForSequenceClassification.from_pretrained(
            MODEL_NAME,
            trust_remote_code=True
        )
        tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)

        # Set to evaluation mode and move to the best available device
        model.eval()
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        model = model.to(device)

        load_time = time.time() - start_time
        print(f"✅ Model loaded successfully in {load_time:.2f}s")
        print(f"📊 Model device: {device}")
        print(f"🏷️ Model labels: {model.config.id2label}")
        return model, tokenizer, device

    except Exception as e:
        print(f"❌ Error loading model: {e}")
        # Fall back to CPU if loading onto CUDA fails
        try:
            model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)
            tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
            model.eval()
            device = torch.device('cpu')
            model = model.to(device)
            print("✅ Model loaded on CPU as fallback")
            return model, tokenizer, device
        except Exception as e2:
            print(f"❌ Complete failure loading model: {e2}")
            raise


# Pre-load the model at startup
try:
    model, tokenizer, device = load_model()
    print("🎉 Model pre-loaded and ready for inference!")
except Exception as e:
    print(f"⚠️ Model loading failed: {e}")


def classify_shakespeare(text):
    """Classify whether text is authentic Shakespeare or a modern imitation."""
    if not text.strip():
        return {
            "error": "Please enter some text to analyze!",
            "prediction": None,
            "confidence": None,
            "detailed_breakdown": None
        }

    # Ensure the model is loaded
    if model is None:
        try:
            load_model()
        except Exception:
            return {
                "error": "Model failed to load. Please refresh the page.",
                "prediction": None,
                "confidence": None,
                "detailed_breakdown": None
            }

    try:
        # Tokenize the input text
        inputs = tokenizer(
            text,
            return_tensors="pt",
            truncation=True,
            padding=True,
            max_length=512
        )
        # Move the tensors to the model's device
        inputs = {k: v.to(device) for k, v in inputs.items()}

        # Run inference
        with torch.no_grad():
            outputs = model(**inputs)
            logits = outputs.logits
            probabilities = torch.softmax(logits, dim=1)
            prediction = torch.argmax(logits, dim=1).item()
            confidence = probabilities[0][prediction].item()

        # Map the predicted index to a label (using your model's label mapping)
        labels = {0: "Modern Creation", 1: "Authentic Shakespeare"}
        result = labels[prediction]

        # Confidence scores
        confidence_pct = confidence * 100
        modern_confidence = probabilities[0][0].item() * 100
        shakespeare_confidence = probabilities[0][1].item() * 100

        return {
            "error": None,
            "prediction": result,
            "confidence": f"{confidence_pct:.1f}%",
            "detailed_breakdown": {
                "Modern Creation": f"{modern_confidence:.1f}%",
                "Authentic Shakespeare": f"{shakespeare_confidence:.1f}%"
            },
            "raw_scores": {
                "modern": modern_confidence,
                "shakespeare": shakespeare_confidence
            }
        }

    except Exception as e:
        return {
            "error": f"Prediction error: {str(e)}",
            "prediction": None,
            "confidence": None,
            "detailed_breakdown": None
        }
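
# Illustrative usage (assumes the checkpoint loaded successfully): calling the
# classifier directly, outside the UI, returns the dict built above, e.g.:
#
#     result = classify_shakespeare("Shall I compare thee to a summer's day?")
#     print(result["prediction"], result["confidence"])
#
# The hardcoded {0, 1} label mapping matches this checkpoint; if the Hub config
# exposes the same names, it could equivalently be read from
# model.config.id2label instead.
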
def create_visual_output(result):
    """Render the classification result as a styled HTML card."""
    if result["error"]:
        # Minimal inline-styled error card (layout is illustrative)
        return f"""
        <div style="padding: 20px; border-radius: 10px; background: #ffecec; text-align: center;">
            <h3 style="color: #d63031;">❌ Error</h3>
            <p>{result['error']}</p>
        </div>
        """

    # Pick an emoji, accent color, and explanation based on the prediction
    if "Authentic" in result["prediction"]:
        emoji = "✅"
        color = "#00b894"
        explanation = "This text exhibits characteristics of authentic Shakespearean writing."
    else:
        emoji = "🔄"
        color = "#e17055"
        explanation = "This text appears to be a modern creation or imitation."

    # Create the confidence-bar visualization
    modern_score = result["raw_scores"]["modern"]
    shakespeare_score = result["raw_scores"]["shakespeare"]
    confidence_bars = f"""
    <div style="text-align: left; margin: 10px 0;">
        <div>Modern Creation <b>{modern_score:.1f}%</b></div>
        <div style="background: #eee; border-radius: 5px; height: 10px;">
            <div style="width: {modern_score:.1f}%; background: #e17055; height: 10px; border-radius: 5px;"></div>
        </div>
        <div style="margin-top: 8px;">Authentic Shakespeare <b>{shakespeare_score:.1f}%</b></div>
        <div style="background: #eee; border-radius: 5px; height: 10px;">
            <div style="width: {shakespeare_score:.1f}%; background: #00b894; height: 10px; border-radius: 5px;"></div>
        </div>
    </div>
    """
    # The footer link assumes the standard Hub URL for MODEL_NAME
    output = f"""
    <div style="padding: 20px; border-radius: 10px; border: 2px solid {color}; text-align: center;">
        <h3>{emoji} Analysis Results</h3>
        <h2 style="color: {color};">{result['prediction']}</h2>
        <p><b>Overall Confidence:</b> {result['confidence']}</p>
        <p>{explanation}</p>
        <h4>Confidence Breakdown:</h4>
        {confidence_bars}
        <p style="font-size: 0.85em; color: #888;">
            Powered by fine-tuned BERT •
            <a href="https://huggingface.co/{MODEL_NAME}" target="_blank">View Model on Hugging Face</a>
        </p>
    </div>
    """
    return output


def predict_shakespeare(text):
    """Main prediction function for the Gradio interface."""
    start_time = time.time()
    result = classify_shakespeare(text)
    processing_time = time.time() - start_time

    print(f"🔍 Processed text ({len(text)} chars) in {processing_time:.2f}s")
    return create_visual_output(result)


# Example texts
examples = [
    ["To be or not to be, that is the question"],
    ["Friends, Romans, countrymen, lend me your ears"],
    ["What light through yonder window breaks?"],
    ["Shall I compare thee to a summer's day?"],
    ["The meeting is scheduled for 2 PM in the conference room"],
    ["I think therefore I am - modern philosophical statement"],
    ["Now is the winter of our discontent made glorious summer by this sun of York"],
    ["O Romeo, Romeo, wherefore art thou Romeo?"]
]
# Create the Gradio interface
with gr.Blocks(
    theme=gr.themes.Soft(),
    title=TITLE,
    css="""
    .gradio-container { max-width: 800px !important; margin: 0 auto !important; }
    .example-text { font-style: italic; color: #666; }
    footer { display: none !important; }
    """
) as demo:
    # Header section
    gr.Markdown(f"""
    # {TITLE}
    {DESCRIPTION}
    """)

    with gr.Row():
        with gr.Column(scale=1):
            # Input section
            text_input = gr.Textbox(
                label="📝 Enter Text to Analyze",
                placeholder="Paste Shakespearean text or modern writing here...",
                lines=4,
                max_lines=6,
                elem_id="text-input"
            )

            with gr.Row():
                submit_btn = gr.Button("🔍 Analyze Text", variant="primary", scale=2)
                clear_btn = gr.Button("🗑️ Clear", variant="secondary", scale=1)

            # Examples
            gr.Examples(
                examples=examples,
                inputs=text_input,
                label="💡 Try these examples:",
                examples_per_page=4
            )

        with gr.Column(scale=1):
            # Output section
            output = gr.HTML(
                label="📊 Analysis Results",
                value="""
                <div style="padding: 40px; text-align: center; color: #888;">
                    <h3>👆 Enter text to analyze</h3>
                    <p>Paste any text above and click "Analyze Text" to see if it's authentic Shakespeare!</p>
                </div>
                """
            )

    # Model information
    with gr.Accordion("ℹ️ About This Model", open=False):
        gr.Markdown(f"""
        **Model Details**
        - **Model**: `{MODEL_NAME}` on the Hugging Face Hub
        - **Architecture**: BERT-base fine-tuned for Shakespearean text classification
        - **Training Data**: 400,000+ samples of Shakespearean vs. modern dialogue
        - **Task**: Binary text classification (Authentic Shakespeare vs. Modern Creation)

        **How It Works**
        - Analyzes linguistic patterns, vocabulary, and stylistic elements
        - Uses a transformer architecture to model context and syntax
        - Returns confidence scores for both classification categories

        **Best Practices**
        - Works best with complete sentences or passages
        - More accurate with longer text samples
        - Designed to distinguish Early Modern English from Contemporary English
        """)

    # Event handlers
    submit_btn.click(
        fn=predict_shakespeare,
        inputs=text_input,
        outputs=output
    )
    text_input.submit(
        fn=predict_shakespeare,
        inputs=text_input,
        outputs=output
    )
    clear_btn.click(
        fn=lambda: ("", """
        <div style="padding: 40px; text-align: center; color: #888;">
            <h3>👆 Enter text to analyze</h3>
            <p>Paste any text above and click "Analyze Text" to see if it's authentic Shakespeare!</p>
        </div>
        """),
        inputs=[],
        outputs=[text_input, output]
    )

# Launch the application - SIMPLIFIED FOR SPACES
if __name__ == "__main__":
    demo.launch()