| |
|
| |
|
import html
import re

import gradio as gr
| |
|
| | |
| | |
# Tiny hand-built lexicons driving the rule "layers" below.
# Layer 1 counts token hits against these two word sets.
positive_words = {"love", "great", "good", "amazing", "awesome", "happy"}
negative_words = {"hate", "terrible", "awful", "bad", "horrible", "sad"}

# Layer 2 flips the Layer-1 label when one of these sarcasm markers
# appears in the lower-cased text.
sarcasm_cues = {"not!", "jk", "/s"}

# Layer 3 marks the label "(HIGH CONFIDENCE)" when one of these
# intensifier words is present.
intensifiers = {"very", "extremely", "so", "super", "really"}
| |
|
def layer_1_basic(text, pos_lexicon=None, neg_lexicon=None):
    """
    Layer 1: crude lexicon-based polarity.

    Tokenizes *text* into alphabetic words and counts how many fall in
    the positive vs. negative lexicon.  Ties — including zero hits on
    both sides — resolve to "Positive" (original behavior, kept).

    Args:
        text: Sentence to classify.
        pos_lexicon: Optional set of positive words; defaults to the
            module-level ``positive_words``.
        neg_lexicon: Optional set of negative words; defaults to the
            module-level ``negative_words``.

    Returns:
        Tuple ``(label, pos_count, neg_count)`` where *label* is
        "Positive" or "Negative".
    """
    pos_lexicon = positive_words if pos_lexicon is None else pos_lexicon
    neg_lexicon = negative_words if neg_lexicon is None else neg_lexicon

    words = re.findall(r"[a-zA-Z]+", text.lower())
    pos_count = sum(1 for w in words if w in pos_lexicon)
    neg_count = sum(1 for w in words if w in neg_lexicon)

    label = "Positive" if pos_count >= neg_count else "Negative"
    return label, pos_count, neg_count
| |
|
def layer_2_sarcasm_or_flip(text, prev_label, cues=None):
    """
    Layer 2: sarcasm correction.

    If any sarcasm cue (e.g. ``"not!"``, ``"jk"``, ``"/s"``) occurs as a
    substring of the lower-cased text, the Layer-1 label is flipped and
    tagged "(sarcasm-corrected)"; otherwise *prev_label* passes through.

    NOTE(review): matching is deliberately substring-based because the
    cues contain punctuation ("not!", "/s") that word tokenization would
    drop — but this means "/s" also fires inside e.g. URLs; confirm that
    trade-off is acceptable.

    Args:
        text: Sentence being classified.
        prev_label: Label produced by Layer 1 ("Positive"/"Negative").
        cues: Optional set of cue substrings; defaults to the
            module-level ``sarcasm_cues``.

    Returns:
        Either *prev_label* unchanged, or the flipped label with the
        " (sarcasm-corrected)" suffix.
    """
    cues = sarcasm_cues if cues is None else cues
    text_lower = text.lower()
    if not any(cue in text_lower for cue in cues):
        return prev_label
    flipped = "Positive" if prev_label == "Negative" else "Negative"
    return flipped + " (sarcasm-corrected)"
| |
|
def layer_3_intensifiers(text, prev_label, cues=None):
    """
    Layer 3: confidence boost on intensifiers.

    Appends " (HIGH CONFIDENCE)" to *prev_label* when the text contains
    an intensifier word ("very", "extremely", "so", "super", "really").

    Fix: matching is done on whole alphabetic tokens rather than raw
    substrings — the previous ``cue in text_lower`` check also fired
    inside unrelated words (e.g. "so" inside "awesome" or "sorry"),
    falsely boosting confidence.

    Args:
        text: Sentence being classified.
        prev_label: Label produced by the previous layer.
        cues: Optional set of intensifier words; defaults to the
            module-level ``intensifiers``.

    Returns:
        *prev_label*, with " (HIGH CONFIDENCE)" appended when an
        intensifier token is present.
    """
    cues = intensifiers if cues is None else cues
    tokens = set(re.findall(r"[a-zA-Z]+", text.lower()))
    if tokens & set(cues):
        return prev_label + " (HIGH CONFIDENCE)"
    return prev_label
| |
|
def deep_classify(text):
    """
    Run *text* through the three rule "layers" and collect every
    intermediate result.

    Returns:
        Tuple ``(layer1_label, layer2_label, final_label, pos_count,
        neg_count)`` — the label after each layer plus the raw
        positive/negative word counts from Layer 1.
    """
    # Layer 1: raw lexicon counts -> initial polarity label.
    label_1, n_pos, n_neg = layer_1_basic(text)

    # Layer 2: flip the label if a sarcasm cue is present.
    label_2 = layer_2_sarcasm_or_flip(text, label_1)

    # Layer 3: tag the label as high-confidence on intensifiers.
    label_3 = layer_3_intensifiers(text, label_2)

    return label_1, label_2, label_3, n_pos, n_neg
| |
|
| | |
| | |
| | |
| |
|
| | |
# Canned inputs for the dropdown — chosen to exercise each layer:
# plain positive/negative text, sarcasm cues ("not!", "JK"), and
# intensifiers ("so", "really", "super").
sample_statements = [
    "I love this movie!",
    "This place is awful.",
    "The food was amazing, not!",
    "I'm so happy right now.",
    "Everything was terrible... JK!",
    "I really hate slow service.",
    "This is so good, I'm super impressed.",
]
| |
|
def simulate_layers(sample_choice, custom_text):
    """
    Classify one sentence and render a per-layer HTML explanation.

    Input precedence:
        1) a non-blank ``custom_text`` wins,
        2) otherwise the selected ``sample_choice``,
        3) otherwise an inline error message is returned.

    Args:
        sample_choice: Sentence picked from the dropdown ("" for none).
        custom_text: Free-form text typed by the user (may be None).

    Returns:
        An HTML string for the ``gr.HTML`` output component.
    """
    # Gradio can hand us None for an untouched textbox/dropdown; the old
    # ``custom_text.strip()`` would raise AttributeError on that.
    text_to_classify = (custom_text or "").strip() or (sample_choice or "")
    if not text_to_classify:
        return "<p style='color:red;'>Please provide a sentence.</p>"

    layer1_label, layer2_label, final_label, pos_count, neg_count = deep_classify(text_to_classify)

    # Escape the user-supplied text before embedding it in markup so a
    # crafted input cannot inject HTML/script into the output pane.
    safe_text = html.escape(text_to_classify)

    html_output = f"""
    <h3>Deep Learning Simulation</h3>
    <p><b>Input Text:</b> {safe_text}</p>

    <ol>
      <li><b>Layer 1</b> checks positive vs. negative words (simple count).
          <br>Positive words found: {pos_count}, Negative words found: {neg_count}
          <br>Layer 1 Classification: <i>{layer1_label}</i>
      </li>
      <li><b>Layer 2</b> checks for sarcasm cues (like "not!", "jk", "/s").
          <br>Result after sarcasm check: <i>{layer2_label}</i>
      </li>
      <li><b>Layer 3</b> looks for intensifiers ("very", "extremely", "so", "super", "really").
          <br>Final Classification: <span style="color:blue; font-weight:bold;">{final_label}</span>
      </li>
    </ol>

    <h4>How this relates to Deep Learning</h4>
    <ul>
      <li>In actual deep learning, each layer learns more complex
          <i>features</i> automatically from data, rather than
          using manually defined checks.</li>
      <li>However, the <b>concept</b> of passing through multiple
          layers to refine the result <i>is</i> how neural networks work.</li>
    </ul>

    <p><b>Observation:</b> This layered approach can handle nuances
    like sarcasm or intensifiers better than a single rule or single-layer model.</p>
    """
    return html_output
| |
|
# --- UI wiring: dropdown of samples, free-text box, one button, HTML out ---
with gr.Blocks(css="footer{display:none !important}") as demo:
    gr.Markdown("Deep Learning (Layered Approach) Simulation")
    gr.Markdown("Pick a sample statement **or** type your own, then click **Simulate**.")

    with gr.Row():
        sample_choice = gr.Dropdown(
            label="Sample Statement",
            choices=["(None)"] + sample_statements,
            value="(None)",
        )
        custom_text = gr.Textbox(
            label="Custom Text",
            placeholder="Type your own statement here...",
        )

    simulate_button = gr.Button("Simulate Layers")
    output = gr.HTML()

    def simulate_wrapper(choice, typed):
        # The dropdown's "(None)" placeholder means "no sample selected".
        return simulate_layers("" if choice == "(None)" else choice, typed)

    simulate_button.click(
        fn=simulate_wrapper,
        inputs=[sample_choice, custom_text],
        outputs=[output],
    )

demo.launch()
| |
|