# -*- coding: utf-8 -*-
"""app.py

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1FU-FwNSEDPpESmqMM7bZNA9Qnt24hpna

Gradio app that runs three NLTK analyses on user text:
part-of-speech tagging, named-entity recognition, and VADER sentiment.
"""

import nltk
import gradio as gr
from nltk.sentiment import SentimentIntensityAnalyzer

# Download required NLTK resources (tokenizer, tagger, chunker, VADER lexicon).
nltk.download('punkt_tab')
nltk.download('averaged_perceptron_tagger_eng')
nltk.download('maxent_ne_chunker_tab')
nltk.download('vader_lexicon')
nltk.download('words')

# Single shared analyzer instance; VADER is stateless per call.
sia = SentimentIntensityAnalyzer()


def analyze_text(text):
    """Analyze *text* with NLTK and return three display strings.

    Args:
        text: Raw user input from the Gradio textbox.

    Returns:
        A 3-tuple of strings: (POS-tag listing, named-entity listing,
        sentiment summary). If *text* is blank, returns a prompt message
        and two empty strings.
    """
    if not text.strip():
        return "Please enter valid text.", "", ""

    # Tokenize and POS tagging
    tokens = nltk.word_tokenize(text)
    pos_tags = nltk.pos_tag(tokens)

    # Format POS output
    pos_output = "Part-of-Speech Tags:\n"
    for word, tag in pos_tags:
        pos_output += f"{word:15} → {tag}\n"

    # Named Entity Recognition: chunked subtrees (those with a .label())
    # are entities; plain (word, tag) leaves are skipped.
    ner_tree = nltk.ne_chunk(pos_tags)
    ner_output = "Named Entities:\n"
    for subtree in ner_tree:
        if hasattr(subtree, 'label'):
            entity = " ".join([token for token, pos in subtree.leaves()])
            label = subtree.label()
            ner_output += f"{entity:20} → {label}\n"

    # Sentiment Analysis using VADER's standard compound-score thresholds
    # (>= 0.05 positive, <= -0.05 negative, otherwise neutral).
    sentiment_scores = sia.polarity_scores(text)
    compound = sentiment_scores['compound']
    if compound >= 0.05:
        sentiment = "Positive"
    elif compound <= -0.05:
        sentiment = "Negative"
    else:
        sentiment = "Neutral"
    sentiment_output = f"Sentiment: {sentiment}"

    return pos_output.strip(), ner_output.strip(), sentiment_output.strip()


# Gradio Interface
iface = gr.Interface(
    fn=analyze_text,
    inputs=gr.Textbox(lines=5, placeholder="Enter your text here...", label="Input Text"),
    outputs=[
        gr.Textbox(label="POS Tags"),
        gr.Textbox(label="Named Entities"),
        gr.Textbox(label="Sentiment Analysis")
    ],
    title="Text Analysis with NLTK",
    description="Enter any text to get Part-of-Speech tags, Named Entities, and Sentiment using NLTK."
)

# Launch only when run as a script, not when imported as a module.
if __name__ == "__main__":
    iface.launch()