sree4411 committed on
Commit
1c1d531
·
verified ·
1 Parent(s): 011d79e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +104 -107
app.py CHANGED
@@ -1,50 +1,39 @@
1
  import streamlit as st
 
 
2
  from gensim.models import Word2Vec
3
 
4
- # Apply custom styles using Streamlit's markdown
5
- st.markdown("""
6
- <style>
7
- .main-title { color: #FF5733; font-size: 20px; font-weight: bold; text-align: center; }
8
- .section-title { color: #2E86C1; font-size: 30px; font-weight: bold; margin-top: 20px; }
9
- .sub-title { color: #27AE60; font-size: 24px; font-weight: bold; margin-top: 10px; }
10
- .text { font-size: 18px; }
11
- </style>
12
- """, unsafe_allow_html=True)
13
-
14
  # Title
15
- st.markdown('<p class="main-title">Introduction to NLP</p>', unsafe_allow_html=True)
16
 
17
  # Section: What is NLP?
18
- st.markdown('<p class="section-title">What is NLP?</p>', unsafe_allow_html=True)
19
- st.markdown("""
20
  Natural Language Processing (NLP) is a subfield of artificial intelligence that enables computers to process, understand, and generate human language.
21
- """)
22
 
23
- # Section: Applications of NLP
24
- st.markdown('<p class="sub-title">Applications of NLP:</p>', unsafe_allow_html=True)
25
- st.markdown("""
26
- - βœ… Chatbots & Virtual Assistants (e.g., Siri, Alexa)
27
- - βœ… Sentiment Analysis (e.g., Product reviews, Social Media monitoring)
28
- - βœ… Machine Translation (e.g., Google Translate)
29
- - βœ… Text Summarization (e.g., News article summaries)
30
- - βœ… Speech Recognition (e.g., Voice commands)
31
  """)
32
 
33
  # Section: NLP Terminologies
34
- st.markdown('<p class="section-title">NLP Terminologies</p>', unsafe_allow_html=True)
35
- st.markdown("""
36
- **Corpus**: A collection of text documents used for NLP tasks.
37
- **Tokenization**: Splitting text into individual words or phrases.
38
- **Stop Words**: Common words (e.g., "the", "is") that are often removed.
39
- **Stemming**: Reducing words to their base form (e.g., "running" β†’ "run").
40
- **Lemmatization**: More advanced than stemming; converts words to their dictionary form.
41
- **NER (Named Entity Recognition)**: Identifies entities like names, dates, and locations.
42
- **Sentiment Analysis**: Determines the sentiment (positive, negative, neutral) of a text.
43
- **n-grams**: Sequences of 'n' consecutive words (e.g., "New York" is a bi-gram).
44
  """)
45
 
46
  # Section: Text Representation Methods
47
- st.markdown('<p class="section-title">Text Representation Methods</p>', unsafe_allow_html=True)
48
  methods = [
49
  "Bag of Words",
50
  "TF-IDF",
@@ -53,100 +42,108 @@ methods = [
53
  ]
54
  selected_method = st.radio("Select a text representation method:", methods)
55
 
 
 
 
 
 
 
 
56
  if selected_method == "Bag of Words":
57
- st.markdown('<p class="sub-title">Bag of Words (BoW)</p>', unsafe_allow_html=True)
58
- st.markdown("""
59
  **Definition**: Represents text as a collection of word counts, ignoring grammar and word order.
 
 
 
 
 
 
 
 
 
 
 
 
 
60
  """)
61
- st.markdown("""
62
- **Uses:**
63
- - βœ… Sentiment analysis
64
- - βœ… Document classification
65
- - βœ… Information retrieval
66
-
67
- **Advantages:**
68
- - βœ… Simple and easy to implement
69
- - βœ… Works well with traditional ML models
70
-
71
- **Disadvantages:**
72
- - ❌ Ignores word order and context
73
- - ❌ High-dimensionality for large vocabularies
74
- """)
75
 
76
  elif selected_method == "TF-IDF":
77
- st.markdown('<p class="sub-title">Term Frequency-Inverse Document Frequency (TF-IDF)</p>', unsafe_allow_html=True)
78
- st.markdown("""
79
  **Definition**: Weighs words based on their frequency in a document and across all documents.
 
 
 
 
 
 
 
 
 
 
 
 
 
80
  """)
81
- st.markdown("""
82
- **Uses:**
83
- - βœ… Information retrieval (e.g., search engines)
84
- - βœ… Text classification
85
- - βœ… Keyword extraction
86
-
87
- **Advantages:**
88
- - βœ… Reduces the impact of common words
89
- - βœ… Highlights important words
90
-
91
- **Disadvantages:**
92
- - ❌ Still ignores word order
93
- - ❌ Does not capture deep semantics
94
- """)
95
 
96
  elif selected_method == "One-Hot Encoding":
97
- st.markdown('<p class="sub-title">One-Hot Encoding</p>', unsafe_allow_html=True)
98
- st.markdown("""
99
  **Definition**: Represents words as binary vectors where each word has a unique position in a vocabulary.
 
 
 
 
 
 
 
 
 
 
 
 
100
  """)
101
- st.markdown("""
102
- **Uses:**
103
- - βœ… Simple NLP tasks
104
- - βœ… Word-level feature engineering
105
-
106
- **Advantages:**
107
- - βœ… Simple to understand
108
- - βœ… Works well with small vocabulary sizes
109
-
110
- **Disadvantages:**
111
- - ❌ Inefficient for large vocabularies
112
- - ❌ No information on word meaning
113
- """)
114
 
115
  elif selected_method == "Word Embeddings (Word2Vec)":
116
- st.markdown('<p class="sub-title">Word Embeddings (Word2Vec)</p>', unsafe_allow_html=True)
117
- st.markdown("""
118
  **Definition**: Converts words into dense numerical vectors capturing semantic relationships.
119
- """)
120
- st.markdown("""
121
- **Uses:**
122
- - βœ… Machine translation
123
- - βœ… Speech recognition
124
- - βœ… Sentiment analysis
125
-
126
- **Advantages:**
127
- - βœ… Captures semantic relationships
128
- - βœ… Works well for deep learning models
129
-
130
- **Disadvantages:**
131
- - ❌ Requires large datasets to train
132
- - ❌ Computationally expensive
133
- """)
134
 
135
- # Sample texts for Word2Vec model
136
- texts = [
137
- "Natural Language Processing is fascinating.",
138
- "Natural Language Processing involves understanding human language.",
139
- "The field of NLP is growing rapidly."
140
- ]
 
 
 
 
 
 
 
141
  model = Word2Vec(sentences=[text.split() for text in texts], vector_size=100, window=5, min_count=1, workers=4)
142
  word_vectors = model.wv
143
  word = 'natural'
144
  if word in word_vectors:
145
- st.markdown(f'Word2Vec Representation of "{word}":')
146
  st.write(word_vectors[word])
147
  else:
148
- st.markdown(f'Word "{word}" not found in the vocabulary.')
149
 
150
  # Footer
151
- st.markdown('<hr>', unsafe_allow_html=True)
152
- st.markdown('<p class="text" style="text-align:center;">Developed with ❀️ using Streamlit for NLP enthusiasts.</p>', unsafe_allow_html=True)
 
"""Streamlit teaching app: an interactive introduction to NLP.

Presents core NLP concepts (terminology, applications) and lets the user
explore four text representation methods — Bag of Words, TF-IDF, One-Hot
Encoding, and Word2Vec embeddings — fitted on a tiny sample corpus.

Run with: ``streamlit run app.py``
"""

import string

import streamlit as st
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from gensim.models import Word2Vec

# Title
st.title("Introduction to NLP")

# Section: What is NLP?
st.header("What is NLP?")
st.write("""
Natural Language Processing (NLP) is a subfield of artificial intelligence that enables computers to process, understand, and generate human language.

### Applications of NLP:
- **Chatbots & Virtual Assistants** (e.g., Siri, Alexa)
- **Sentiment Analysis** (e.g., Product reviews, Social Media monitoring)
- **Machine Translation** (e.g., Google Translate)
- **Text Summarization** (e.g., News article summaries)
- **Speech Recognition** (e.g., Voice commands)
""")

# Section: NLP Terminologies
st.header("NLP Terminologies")
st.write("""
- **Corpus**: A collection of text documents used for NLP tasks.
- **Tokenization**: Splitting text into individual words or phrases.
- **Stop Words**: Common words (e.g., "the", "is") that are often removed.
- **Stemming**: Reducing words to their base form (e.g., "running" → "run").
- **Lemmatization**: More advanced than stemming; it converts words to their dictionary form.
- **Named Entity Recognition (NER)**: Identifies entities like names, dates, and locations.
- **Sentiment Analysis**: Determines the sentiment (positive, negative, neutral) of a text.
- **n-grams**: Sequences of 'n' consecutive words (e.g., "New York" is a bi-gram).
""")

# Section: Text Representation Methods
st.header("Text Representation Methods")
# NOTE(review): the last two entries were hidden by the diff hunk; they are
# reconstructed from the elif branches below — confirm against the full file.
methods = [
    "Bag of Words",
    "TF-IDF",
    "One-Hot Encoding",
    "Word Embeddings (Word2Vec)",
]
selected_method = st.radio("Select a text representation method:", methods)

# Sample Texts — the tiny corpus every demo below is fitted on.
texts = [
    "Natural Language Processing is fascinating.",
    "Natural Language Processing involves understanding human language.",
    "The field of NLP is growing rapidly."
]

if selected_method == "Bag of Words":
    st.subheader("Bag of Words (BoW)")
    st.write("""
**Definition**: Represents text as a collection of word counts, ignoring grammar and word order.

**Uses**:
- Sentiment analysis
- Document classification
- Information retrieval

**Advantages**:
✅ Simple and easy to implement
✅ Works well with traditional ML models

**Disadvantages**:
❌ Ignores word order and context
❌ High-dimensionality for large vocabularies
""")
    # Raw term counts per document; columns are the learned vocabulary.
    vectorizer = CountVectorizer()
    X_bow = vectorizer.fit_transform(texts)
    st.write("Feature Names:", vectorizer.get_feature_names_out())
    st.write("Bag of Words Representation:", X_bow.toarray())

elif selected_method == "TF-IDF":
    st.subheader("Term Frequency-Inverse Document Frequency (TF-IDF)")
    st.write("""
**Definition**: Weighs words based on their frequency in a document and across all documents.

**Uses**:
- Information retrieval (e.g., search engines)
- Text classification
- Keyword extraction

**Advantages**:
✅ Reduces the impact of common words
✅ Highlights important words

**Disadvantages**:
❌ Still ignores word order
❌ Does not capture deep semantics
""")
    # Counts reweighted by inverse document frequency.
    tfidf_vectorizer = TfidfVectorizer()
    X_tfidf = tfidf_vectorizer.fit_transform(texts)
    st.write("Feature Names:", tfidf_vectorizer.get_feature_names_out())
    st.write("TF-IDF Representation:", X_tfidf.toarray())

elif selected_method == "One-Hot Encoding":
    st.subheader("One-Hot Encoding")
    st.write("""
**Definition**: Represents words as binary vectors where each word has a unique position in a vocabulary.

**Uses**:
- Simple NLP tasks
- Word-level feature engineering

**Advantages**:
✅ Simple to understand
✅ Works well with small vocabulary sizes

**Disadvantages**:
❌ Inefficient for large vocabularies
❌ No information on word meaning
""")
    # binary=True clamps counts to presence/absence (per-document one-hot).
    one_hot_vectorizer = CountVectorizer(binary=True)
    X_one_hot = one_hot_vectorizer.fit_transform(texts)
    st.write("Feature Names:", one_hot_vectorizer.get_feature_names_out())
    st.write("One-Hot Encoding Representation:", X_one_hot.toarray())

elif selected_method == "Word Embeddings (Word2Vec)":
    st.subheader("Word Embeddings (Word2Vec)")
    st.write("""
**Definition**: Converts words into dense numerical vectors capturing semantic relationships.

**Uses**:
- Machine translation
- Speech recognition
- Sentiment analysis

**Advantages**:
✅ Captures semantic relationships
✅ Works well for deep learning models

**Disadvantages**:
❌ Requires large datasets to train
❌ Computationally expensive
""")
    # BUG FIX: the original tokenized with bare text.split(), producing
    # tokens like "Natural" and "fascinating.", so the lookup of the
    # lowercase word 'natural' always fell through to "not found".
    # Normalize tokens (lowercase, strip surrounding punctuation) so the
    # demo lookup succeeds.
    tokenized = [
        [tok.strip(string.punctuation).lower() for tok in text.split()]
        for text in texts
    ]
    model = Word2Vec(sentences=tokenized, vector_size=100, window=5, min_count=1, workers=4)
    word_vectors = model.wv  # gensim KeyedVectors: word -> dense vector
    word = 'natural'
    if word in word_vectors:
        st.write(f"Word2Vec Representation of '{word}':")
        st.write(word_vectors[word])
    else:
        st.write(f"Word '{word}' not found in the vocabulary.")

# Footer
st.write("---")
st.write("Developed with ❤️ using Streamlit for NLP enthusiasts.")