rr committed on
Commit
54319f7
·
1 Parent(s): eca1edf

Upload tara ja jeet.txt

Browse files
Files changed (1) hide show
  1. tara ja jeet.txt +430 -0
tara ja jeet.txt ADDED
@@ -0,0 +1,430 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# ============================================================
# Classification: predict the gender of a first name from its
# last letter, using the NLTK 'names' corpus and Naive Bayes.
# ============================================================
from nltk.corpus import names
import random
from nltk import NaiveBayesClassifier

# Labelled data: (name, gender) pairs built from the two corpus lists.
l = ([(name, 'male') for name in names.words('male.txt')] +
     [(name, 'female') for name in names.words('female.txt')])

print("\nNumber of male names:")
print(len(names.words('male.txt')))
print("\nNumber of female names:")
print(len(names.words('female.txt')))

male_names = names.words('male.txt')
female_names = names.words('female.txt')
# FIX: the labels say "First 10" but the slices took 15 names;
# slice to match the printed label.
print("\nFirst 10 male names:")
print(male_names[0:10])
print("\nFirst 10 female names:")
print(female_names[0:10])

# BUG FIX: the original called random.shuffle(n) where `n` is
# undefined (NameError). Shuffle the labelled list `l` so the
# train/test split below is random.
random.shuffle(l)

def gender_features(word):
    """Feature extractor: only the last letter of the name is used."""
    return {'last_letter': word[-1]}

feature_sets = [(gender_features(n), gender) for (n, gender) in l]
# First 1000 examples held out for testing, the rest for training.
train_set, test_set = feature_sets[1000:], feature_sets[:1000]

model = NaiveBayesClassifier.train(train_set)
# Classify an arbitrary string (only its final character matters).
model.classify(gender_features('#whatever he asks'))
# ============================================================
# Clustering: hierarchical (Ward dendrogram) and K-Means over a
# small set of sentences vectorised with TF-IDF.
# ============================================================
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import cosine_similarity
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import ward, dendrogram

documents = ['Mr. and Mrs. Dursley, of number four, Privet Drive, were proud to say that they were perfectly normal, thank you very much.',
             'They were the last people you’d expect to be involved in anything strange or mysterious, because they just didn’t hold with such nonsense.',
             'Mr. Dursley was the director of a firm called Grunnings, which made drills.',
             'He was a big, beefy man with hardly any neck, although he did have a very large mustache.',
             'Mrs. Dursley was thin and blonde and had nearly twice the usual amount of neck, which came in very useful as she spent so much of her time craning over garden fences, spying on the neighbors.',
             'The Dursley s had a small son called Dudley and in their opinion there was no finer boy anywhere.']
documents

vectorizer = TfidfVectorizer(stop_words='english')
X = vectorizer.fit_transform(documents)
# FIX: get_feature_names() was removed in scikit-learn 1.2;
# get_feature_names_out() is the supported replacement.
terms = vectorizer.get_feature_names_out()

# Cosine *distance* matrix (1 - cosine similarity).
dist = 1 - cosine_similarity(X)
dist

# --- Hierarchical clustering (Ward linkage) ---
linkage_matrix = ward(dist)
fig, ax = plt.subplots(figsize=(8, 8))  # set size
ax = dendrogram(linkage_matrix, orientation='right', labels=documents)

# FIX: tick_params expects booleans for bottom/top/labelbottom;
# the strings 'off' are not valid values in current Matplotlib.
plt.tick_params(
    axis='x',
    which='both',
    bottom=False,
    top=False,
    labelbottom=False)

plt.tight_layout()

# --- K-Means with 2 clusters ---
model = KMeans(n_clusters=2, init='k-means++', max_iter=100, n_init=1)
model.fit(X)

# Top ten terms/words per cluster: sort each centroid's weights
# in descending order and map the indices back to vocabulary terms.
order_centroids = model.cluster_centers_.argsort()[:, ::-1]
for i in range(2):
    print("Cluster Number:", i)
    for c in order_centroids[i, :10]:
        print('%s' % terms[c])

# Predict the cluster of a previously unseen document.
Y = vectorizer.transform(["Harry Potter is not Harry Styles"])
model.predict(Y)
72
+
# ============================================================
# Preprocessing: external-data pipeline with user-defined steps
# (lowercase -> strip punctuation -> spell out numbers ->
#  drop stopwords -> stem -> lemmatise).
# ============================================================
import re
import nltk
import inflect
from nltk import word_tokenize, sent_tokenize
from nltk.corpus import stopwords
from nltk.stem import LancasterStemmer, WordNetLemmatizer

# NOTE(review): "dataset path.txt" is a placeholder — point it at the
# real dataset file before running.
file = open("dataset path.txt", encoding='utf-8').read()
words = word_tokenize(file)

def to_lowercase(words):
    """Convert every token in the list to lowercase."""
    new_words = []
    for word in words:
        new_word = word.lower()
        new_words.append(new_word)
    return new_words

words = to_lowercase(words)
#print(words)

def remove_punctuation(words):
    """Strip punctuation from each token; drop tokens that become empty."""
    new_words = []
    for word in words:
        new_word = re.sub(r'[^\w\s]', '', word)
        if new_word != '':
            new_words.append(new_word)
    return new_words

words = remove_punctuation(words)
#print(words)

def replace_numbers(words):
    """Replace purely numeric tokens with their spelled-out English form."""
    p = inflect.engine()
    new_words = []
    for word in words:
        if word.isdigit():
            new_word = p.number_to_words(word)
            new_words.append(new_word)
        else:
            new_words.append(word)
    return new_words

words = replace_numbers(words)
#print(words)

def remove_stopwords(words):
    """Remove English stopwords from the list of tokenized words."""
    # PERF: build the stopword set once — the original called
    # stopwords.words('english') (a fresh list scan) for every token.
    sw = set(stopwords.words('english'))
    new_words = []
    for word in words:
        if word not in sw:
            new_words.append(word)
    return new_words

words = remove_stopwords(words)
#print(words)

def stem_words(words):
    """Stem each token with the (aggressive) Lancaster stemmer."""
    stemmer = LancasterStemmer()
    stems = []
    for word in words:
        stem = stemmer.stem(word)
        stems.append(stem)
    return stems

words = stem_words(words)
#print(words)

def lemmatize_words(words):
    """Lemmatise each token as a verb (pos='v') with WordNet."""
    lemmatizer = WordNetLemmatizer()
    lemmas = []
    for word in words:
        lemma = lemmatizer.lemmatize(word, pos='v')
        lemmas.append(lemma)
    return lemmas

words = lemmatize_words(words)
#print(words)
print(words)
# ============================================================
# Text preprocessing using library (non user-defined) calls.
# ============================================================
import nltk
import re
import string
import inflect
from nltk.corpus import stopwords
from nltk import word_tokenize

# BUG FIX: the original line  open("dataset path.txt".txt")  was a
# syntax error (stray `.txt")` after the string literal); open the
# file with a single, well-formed path. The path itself is a
# placeholder — replace with the real dataset file.
series = open("dataset path.txt").read()
series

series_lower = series.lower()

# Removal of numbers
result1 = re.sub(r'\d+', '', series_lower)
#result1

# Removal of punctuations
result2 = result1.translate(str.maketrans('', '', string.punctuation))
#result2

# Removing white spaces
result3 = result2.strip()
#result3

# Removal of stopwords
# Tokenize the text
result3_tokens = word_tokenize(result3)
#result3_tokens

# Removing stopwords
sw = set(stopwords.words('english'))

result4 = []
for w in result3_tokens:
    if w not in sw:
        result4.append(w)
#result4

text_tokenize = result4
#text_tokenize

# Part-of-speech tag the cleaned tokens.
output = nltk.pos_tag(text_tokenize)
#output
# ============================================================
# Sentiment Analysis: clean a corpus, count word frequencies,
# and draw word clouds.
# ============================================================
import pandas as pd
import re
import string
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
import nltk
from wordcloud import WordCloud
import matplotlib.pyplot as plt

# BUG FIX: the original  open("dataset path.txt".txt", ...)  was a
# syntax error (stray `.txt"` after the string literal).
file = open("dataset path.txt", encoding='utf-8').read()

# These are not required. DO Only if asked:
# this code, clean data 2 and clean data 3.
cleandata1 = file.lower()
#cleandata1
cleandata2 = re.sub(r'[^\w\s]', '', cleandata1)   # strip punctuation
#cleandata2
cleandata3 = re.sub(r'\d+', ' ', cleandata2)      # strip digits
#cleandata3

stop_words = set(stopwords.words('english'))
#stop_words

# Remove stopwords from the tokenised text.
tokens = word_tokenize(cleandata3)
cleandata4 = [i for i in tokens if not i in stop_words]
cleandata4
cleandata4 = " ".join(str(x) for x in cleandata4)
#cleandata4

# Drop single-letter alphabetic tokens, then trim whitespace.
cleandata5 = ' '.join(i for i in cleandata4.split() if not (i.isalpha() and len(i) == 1))
#cleandata5
cleandata6 = cleandata5.strip()
#cleandata6

## Frequency of words
words_dict = {}
for word in cleandata6.split():
    words_dict[word] = words_dict.get(word, 0) + 1
for key in sorted(words_dict):
    print("{}:{}".format(key, words_dict[key]))

# Word cloud over the cleaned text.
wordcloud = WordCloud(width=480, height=480, margin=0).generate(cleandata6)

# Display the generated image:
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.margins(x=0, y=0)
plt.show()

# With a cap on the number of words shown.
wordcloud = WordCloud(width=480, height=480, max_words=5).generate(cleandata6)
plt.figure()
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.margins(x=0, y=0)
plt.show()

from textblob import TextBlob
from textblob.sentiments import NaiveBayesAnalyzer
# ============================================================
# Bag of Words with CountVectorizer.
# ============================================================
from sklearn.feature_extraction.text import CountVectorizer

sentences = ["Hello how are you",
             "Hi students are you all good",
             "Okay lets study bag of words"]
sentences

cv = CountVectorizer()
# Document-term count matrix as a dense array.
bow = cv.fit_transform(sentences).toarray()
cv.vocabulary_
# FIX: get_feature_names() was removed in scikit-learn 1.2;
# get_feature_names_out() is the supported replacement.
cv.get_feature_names_out()
bow
# ============================================================
# NLTK Basics: corpus exploration with the nltk.book texts.
# ============================================================
import nltk
from nltk.book import *

# Words used in similar contexts / keyword-in-context view.
text6.similar('King')
text6.concordance('King')
sents()
len(text1)

# `lines` caps how many concordance lines are printed.
# You can run the code without the lines argument also.
text3.concordance('lived', lines=38)
text3.common_contexts(['earth', 'heaven'])
text1.common_contexts(['captain', 'whale'])

#text3.collocations()
text3.collocation_list()
# Put a number inside the brackets to limit how many are returned.
text6.collocation_list(5)
text6.generate(5)
len(text3)

from nltk import lm
help(lm)

text = "Hello students, we are studying Parts of Speech Tagging. Lets understand the process of\
shallow parsing or Chunking. Here were are drawing the tree corresponding to the words \
and the POS tags based on a set grammer regex patter."
words = nltk.word_tokenize(text)
#words
tags = nltk.pos_tag(words)
#tags

# Chunk grammar for shallow parsing: a noun phrase (NP) is a
# determiner followed by an adjective followed by a noun.
grammar = (''' NP: {<DT><JJ><NN>} ''')
grammar

# Frequency distribution over the tokens of text3.
freq = FreqDist(text3)
freq
freq.most_common(50)
freq['father']
freq.plot(20, cumulative=True)
freq.plot(20)
freq.tabulate()
freq.max()

# Filter sent3 tokens by length.
[i for i in sent3 if len(i) > 8]
[i for i in sent3 if len(i) != 3]
[i for i in sent3 if len(i) <= 3]

# Same <= 3 filter written as an explicit loop.
l = []
for i in sent3:
    if len(i) <= 3:
        l.append(i)
print(l)
# print(len(l))
# ============================================================
# Simple Regex: findall / split / search on strings.
# ============================================================
import re

egstring = '''
Jessica is 15 years old, and Daniel is 27 years old.
Edward is 97 years old, and his grandfather, Oscar, is 108 years old
'''

# 1-3 digit numbers, and capitalised words (names).
ages = re.findall(r'\d{1,3}', egstring)
names = re.findall(r'[A-Z][a-z]*', egstring)

print(ages)
print(names)

# Split the text wherever a number occurs.
result = re.split(r'\d{1,3}', egstring)
print(result)

string = "Python is fun"

# FIX: the pattern was the non-raw string '\APython' — '\A' is an
# unrecognised escape in a plain string literal (DeprecationWarning
# today, a SyntaxError in future Python). Use a raw string so \A is
# the regex start-of-string anchor.
match = re.search(r'\APython', string)

if match:
    print("pattern found inside the string")
else:
    print("pattern not found")
# ============================================================
# Email matching with regular expressions.
# ============================================================

# Worked example patterns (kept for reference):
#pattern = r'\w{4}_\d{2}\w{5}.\w{4}@w{5}.\w{3}
#pattern1 = r'[a-z]+_[0-9a-z]+.[a-z]+@[a-z.]+'
#email_string = "bill_05gates.mics@gmail.com"

# BUG FIX: the dot before the top-level domain was unescaped, so it
# matched ANY character (e.g. "x@gmailsXcom" would pass); escape it
# so it matches a literal '.'.
generic_pattern = r'[a-zA-Z0-9._]+@[a-z]+\.[a-z]+'
email_string1 = "lalitisdashing_6969@gmail.com"

if re.match(generic_pattern, email_string1) is not None:
    print(True)
else:
    print(False)

# Entering an Email
email = input("Enter an email")
email_list = ["bill_05gates.mics@gmail.com", "sahithi12_kanithi12@hotmail.com", " xyz@gmail.com",
              "sachin.tripathi007@hotmail.com", "tripathi.sachin13@gmail.com", "qtdash@yahoo.com"]
email_list.append(email)
print(email_list)

# Function definition
def email_match(email_ls):
    """Print the addresses in *email_ls* grouped by provider."""
    count = len(email_ls)
    # BUG FIX: '.' before the TLD escaped in every provider pattern
    # (an unescaped '.' matches any character, not just a dot).
    gmail_pattern = r'[a-zA-Z0-9._]+@gmail\.[a-z]+'
    hotmail_pattern = r'[a-zA-Z0-9._]+@hotmail\.[a-z]+'
    yahoo_pattern = r'[a-zA-Z0-9._]+@yahoo\.[a-z]+'
    print("---")

    print("GMAIL MAILS")
    for i in range(0, count):
        if re.match(gmail_pattern, email_ls[i]) is not None:
            print(email_ls[i])
    print("---")

    print("HOTMAIL MAILS")
    for i in range(0, count):
        if re.match(hotmail_pattern, email_ls[i]) is not None:
            print(email_ls[i])
    print("---")

    print("YAHOO MAILS")
    for i in range(0, count):
        if re.match(yahoo_pattern, email_ls[i]) is not None:
            print(email_ls[i])

# Calling the function
email_match(email_list)
# ============================================================
# POS (part-of-speech) tagging with NLTK.
# ============================================================
import nltk
from nltk import pos_tag
from nltk import word_tokenize

sample_text = word_tokenize("The classes are reopening on 15th March in St. Joseph's College of Commerce")
sample_text

pos_tag(sample_text)

# Documentation for individual Penn Treebank tags.
nltk.help.upenn_tagset("DT")
nltk.help.upenn_tagset("VBP")
# Do for what is asked or however many are asked.
#nltk.help.upenn_tagset("NNS")

# Lower-cased Brown corpus wrapped as an nltk.Text for similarity queries.
text = nltk.Text(word.lower() for word in nltk.corpus.brown.words())
text

text.similar("boy")
text.similar("test")

# str2tuple parses a "WORD/TAG" string into a (word, tag) pair.
var1 = nltk.tag.str2tuple("SJCC/NNP")
var1
var1[1]

sentence = '''
The/DT classes/NNS are/VBP reopening/VBG from/IN 15th/CD March'2021/NNP in/IN St./NNP Joseph/NNP 's/POS College/NNP
'''
sentence

# Parse the whole pre-tagged sentence into (word, tag) pairs.
# (The original repeated this statement twice; once is enough.)
abc = [nltk.tag.str2tuple(i) for i in sentence.split()]
abc

# Pre-tagged corpora and their tagsets.
nltk.corpus.brown.tagged_words()
nltk.help.brown_tagset('AT')
nltk.help.brown_tagset('NP-TL')
nltk.corpus.indian.tagged_words()
nltk.help.indian_tagset('SYM')