Karthix1 committed
Commit 0ff8189 · verified · 1 Parent(s): a670821

Update app.py

Files changed (1)
app.py  +16 -13
app.py CHANGED

@@ -18,12 +18,6 @@ from nltk.stem import WordNetLemmatizer
 nltk.download('punkt')
 nltk.download('stopwords')
 nltk.download('wordnet')
-def preprocess_nltk(text):
-    lemmatizer = WordNetLemmatizer()
-    tokens = word_tokenize(text.lower()) # Tokenization
-    stop_words = set(stopwords.words("english"))
-    filtered_tokens = [lemmatizer.lemmatize(token) for token in tokens if token.isalnum() and token not in stop_words]
-    return " ".join(filtered_tokens)
 
 
 
@@ -31,14 +25,23 @@ def preprocess_nltk(text):
 
 
 
-with open("sentiment_analysis_model.pkl", "rb") as file:
-    pipe2 = pickle.load(file)
+
+
 def prediction(text):
-    text_processed=(preprocess_nltk(text))
-    ans=pipe2.predict([text_processed])
-    classes = ['Irrelevant', 'Natural', 'Negative', 'Positive']
-    predicted_label = ans[0]
-    return(f"The above text is:{classes[predicted_label]}" )
+
+    def preprocess_nltk(text):
+        lemmatizer = WordNetLemmatizer()
+        tokens = word_tokenize(text.lower()) # Tokenization
+        stop_words = set(stopwords.words("english"))
+        filtered_tokens = [lemmatizer.lemmatize(token) for token in tokens if token.isalnum() and token not in stop_words]
+        return " ".join(filtered_tokens)
+    with open("sentiment_analysis_model.pkl", "rb") as file:
+        pipe2 = pickle.load(file)
+    text_processed=(preprocess_nltk(text))
+    ans=pipe2.predict([text_processed])
+    classes = ['Irrelevant', 'Natural', 'Negative', 'Positive']
+    predicted_label = ans[0]
+    return(f"The above text is:{classes[predicted_label]}" )
 
 pre = gr.Interface(
     fn=prediction,
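
For readers following along, here is a minimal sketch of what app.py plausibly looks like after this commit. The import block, the Interface inputs/outputs, and the launch() call are assumptions added for completeness (the diff only shows fn=prediction); the body of prediction mirrors the added lines above.

```python
import pickle

import gradio as gr
import nltk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer

# NLTK resources used by tokenization, stopword removal, and lemmatization.
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')


def prediction(text):
    def preprocess_nltk(text):
        # Lowercase, tokenize, drop punctuation and stopwords, lemmatize.
        lemmatizer = WordNetLemmatizer()
        tokens = word_tokenize(text.lower())
        stop_words = set(stopwords.words("english"))
        filtered_tokens = [lemmatizer.lemmatize(token) for token in tokens
                           if token.isalnum() and token not in stop_words]
        return " ".join(filtered_tokens)

    # After this commit the pickled pipeline is loaded on every call.
    with open("sentiment_analysis_model.pkl", "rb") as file:
        pipe2 = pickle.load(file)

    text_processed = preprocess_nltk(text)
    ans = pipe2.predict([text_processed])
    # Class order as in the committed code.
    classes = ['Irrelevant', 'Natural', 'Negative', 'Positive']
    return f"The above text is: {classes[ans[0]]}"


# Hypothetical Interface configuration; the diff only shows fn=prediction.
pre = gr.Interface(fn=prediction, inputs="text", outputs="text")
pre.launch()
```

One side effect of the move worth noting: the model pickle is now deserialized on every prediction call rather than once at import time. If that ever becomes a latency concern, wrapping the load in a cached loader (e.g. a small function decorated with functools.lru_cache) would keep the lazy-loading behaviour without the repeated disk reads.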