RohitGuptaAI commited on
Commit
ec40672
·
1 Parent(s): 49b04e1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +53 -60
app.py CHANGED
@@ -1,63 +1,56 @@
1
- import numpy as np
2
- import pandas as pd
3
- from sklearn.feature_extraction.text import CountVectorizer
4
- from sklearn.naive_bayes import MultinomialNB
5
- from sklearn.model_selection import train_test_split
6
- from sklearn.metrics import accuracy_score
7
- from sklearn.metrics import confusion_matrix
8
- import json
9
- import string
10
- import string
11
- import re
12
- from nltk.corpus import stopwords
13
- from nltk.tokenize import word_tokenize
14
- import gradio as gr
15
- import joblib
16
  import nltk
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
17
 
18
# Fetch NLTK data used by the preprocessing in test_model():
nltk.download('stopwords')  # English stopword list for filtering
nltk.download('punkt')      # Punkt models backing word_tokenize

# Load the trained model
# NOTE(review): expects 'model.bin' next to this script; presumably a
# scikit-learn MultinomialNB saved with joblib — confirm against training code.
model = joblib.load('model.bin')
23
-
24
def remove_punctuation(text):
    """Return *text* with every ASCII punctuation character removed."""
    # str.translate with a deletion table does one C-level pass over the
    # string instead of a per-character Python loop.
    return text.translate(str.maketrans("", "", string.punctuation))
27
-
28
def vectorize_text(texts):
    """Fit a fresh CountVectorizer on *texts* and vectorize them.

    Returns a tuple ``(matrix, vectorizer)`` where ``matrix`` is the
    document-term matrix for *texts* and ``vectorizer`` is the fitted
    CountVectorizer instance.
    """
    cv = CountVectorizer()
    # fit_transform == fit() followed by transform() on the same input.
    matrix = cv.fit_transform(texts)
    return matrix, cv
33
-
34
def test_model(text):
    """Preprocess *text* and return the loaded model's predicted label.

    Pipeline: lowercase -> strip punctuation -> strip digits -> drop English
    stopwords -> vectorize -> predict with the joblib-loaded ``model``.

    NOTE(review): vectorize_text() fits a brand-new CountVectorizer on just
    this one input, so its feature columns almost certainly do not line up
    with the vocabulary the trained model saw — predictions are likely
    meaningless. The vectorizer fitted at training time should be persisted
    and reused here; confirm against the training script.
    """
    # Convert text to lowercase
    text = text.lower()

    # Remove punctuation
    text = remove_punctuation(text)

    # Remove numbers
    text = re.sub(r'\d+', '', text)

    # Remove stopwords
    stopwords_set = set(stopwords.words('english'))
    tokens = word_tokenize(text)
    filtered_text = [word for word in tokens if word not in stopwords_set]

    # Join the filtered tokens back into a string
    preprocessed_text = ' '.join(filtered_text)

    # Vectorize the preprocessed text
    # (vectorize_text returns (matrix, vectorizer); only the matrix is used)
    vectorize_texts = vectorize_text([preprocessed_text])

    # Make prediction on the vectorized text
    prediction = model.predict(vectorize_texts[0])[0]

    # Return the prediction
    return prediction
60
-
61
# Create the Gradio interface: single text box in, predicted label out.
iface = gr.Interface(fn=test_model, inputs="text", outputs="text", title="Text Classification")
# Start the Gradio server (blocks until shutdown).
iface.launch()
 
1
+ from nltk.tokenize import sent_tokenize
2
+ from language_tool_python import LanguageTool
3
+ from nltk.sentiment import SentimentIntensityAnalyzer
4
+ import gradio as gr
 
 
 
 
 
 
 
 
 
 
 
5
import nltk
nltk.download('vader_lexicon')  # lexicon required by SentimentIntensityAnalyzer

# Initialize LanguageTool object outside the function
# so it is created once at startup rather than on every request.
tool = LanguageTool('en-US')
sia = SentimentIntensityAnalyzer()
11
+
12
def grammar_check(text):
    """Return *text* with grammar/spelling corrections applied.

    Uses the module-level ``tool`` (LanguageTool('en-US')).
    """
    # tool.correct() performs its own check internally; the previous
    # separate tool.check() call was unused and doubled the work.
    return tool.correct(text)
16
+
17
def analyze_sentiment(text):
    """Classify *text* as "Positive", "Neutral", or "Negative".

    Uses the module-level VADER analyzer ``sia``; the label is decided by
    the sign of the compound polarity score.
    """
    compound = sia.polarity_scores(text)['compound']
    # compound > 0 -> positive, == 0 -> neutral, < 0 -> negative
    if compound > 0:
        return "Positive"
    if compound == 0:
        return "Neutral"
    return "Negative"
32
+
33
def sentence_generator(text):
    """Yield the sentences of *text* one at a time (NLTK sentence tokenizer)."""
    yield from sent_tokenize(text)
37
+
38
def CsharpGrammarly(text):
    """Gradio handler: correct grammar, classify sentiment, count words.

    Returns ``(corrected_text, sentiment_label, word_count)``. Sentiment is
    computed on the CORRECTED text, while the word count is taken over the
    original input — both match the previous behavior.
    """
    corrected = grammar_check(text)
    sentiment = analyze_sentiment(corrected)
    word_count = len(text.split())
    return corrected, sentiment, word_count
43
+
44
# Build the Gradio UI: one text input, three text outputs.
# The gr.inputs/gr.outputs namespaces were removed in Gradio 3+;
# components are now constructed directly (gr.Textbox) for both roles.
iface = gr.Interface(
    fn=CsharpGrammarly,
    inputs=gr.Textbox(placeholder="Enter your text here..."),
    outputs=[
        gr.Textbox(label="Modified Grammar"),
        gr.Textbox(label="Sentiment Analysis"),
        gr.Textbox(label="Total Words Count"),
    ],
    title="CSharpGrammarly",
    description="Correct spelling, grammar, and analyze sentiment.",
)
55
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Start the Gradio server (blocks until shutdown).
iface.launch()