Spaces:
Sleeping
Sleeping
import gradio as gr
import nltk
import re

from nltk.corpus import wordnet

# One-time NLTK data downloads (no-ops when the data is already cached).
# The duplicate 'wordnet' download in the original has been removed.
nltk.download('punkt')
nltk.download('wordnet')
nltk.download("averaged_perceptron_tagger")
def find_antonyms(word, tag):
    """Collect WordNet antonyms for *word* when its POS tag is eligible.

    Eligible tags: adjectives (JJ*); adverbs (RB*) except the negation
    words "not"/"no" themselves; verbs (VB*) except auxiliaries.

    Returns a list of antonym lemma names, deduplicated in first-seen
    order (callers rely on index 0), or an empty list when the word is
    ineligible or has no antonyms.
    """
    eligible = (
        tag.startswith('JJ')
        or (tag.startswith('RB') and word not in ("not", "no"))
        or (tag.startswith('VB') and word not in (
            "be", "have", "do", "will", "shall", "are", "is", "was", "were"))
    )
    if not eligible:
        return []
    antonyms = []
    for syn in wordnet.synsets(word):
        for lemma in syn.lemmas():
            ants = lemma.antonyms()  # call once instead of twice per lemma
            if ants:
                antonyms.append(ants[0].name())
    # Deduplicate while preserving the first occurrence.
    return list(dict.fromkeys(antonyms))
def insert_not(sentence):
    """Insert "not" after the first eligible auxiliary verb in *sentence*.

    At most one "not" is inserted, and none after an auxiliary that is
    already followed by "not".  The result is the token list re-joined
    with single spaces, so spacing/punctuation may differ from the input
    even when nothing was inserted.
    """
    words = nltk.word_tokenize(sentence)
    # Set for O(1) membership.  The nltk.pos_tag call in the original was
    # dead work: the tags were never read, only the words.
    aux_verbs = {"be", "have", "do", "will", "shall", "are", "is", "was",
                 "were", "am", "does", "has", "had", "can", "could", "may",
                 "might", "must", "should", "would"}
    new_words = []
    inserted = False
    for i, word in enumerate(words):
        new_words.append(word)
        if not inserted and word.lower() in aux_verbs:
            # Insert unless the next token is already "not".  An auxiliary
            # at the very end of the sentence is also negated now — the
            # original's `i + 1 < len(words)` guard silently skipped it.
            if i + 1 >= len(words) or words[i + 1].lower() != 'not':
                new_words.append("not")
                inserted = True
    return " ".join(new_words)
def modify_sentence(sentence):
    """Negate *sentence*.

    Strategy: first try inserting "not" after an auxiliary verb; if that
    changed nothing, replace antonym-bearing words with their antonyms;
    if that also changed nothing, fall back to insert_not's output.
    """
    words = nltk.word_tokenize(sentence)
    pos_tags = nltk.pos_tag(words)
    negated = insert_not(sentence)
    # insert_not re-joins tokens with spaces, so comparing *character*
    # lengths against the raw input (as the original did) misfires
    # whenever tokenization alone changes spacing (e.g. punctuation).
    # Token counts are reliable: insert_not only ever appends "not".
    if len(negated.split()) != len(words):
        return negated
    new_words = []
    for word, tag in pos_tags:
        antonyms = find_antonyms(word, tag)
        new_words.append(antonyms[0] if antonyms else word)
    new_sentence = " ".join(new_words)
    # Compare against the space-joined tokens, not the raw input, so that
    # tokenization spacing does not mask an unchanged sentence.
    if new_sentence == " ".join(words):
        new_sentence = insert_not(sentence)
    return new_sentence
def remove_no_not(sentence):
    """Strip the negation tokens "no", "not" and "n't" from *sentence*.

    "no"/"not" are matched as whole words.  "n't" ends in an apostrophe
    (a non-word character), so a trailing \\b can never be satisfied
    before a space or end of string — the original pattern therefore
    never actually removed "n't".  It is now matched literally.
    """
    pattern = r"\b(?:no|not)\b|n't"
    cleaned_sentence = re.sub(pattern, '', sentence)
    # Collapse the double spaces the removals leave behind.
    cleaned_sentence = re.sub(r'\s+', ' ', cleaned_sentence).strip()
    return cleaned_sentence
def transform_contractions(sentence):
    """Replace negated contractions with their affirmative base form
    (e.g. "don't" -> "do", "won't" -> "will")."""
    replacements = {
        "don't": "do",
        "doesn't": "does",
        "won't": "will",
        "isn't": "is",
        "aren't": "are",
        "wasn't": "was",
        "weren't": "were",
        "haven't": "have",
        "hasn't": "has",
        "hadn't": "had",
        "can't": "can",
        "couldn't": "could",
        "shouldn't": "should",
        "mightn't": "might",
        "mustn't": "must"
        # Add any other contractions you need to handle
    }
    # One compiled alternation instead of fifteen sequential re.sub calls;
    # no contraction is a prefix of another, so the result is identical.
    combined = re.compile(
        r'\b(?:' + '|'.join(map(re.escape, replacements)) + r')\b')
    return combined.sub(lambda m: replacements[m.group(0)], sentence)
def clean_text(text):
    """Drop every character that is neither word nor whitespace, then
    trim surrounding whitespace."""
    return re.sub(r'[^\w\s]', '', text).strip()
def main_func(sentence):
    """Flip the polarity of *sentence* via a three-stage pipeline.

    Each stage signals that it changed something through a difference in
    character length, at which point its result is returned immediately.
    """
    sentence = sentence.lower()
    # Stage 1: "don't" -> "do", "can't" -> "can", ...
    expanded = transform_contractions(sentence)
    if len(expanded) != len(sentence):
        return expanded
    # Stage 2: drop explicit "no"/"not"/"n't" tokens.
    stripped = remove_no_not(sentence)
    if len(stripped) != len(sentence):
        return stripped
    # Stage 3: insert "not" or substitute antonyms.
    return modify_sentence(sentence)
# Gradio UI: one text box in, the transformed sentence out.  The CSS rule
# hides Gradio's default footer.
demo = gr.Interface(title="POS/NEG Sentence APP", fn=main_func, inputs="text", outputs="text", css="footer {visibility: hidden}", examples=[
    ["I hate football"],
    ["Don't I like that food?"],
    ["I can't Swim"],
    ["Onion price is increasing"]])
if __name__ == "__main__":
    # show_api=False hides the "Use via API" link on the rendered page.
    demo.launch(show_api=False)