Update app.py
app.py
CHANGED
@@ -10,8 +10,8 @@ from fastapi import FastAPI
 from pydantic import BaseModel
 import uvicorn
 
-# Initialize FastAPI
-api_app = FastAPI()
+# Initialize FastAPI
+app = FastAPI()
 
 # Initialize the English text classification pipeline for AI detection
 pipeline_en = pipeline(task="text-classification", model="Hello-SimpleAI/chatgpt-detector-roberta")
@@ -30,8 +30,8 @@ except OSError:
     subprocess.run(["python", "-m", "spacy", "download", "en_core_web_sm"])
     nlp = spacy.load("en_core_web_sm")
 
-# Define
-class TextRequest(BaseModel):
+# Define the input model for FastAPI (for validation)
+class TextInput(BaseModel):
     text: str
 
 # Function to predict the label and score for English text (AI Detection)
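The new TextInput model is what FastAPI validates each JSON request body against. A minimal standalone sketch (illustrative only, not part of this commit) of how that validation behaves:

# Illustrative only: how a TextInput-style pydantic model validates a request body.
from pydantic import BaseModel, ValidationError

class TextInput(BaseModel):
    text: str

payload = {"text": "This sentence will be checked."}
print(TextInput(**payload).text)          # -> "This sentence will be checked."

try:
    TextInput(**{"body": "wrong field"})  # required "text" field is missing
except ValidationError as err:
    print(err)

FastAPI performs this same validation automatically for the routes below and returns a 422 response when the "text" field is missing or has the wrong type.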
@@ -39,80 +39,6 @@ def predict_en(text):
     res = pipeline_en(text)[0]
     return res['label'], res['score']
 
-# Function to remove redundant and meaningless words
-def remove_redundant_words(text):
-    doc = nlp(text)
-    meaningless_words = {"actually", "basically", "literally", "really", "very", "just"}
-    filtered_text = [token.text for token in doc if token.text.lower() not in meaningless_words]
-    return ' '.join(filtered_text)
-
-# Function to capitalize the first letter of sentences and proper nouns
-def capitalize_sentences_and_nouns(text):
-    doc = nlp(text)
-    corrected_text = []
-
-    for sent in doc.sents:
-        sentence = []
-        for token in sent:
-            if token.i == sent.start:  # First word of the sentence
-                sentence.append(token.text.capitalize())
-            elif token.pos_ == "PROPN":  # Proper noun
-                sentence.append(token.text.capitalize())
-            else:
-                sentence.append(token.text)
-        corrected_text.append(' '.join(sentence))
-
-    return '\n'.join(corrected_text)  # Preserve paragraphs by joining sentences with newline
-
-# Function to force capitalization of the first letter of every sentence
-def force_first_letter_capital(text):
-    sentences = text.split(". ")  # Split by period to get each sentence
-    capitalized_sentences = [sentence[0].capitalize() + sentence[1:] if sentence else "" for sentence in sentences]
-    return ". ".join(capitalized_sentences)
-
-# Function to correct tense errors in a sentence
-def correct_tense_errors(text):
-    doc = nlp(text)
-    corrected_text = []
-    for token in doc:
-        if token.pos_ == "VERB" and token.dep_ in {"aux", "auxpass"}:
-            lemma = wordnet.morphy(token.text, wordnet.VERB) or token.text
-            corrected_text.append(lemma)
-        else:
-            corrected_text.append(token.text)
-    return ' '.join(corrected_text)
-
-# Function to correct singular/plural errors
-def correct_singular_plural_errors(text):
-    doc = nlp(text)
-    corrected_text = []
-
-    for token in doc:
-        if token.pos_ == "NOUN":
-            if token.tag_ == "NN":  # Singular noun
-                if any(child.text.lower() in ['many', 'several', 'few'] for child in token.head.children):
-                    corrected_text.append(token.lemma_ + 's')
-                else:
-                    corrected_text.append(token.text)
-            elif token.tag_ == "NNS":  # Plural noun
-                if any(child.text.lower() in ['a', 'one'] for child in token.head.children):
-                    corrected_text.append(token.lemma_)
-                else:
-                    corrected_text.append(token.text)
-        else:
-            corrected_text.append(token.text)
-
-    return ' '.join(corrected_text)
-
-# Function to correct spelling errors
-def correct_spelling(text):
-    words = text.split()
-    corrected_words = []
-    for word in words:
-        corrected_word = spell.correction(word)
-        corrected_words.append(corrected_word)
-    return ' '.join(corrected_words)
-
 # Function to rephrase text and replace words with their synonyms while maintaining form
 def rephrase_with_synonyms(text):
     doc = nlp(text)
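The predict_en helper kept in this hunk unpacks the first result of the transformers text-classification pipeline, which is a dict with 'label' and 'score' keys. A small sketch of that call (illustrative only; assumes the Hello-SimpleAI model can be downloaded):

# Illustrative only: what pipeline_en(text)[0] looks like for the detector model used above.
from transformers import pipeline

pipeline_en = pipeline(task="text-classification", model="Hello-SimpleAI/chatgpt-detector-roberta")
res = pipeline_en("This is a short test sentence.")[0]
print(res)                              # e.g. {'label': 'Human', 'score': 0.97}
label, score = res["label"], res["score"]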
@@ -130,16 +56,9 @@ def rephrase_with_synonyms(text):
             pos_tag = wordnet.ADV
 
         if pos_tag:
-            synonyms =
+            synonyms = get_synonyms_nltk(token.text, pos_tag)
             if synonyms:
-                synonym = synonyms[0]
-                if token.pos_ == "VERB":
-                    if token.tag_ == "VBG":  # Present participle
-                        synonym = synonym + 'ing'
-                    elif token.tag_ in {"VBD", "VBN"}:  # Past tense or past participle
-                        synonym = synonym + 'ed'
-                    elif token.tag_ == "VBZ":  # Third-person singular present
-                        synonym = synonym + 's'
+                synonym = synonyms[0]
                 rephrased_text.append(synonym)
         else:
             rephrased_text.append(token.text)
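The added line calls get_synonyms_nltk, defined further down in this diff, which returns the lemma names of the first WordNet synset for a word. A self-contained sketch of that behavior (illustrative only; assumes the NLTK wordnet corpus is available):

# Illustrative only: first-synset lemma names from WordNet, mirroring get_synonyms_nltk in this diff.
import nltk
nltk.download("wordnet", quiet=True)
from nltk.corpus import wordnet

def get_synonyms_nltk(word, pos):
    synsets = wordnet.synsets(word, pos=pos)
    if synsets:
        return [lemma.name() for lemma in synsets[0].lemmas()]
    return []

print(get_synonyms_nltk("quick", wordnet.ADJ))    # e.g. ['quick', 'speedy']
print(get_synonyms_nltk("asdfgh", wordnet.NOUN))  # [] when WordNet has no synsets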
@@ -148,63 +67,64 @@ def rephrase_with_synonyms(text):
 
     return ' '.join(rephrased_text)
 
-# Function to paraphrase and correct
+# Function to paraphrase and correct text
 def paraphrase_and_correct(text):
-    #
-
-
-
-
-
-
-    paraphrased_text = force_first_letter_capital(paraphrased_text)
-
-    # Apply grammatical corrections
-    paraphrased_text = correct_singular_plural_errors(paraphrased_text)
-    paraphrased_text = correct_tense_errors(paraphrased_text)
-
-    # Rephrase with synonyms while maintaining grammatical forms
-    paraphrased_text = rephrase_with_synonyms(paraphrased_text)
-
-    # Correct spelling errors
-    paraphrased_text = correct_spelling(paraphrased_text)
-
-    return paraphrased_text
-
-# FastAPI Endpoint for AI detection
-@api_app.post("/ai-detection")
-async def ai_detection(request: TextRequest):
-    label, score = predict_en(request.text)
+    # [Place your processing logic here, such as removing redundant words, correcting grammar, etc.]
+    return rephrase_with_synonyms(text)
+
+# Define FastAPI route for AI detection
+@app.post("/ai-detect")
+async def ai_detect(input: TextInput):
+    label, score = predict_en(input.text)
     return {"label": label, "score": score}
 
-# FastAPI Endpoint for paraphrasing and grammar correction
-@api_app.post("/paraphrase")
-async def paraphrase(request: TextRequest):
-    corrected_text = paraphrase_and_correct(request.text)
+# Define FastAPI route for paraphrasing and grammar correction
+@app.post("/paraphrase")
+async def paraphrase(input: TextInput):
+    corrected_text = paraphrase_and_correct(input.text)
     return {"corrected_text": corrected_text}
 
-#
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+# Function to get synonyms using NLTK WordNet
+def get_synonyms_nltk(word, pos):
+    synsets = wordnet.synsets(word, pos=pos)
+    if synsets:
+        lemmas = synsets[0].lemmas()
+        return [lemma.name() for lemma in lemmas]
+    return []
+
+# Set up Gradio UI
+def gradio_ui():
+    with gr.Blocks() as demo:
+        with gr.Tab("AI Detection"):
+            t1 = gr.Textbox(lines=5, label='Text for AI Detection')
+            button1 = gr.Button("🤖 Predict AI Detection")
+            label1 = gr.Textbox(lines=1, label='Predicted Label')
+            score1 = gr.Textbox(lines=1, label='Prediction Score')
+
+            # Connect the prediction function to the Gradio UI
+            button1.click(fn=predict_en, inputs=t1, outputs=[label1, score1])
+
+        with gr.Tab("Paraphrasing & Grammar Correction"):
+            t2 = gr.Textbox(lines=5, label='Text for Paraphrasing and Grammar Correction')
+            button2 = gr.Button("🔄 Paraphrase and Correct")
+            result2 = gr.Textbox(lines=10, label='Corrected Text', placeholder="Corrected and paraphrased text will appear here")
+
+            # Connect the paraphrasing and correction function to the Gradio UI
+            button2.click(fn=paraphrase_and_correct, inputs=t2, outputs=result2)
+
+    # Start Gradio on port 7860 and share the app publicly
+    demo.launch(server_name="0.0.0.0", server_port=7860, share=True)
+
+# Run both FastAPI and Gradio concurrently
+if __name__ == "__main__":
+    import multiprocessing
 
-
-
+    # Run FastAPI server in one process
+    fastapi_process = multiprocessing.Process(target=uvicorn.run, args=(app,), kwargs={"host": "0.0.0.0", "port": 8000})
+    fastapi_process.start()
 
-#
-
+    # Run Gradio interface in another process
+    gradio_ui()
 
-#
-
-uvicorn.run(api_app, host="0.0.0.0", port=8000)
+    # When done, stop both processes
+    fastapi_process.join()
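With this commit the Space exposes two JSON routes, /ai-detect and /paraphrase, alongside the Gradio UI. An illustrative client sketch (not part of the commit; host and port assumed from the uvicorn call above):

# Illustrative only: exercising the two FastAPI routes added in this commit.
# Assumes the server from this app.py is reachable at localhost:8000.
import requests

base = "http://localhost:8000"

r1 = requests.post(f"{base}/ai-detect", json={"text": "Write a haiku about rain."})
print(r1.json())   # {"label": ..., "score": ...}

r2 = requests.post(f"{base}/paraphrase", json={"text": "The results was very good."})
print(r2.json())   # {"corrected_text": ...}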