Spaces:
Runtime error
Runtime error
Update main.py
Browse files
main.py
CHANGED
|
@@ -17,15 +17,6 @@ from nltk.stem import WordNetLemmatizer
|
|
| 17 |
def remove_urls(text):
|
| 18 |
return re.sub(r'http[s]?://\S+', '', text)
|
| 19 |
|
| 20 |
-
# Function to remove punctuations from text
|
| 21 |
-
def remove_punctuation(text):
|
| 22 |
-
regular_punct = string.punctuation
|
| 23 |
-
return str(re.sub(r'['+regular_punct+']', '', str(text)))
|
| 24 |
-
|
| 25 |
-
# Function to convert the text into lower case
|
| 26 |
-
def lower_case(text):
|
| 27 |
-
return text.lower()
|
| 28 |
-
|
| 29 |
# Function to lemmatize text
|
| 30 |
def lemmatize(text):
|
| 31 |
wordnet_lemmatizer = WordNetLemmatizer()
|
|
@@ -34,9 +25,17 @@ def lemmatize(text):
|
|
| 34 |
lemma_txt = ''
|
| 35 |
for w in tokens:
|
| 36 |
lemma_txt = lemma_txt + wordnet_lemmatizer.lemmatize(w) + ' '
|
| 37 |
-
|
| 38 |
return lemma_txt
|
| 39 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 40 |
def preprocess_text(text):
|
| 41 |
# Preprocess the input text
|
| 42 |
text = remove_urls(text)
|
|
@@ -57,9 +56,9 @@ async def lifespan(app: FastAPI):
|
|
| 57 |
del sentiment_task
|
| 58 |
|
| 59 |
description = """
|
| 60 |
-
##
|
| 61 |
-
|
| 62 |
-
Check out the docs for the `/
|
| 63 |
"""
|
| 64 |
|
| 65 |
# Initialize the FastAPI app
|
|
@@ -78,7 +77,7 @@ async def welcome():
|
|
| 78 |
MAX_TEXT_LENGTH = 1000
|
| 79 |
|
| 80 |
# Define the sentiment analysis endpoint
|
| 81 |
-
@app.post('/
|
| 82 |
async def classify_text(text_input:TextInput):
|
| 83 |
try:
|
| 84 |
# Convert input data to JSON serializable dictionary
|
|
|
|
| 17 |
def remove_urls(text):
    """Strip every http:// or https:// URL from *text*.

    A URL is taken to be the scheme followed by any unbroken run of
    non-whitespace characters; each match is replaced with the empty
    string, so surrounding spaces are left untouched.
    """
    url_pattern = re.compile(r'http[s]?://\S+')
    return url_pattern.sub('', text)
|
| 19 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 20 |
# Function to lemmatize text
|
| 21 |
def lemmatize(text):
|
| 22 |
wordnet_lemmatizer = WordNetLemmatizer()
|
|
|
|
| 25 |
lemma_txt = ''
|
| 26 |
for w in tokens:
|
| 27 |
lemma_txt = lemma_txt + wordnet_lemmatizer.lemmatize(w) + ' '
|
|
|
|
| 28 |
return lemma_txt
|
| 29 |
|
| 30 |
+
# Function to remove punctuations from text
|
| 31 |
+
# Function to remove punctuations from text
def remove_punctuation(text):
    """Remove every ASCII punctuation character from *text*.

    Non-string inputs are coerced with ``str()`` first, preserving the
    original behaviour.

    ``string.punctuation`` contains regex character-class
    metacharacters (``]``, ``\\``, ``^``, ``-``); the original code
    concatenated them raw into ``[...]`` and only parsed by accident of
    their ordering. ``re.escape`` makes the class well-formed
    regardless of ordering, with identical matching behaviour.
    """
    regular_punct = string.punctuation
    return re.sub(r'[' + re.escape(regular_punct) + r']', '', str(text))
|
| 34 |
+
|
| 35 |
+
# Function to convert the text into lower case
|
| 36 |
+
# Function to convert the text into lower case
def lower_case(text):
    """Return *text* with every cased character folded to lower case."""
    lowered = str.lower(text)
    return lowered
|
| 38 |
+
|
| 39 |
def preprocess_text(text):
|
| 40 |
# Preprocess the input text
|
| 41 |
text = remove_urls(text)
|
|
|
|
| 56 |
del sentiment_task
|
| 57 |
|
| 58 |
description = """
|
| 59 |
+
## This API provides text classification capabilities using a pre-trained model for sentiment analysis.
|
| 60 |
+
It allows users to analyze the sentiment of text inputs and obtain the corresponding sentiment labels.
|
| 61 |
+
Check out the docs for the `/input/{text}` endpoint below to try it out!
|
| 62 |
"""
|
| 63 |
|
| 64 |
# Initialize the FastAPI app
|
|
|
|
| 77 |
MAX_TEXT_LENGTH = 1000
|
| 78 |
|
| 79 |
# Define the sentiment analysis endpoint
|
| 80 |
+
@app.post('/input/{text}')
|
| 81 |
async def classify_text(text_input:TextInput):
|
| 82 |
try:
|
| 83 |
# Convert input data to JSON serializable dictionary
|