Update app.py
Browse files
app.py
CHANGED
|
@@ -359,28 +359,28 @@ def clean_text(text,doc=False,plain_text=False,url=False):
|
|
| 359 |
|
| 360 |
|
| 361 |
|
| 362 |
-
# allow_output_mutation: the spaCy Language object is mutable and unhashable,
# so Streamlit must not hash-check the cached value on every rerun.
@st.cache(allow_output_mutation=True, suppress_st_warning=True)
def get_spacy():
    """Load and cache the large English spaCy pipeline (expensive; done once)."""
    nlp = en_core_web_lg.load()
    return nlp
|
| 366 |
|
| 367 |
-
# allow_output_mutation: the transformers pipeline object is unhashable;
# bare @st.cache would raise/warn when re-validating the cached model.
@st.cache(allow_output_mutation=True, suppress_st_warning=True)
def facebook_model():
    """Load and cache the BART-large-CNN summarization pipeline."""
    summarizer = pipeline('summarization', model='facebook/bart-large-cnn')
    return summarizer
|
| 372 |
|
| 373 |
-
# allow_output_mutation: the transformers pipeline object is unhashable;
# bare @st.cache would raise/warn when re-validating the cached model.
@st.cache(allow_output_mutation=True, suppress_st_warning=True)
def schleifer_model():
    """Load and cache the DistilBART-CNN summarization pipeline."""
    summarizer = pipeline('summarization', model='sshleifer/distilbart-cnn-12-6')
    return summarizer
|
| 378 |
|
| 379 |
-
# allow_output_mutation: the SentenceTransformer instance is unhashable;
# skip Streamlit's hash check on the cached object.
@st.cache(allow_output_mutation=True, suppress_st_warning=True)
def get_sentence_embedding_model():
    """Load and cache the MiniLM sentence-embedding model."""
    return SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
|
| 382 |
|
| 383 |
-
@st.cache
|
| 384 |
def get_ner_pipeline():
|
| 385 |
tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-large-finetuned-conll03-english")
|
| 386 |
model = AutoModelForTokenClassification.from_pretrained("xlm-roberta-large-finetuned-conll03-english")
|
|
|
|
| 359 |
|
| 360 |
|
| 361 |
|
| 362 |
+
@st.cache(allow_output_mutation=True, suppress_st_warning=True)
def get_spacy():
    """Return the cached large English spaCy pipeline.

    Cached so the expensive model load happens only once per session.
    """
    return en_core_web_lg.load()
|
| 366 |
|
| 367 |
+
@st.cache(allow_output_mutation=True, suppress_st_warning=True)
def facebook_model():
    """Return the cached BART-large-CNN summarization pipeline."""
    # Built once and reused; allow_output_mutation skips hashing the
    # unhashable pipeline object.
    return pipeline('summarization', model='facebook/bart-large-cnn')
|
| 372 |
|
| 373 |
+
@st.cache(allow_output_mutation=True, suppress_st_warning=True)
def schleifer_model():
    """Return the cached DistilBART-CNN summarization pipeline."""
    # Built once and reused; allow_output_mutation skips hashing the
    # unhashable pipeline object.
    return pipeline('summarization', model='sshleifer/distilbart-cnn-12-6')
|
| 378 |
|
| 379 |
+
@st.cache(allow_output_mutation=True, suppress_st_warning=True)
def get_sentence_embedding_model():
    """Return the cached MiniLM sentence-embedding model."""
    embedder = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
    return embedder
|
| 382 |
|
| 383 |
+
@st.cache(allow_output_mutation=True,suppress_st_warning=True)
|
| 384 |
def get_ner_pipeline():
|
| 385 |
tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-large-finetuned-conll03-english")
|
| 386 |
model = AutoModelForTokenClassification.from_pretrained("xlm-roberta-large-finetuned-conll03-english")
|