Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,6 +1,7 @@
|
|
| 1 |
from fastapi import FastAPI
|
| 2 |
from pydantic import BaseModel
|
| 3 |
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
|
|
|
|
| 4 |
from duckduckgo_search import DDGS
|
| 5 |
from fastapi.middleware.cors import CORSMiddleware
|
| 6 |
from mtranslate import translate
|
|
@@ -26,21 +27,17 @@ tokenizer_c, model_c = None, None
|
|
| 26 |
|
| 27 |
def load_models():
|
| 28 |
global tokenizer, model, bert_tokenizer, bert_model, tokenizer_c, model_c
|
| 29 |
-
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModelForSequenceClassification
|
| 30 |
-
|
| 31 |
if tokenizer is None or model is None:
|
| 32 |
tokenizer = AutoTokenizer.from_pretrained("fatmata/gpt-psybot")
|
| 33 |
model = AutoModelForCausalLM.from_pretrained("fatmata/gpt-psybot")
|
| 34 |
-
|
| 35 |
if bert_tokenizer is None or bert_model is None:
|
| 36 |
bert_tokenizer = AutoTokenizer.from_pretrained("fatmata/bert_model")
|
| 37 |
bert_model = AutoModelForSequenceClassification.from_pretrained("fatmata/bert_model")
|
| 38 |
-
|
| 39 |
if tokenizer_c is None or model_c is None:
|
| 40 |
tokenizer_c = AutoTokenizer.from_pretrained("fatmata/mini_bert")
|
| 41 |
model_c = AutoModelForSequenceClassification.from_pretrained("fatmata/mini_bert")
|
| 42 |
|
| 43 |
-
# ---------------- Nettoyage
|
| 44 |
def clean_response(text):
|
| 45 |
text = re.sub(r'<[^>]+>', '', text)
|
| 46 |
text = re.split(r'</(Bot|name|opinion|User|[a-zA-Z]*)>', text)[0]
|
|
@@ -84,7 +81,7 @@ def search_duckduckgo(query, max_results=3):
|
|
| 84 |
try:
|
| 85 |
results = list(DDGS().text(query, max_results=max_results))
|
| 86 |
return [r["body"] for r in results if "body" in r] or ["Je n'ai pas trouvé d'informations."]
|
| 87 |
-
except
|
| 88 |
return [f"Erreur recherche : {str(e)}"]
|
| 89 |
|
| 90 |
def generate_response(user_input):
|
|
@@ -133,12 +130,10 @@ def classify_and_respond(text, original_lang):
|
|
| 133 |
response = search_duckduckgo(text)
|
| 134 |
translated_response = [translate(r, original_lang) for r in response]
|
| 135 |
return translated_response, "recherche", [], ["Recherche externe"]
|
| 136 |
-
|
| 137 |
compound, is_unacceptable, emotions = classify_emotion(text)
|
| 138 |
if is_unacceptable and abs(compound)>50:
|
| 139 |
alert = translate("Je ressens beaucoup de tension dans votre message. Essayez de vous calmer.", original_lang)
|
| 140 |
return [alert], "non acceptable", emotions, ["Emotion inacceptable"]
|
| 141 |
-
|
| 142 |
gpt_response = generate_response(text)
|
| 143 |
translated_gpt = translate(gpt_response, original_lang)
|
| 144 |
return [translated_gpt], "gpt", emotions, ["Réponse GPT"]
|
|
|
|
| 1 |
from fastapi import FastAPI
|
| 2 |
from pydantic import BaseModel
|
| 3 |
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
|
| 4 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModelForSequenceClassification
|
| 5 |
from duckduckgo_search import DDGS
|
| 6 |
from fastapi.middleware.cors import CORSMiddleware
|
| 7 |
from mtranslate import translate
|
|
|
|
| 27 |
|
| 28 |
def load_models():
    """Ensure all three Hugging Face models are loaded into the module globals.

    Safe to call repeatedly: each tokenizer/model pair is fetched from the Hub
    only while it is still unset (``None``), so repeated calls are cheap no-ops
    once everything is in memory.
    """
    global tokenizer, model, bert_tokenizer, bert_model, tokenizer_c, model_c

    # Conversational GPT model used to generate chatbot replies.
    gpt_repo = "fatmata/gpt-psybot"
    if tokenizer is None or model is None:
        tokenizer = AutoTokenizer.from_pretrained(gpt_repo)
        model = AutoModelForCausalLM.from_pretrained(gpt_repo)

    # BERT sequence classifier (presumably sentiment/acceptability — confirm with callers).
    bert_repo = "fatmata/bert_model"
    if bert_tokenizer is None or bert_model is None:
        bert_tokenizer = AutoTokenizer.from_pretrained(bert_repo)
        bert_model = AutoModelForSequenceClassification.from_pretrained(bert_repo)

    # Lightweight mini-BERT classifier.
    mini_repo = "fatmata/mini_bert"
    if tokenizer_c is None or model_c is None:
        tokenizer_c = AutoTokenizer.from_pretrained(mini_repo)
        model_c = AutoModelForSequenceClassification.from_pretrained(mini_repo)
|
| 39 |
|
| 40 |
+
# ---------------- Nettoyage ----------------
|
| 41 |
def clean_response(text):
|
| 42 |
text = re.sub(r'<[^>]+>', '', text)
|
| 43 |
text = re.split(r'</(Bot|name|opinion|User|[a-zA-Z]*)>', text)[0]
|
|
|
|
| 81 |
def search_duckduckgo(query, max_results=3):
    """Run a DuckDuckGo text search and return a list of result snippets.

    Returns the ``body`` field of each hit, a French fallback message when no
    bodies were found, or a French error message when the search itself fails.
    """
    try:
        results = list(DDGS().text(query, max_results=max_results))
        return [r["body"] for r in results if "body" in r] or ["Je n'ai pas trouvé d'informations."]
    # BUG FIX: the original handler was a bare `except:` yet the body used `e`,
    # so any search failure raised NameError instead of returning the message.
    # Bind the exception explicitly (and avoid swallowing SystemExit/KeyboardInterrupt).
    except Exception as e:
        return [f"Erreur recherche : {str(e)}"]
|
| 86 |
|
| 87 |
def generate_response(user_input):
|
|
|
|
| 130 |
response = search_duckduckgo(text)
|
| 131 |
translated_response = [translate(r, original_lang) for r in response]
|
| 132 |
return translated_response, "recherche", [], ["Recherche externe"]
|
|
|
|
| 133 |
compound, is_unacceptable, emotions = classify_emotion(text)
|
| 134 |
if is_unacceptable and abs(compound)>50:
|
| 135 |
alert = translate("Je ressens beaucoup de tension dans votre message. Essayez de vous calmer.", original_lang)
|
| 136 |
return [alert], "non acceptable", emotions, ["Emotion inacceptable"]
|
|
|
|
| 137 |
gpt_response = generate_response(text)
|
| 138 |
translated_gpt = translate(gpt_response, original_lang)
|
| 139 |
return [translated_gpt], "gpt", emotions, ["Réponse GPT"]
|