Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -8,6 +8,7 @@ from transformers import (
|
|
| 8 |
AutoModelForSequenceClassification,
|
| 9 |
pipeline
|
| 10 |
)
|
|
|
|
| 11 |
|
| 12 |
# ────────────────────── konfiguracja ──────────────────────
|
| 13 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
|
@@ -22,8 +23,14 @@ sentiment_model = AutoModelForSequenceClassification.from_pretrained("absa-rober
|
|
| 22 |
sentiment_model.eval()
|
| 23 |
|
| 24 |
# Tłumaczenia
|
| 25 |
-
|
| 26 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 27 |
|
| 28 |
# Alias słownik
|
| 29 |
aspect_aliases = {
|
|
|
|
| 8 |
AutoModelForSequenceClassification,
|
| 9 |
pipeline
|
| 10 |
)
|
| 11 |
+
from transformers import MarianMTModel, MarianTokenizer
|
| 12 |
|
| 13 |
# ────────────────────── konfiguracja ──────────────────────
|
| 14 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
|
|
|
| 23 |
sentiment_model.eval()
|
| 24 |
|
| 25 |
# Tłumaczenia
|
| 26 |
+
|
| 27 |
+
pl_to_en_tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-pl-en")
|
| 28 |
+
pl_to_en_model = MarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-pl-en").to(device)
|
| 29 |
+
pl_to_en = pipeline("translation", model=pl_to_en_model, tokenizer=pl_to_en_tokenizer, device=0 if torch.cuda.is_available() else -1)
|
| 30 |
+
|
| 31 |
+
en_to_pl_tokenizer = MarianTokenizer.from_pretrained("gsarti/opus-mt-tc-en-pl")
|
| 32 |
+
en_to_pl_model = MarianMTModel.from_pretrained("gsarti/opus-mt-tc-en-pl").to(device)
|
| 33 |
+
en_to_pl = pipeline("translation", model=en_to_pl_model, tokenizer=en_to_pl_tokenizer, device=0 if torch.cuda.is_available() else -1)
|
| 34 |
|
| 35 |
# Alias słownik
|
| 36 |
aspect_aliases = {
|