Upload app.py with huggingface_hub
app.py CHANGED
@@ -198,13 +198,13 @@ def add_word(english, wtype, article, german):
     if wtype == "noun" and not article:
         return "❌ Please provide the article for nouns.", get_words_table_en_de(), get_words_table_de_en()
 
-    eng_data = load_english()   # { english_word: {artikel, type, translations:[german,...]} }
+    eng_data = load_english()   # { english_word: {type, translations:[german,...]} }
     ger_data = load_german()    # { german_word: {artikel, type, translations:[english,...]} }
 
     # --- Update ENGLISH side ---
     if english not in eng_data:
         eng_data[english] = {
-            "artikel": (article if wtype == "noun" else ""),
+            #"artikel": (article if wtype == "noun" else ""),
             "type": wtype,
             "translations": [german]
         }
@@ -212,6 +212,8 @@ def add_word(english, wtype, article, german):
         # keep existing artikel/type (don’t override silently)
         if german not in eng_data[english].get("translations", []):
             eng_data[english]["translations"].append(german)
+        if not eng_data[english].get("type"):
+            eng_data[english]["type"] = wtype
 
     # --- Update GERMAN side ---
     if german not in ger_data:
@@ -223,6 +225,10 @@ def add_word(english, wtype, article, german):
     else:
         if english not in ger_data[german].get("translations", []):
             ger_data[german]["translations"].append(english)
+        if wtype == "noun" and not (ger_data[german].get("artikel") or "").strip():
+            ger_data[german]["artikel"] = article
+        if not ger_data[german].get("type"):
+            ger_data[german]["type"] = wtype
 
     save_english(eng_data)
     save_german(ger_data)
@@ -234,14 +240,16 @@ def toggle_article_input(word_type):
 
 def get_words_table_en_de(search_query=""):
     eng_data = load_english()  # Load fresh
+    ger_data = load_german()   # German→English (artikel lives here)
     filtered = {}
     for eng_word, meta in sorted(eng_data.items(), key=lambda kv: kv[0].lower()):
         translations = meta.get("translations", [])
         # check if search matches the word or any translation
         if search_query.lower() in eng_word.lower() or any(search_query.lower() in t.lower() for t in translations):
-
-
-
+            arts = _articles_for_translations(translations, ger_data)
+            artikel_text = ", ".join(arts) if arts else ""   # ✅ Bug 1: no null
+            translations_text = ", ".join(translations)      # ✅ Bug 2: join translations
+            filtered[eng_word] = [eng_word, artikel_text, translations_text, meta.get("type", "")]
     return list(filtered.values())
 
 def get_words_table_de_en(search_query=""):
@@ -259,6 +267,19 @@ def get_words_table_de_en(search_query=""):
 def reset_filter():
     return "", get_words_table_en_de(), get_words_table_de_en()
 
+def _articles_for_translations(translations, ger_data):
+    order = {"der": 0, "die": 1, "das": 2}
+    seen = set()
+    arts = []
+    for g in translations:
+        art = (ger_data.get(g, {}).get("artikel") or "").strip().lower()
+        if art and art not in seen:
+            seen.add(art)
+            arts.append(art)
+    # sort to show as der, die, das (when applicable)
+    arts.sort(key=lambda a: order.get(a, 99))
+    return arts
+
 def sync_from_hf():
     en_de_cache_path = hf_hub_download(repo_id=DATASET_REPO, filename="en_de.json", repo_type="dataset")
     de_en_cache_path = hf_hub_download(repo_id=DATASET_REPO, filename="de_en.json", repo_type="dataset")
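
For reference, a minimal sketch of how the new EN→DE rows are assembled under the data shapes noted in the comments above (en_de.json maps english_word → {type, translations}, de_en.json maps german_word → {artikel, type, translations}). The sample words and the standalone articles_for helper are illustrative only; the app itself uses load_english()/load_german() and _articles_for_translations from the diff.

def articles_for(translations, ger_data):
    # Same idea as _articles_for_translations above: collect the distinct
    # articles of a word's German translations, ordered der, die, das.
    order = {"der": 0, "die": 1, "das": 2}
    arts = []
    for g in translations:
        art = (ger_data.get(g, {}).get("artikel") or "").strip().lower()
        if art and art not in arts:
            arts.append(art)
    return sorted(arts, key=lambda a: order.get(a, 99))

# Illustrative data mirroring the JSON shapes (not real entries from the dataset).
eng_data = {"dog": {"type": "noun", "translations": ["Hund"]}}
ger_data = {"Hund": {"artikel": "der", "type": "noun", "translations": ["dog"]}}

meta = eng_data["dog"]
arts = articles_for(meta["translations"], ger_data)
row = [ "dog", ", ".join(arts), ", ".join(meta["translations"]), meta.get("type", "") ]
print(row)  # ['dog', 'der', 'Hund', 'noun']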