TiberiuCristianLeon committed
Commit 0f855be · verified · 1 Parent(s): 55b711b

Update app.py

Files changed (1): app.py +21 -694
app.py CHANGED
@@ -8,15 +8,12 @@ import httpx, os
 import polars as pl
 
 hflogging.set_verbosity_error()
- favourite_langs = {"German": "de", "Romanian": "ro", "English": "en", "-----": "-----"}
 df = pl.read_parquet("isolanguages.parquet")
 non_empty_isos = df.slice(1).filter(pl.col("ISO639-1") != "").rows()
 # all_langs = languagecodes.iso_languages_byname
 all_langs = {iso[0]: (iso[1], iso[2], iso[3]) for iso in non_empty_isos} # {'Romanian': ('ro', 'rum', 'ron')}
 iso1toall = {iso[1]: (iso[0], iso[2], iso[3]) for iso in non_empty_isos} # {'ro': ('Romanian', 'rum', 'ron')}
- langs = list(favourite_langs.keys())
- langs.extend(list(all_langs.keys())) # Language options as list, add favourite languages first
-
+
 models = ["Helsinki-NLP", "QUICKMT", "Argos", "HPLT", "HPLT-OPUS", "Google",
           "Helsinki-NLP/opus-mt-tc-bible-big-mul-mul", "Helsinki-NLP/opus-mt-tc-bible-big-mul-deu_eng_nld",
           "Helsinki-NLP/opus-mt-tc-bible-big-mul-deu_eng_fra_por_spa", "Helsinki-NLP/opus-mt-tc-bible-big-deu_eng_fra_por_spa-mul",
@@ -50,714 +47,44 @@ def timer(func):
         return translated_text, message_text
     return translate
 
- def model_to_cuda(model):
-     # Move the model to GPU if available
-     if torch.cuda.is_available():
-         model = model.to('cuda')
-         print("CUDA is available! Using GPU.")
-     else:
-         print("CUDA not available! Using CPU.")
-     return model
-
- def HelsinkiNLPAutoTokenizer(sl, tl, input_text): # deprecated
-     if model_name == "Helsinki-NLP":
-         message_text = f'Translated from {sl} to {tl} with {model_name}.'
-         try:
-             model_name = f"Helsinki-NLP/opus-mt-{sl}-{tl}"
-             tokenizer = AutoTokenizer.from_pretrained(model_name)
-             model = model_to_cuda(AutoModelForSeq2SeqLM.from_pretrained(model_name))
-         except EnvironmentError:
-             try:
-                 model_name = f"Helsinki-NLP/opus-tatoeba-{sl}-{tl}"
-                 tokenizer = AutoTokenizer.from_pretrained(model_name)
-                 model = model_to_cuda(AutoModelForSeq2SeqLM.from_pretrained(model_name))
-                 input_ids = tokenizer.encode(prompt, return_tensors="pt")
-                 output_ids = model.generate(input_ids, max_length=512)
-                 translated_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
-                 return translated_text, message_text
-             except EnvironmentError as error:
-                 return f"Error finding model: {model_name}! Try other available language combination.", error
-
- class Translators:
-     def __init__(self, model_name: str, sl: str, tl: str, input_text: str):
-         self.model_name = model_name
-         self.sl, self.tl = sl, tl
-         self.input_text = input_text
-         self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-         self.max_new_tokens = 512
-
-     def google(self):
-         self.input_text = " ".join(self.input_text.split())
-         url = os.environ['GCLIENT'] + f'sl={self.sl}&tl={self.tl}&q={self.input_text}'
-         response = httpx.get(url)
-         return response.json()[0][0][0]
-
-     def simplepipe(self):
-         try:
-             pipe = pipeline("translation", model=self.model_name, device=self.device)
-             translation = pipe(self.input_text)
-             message = f'Translated from {iso1toall[self.sl][0]} to {iso1toall[self.tl][0]} with {self.model_name}.'
-             return translation[0]['translation_text'], message
-         except Exception as error:
-             return f"Error translating with model: {self.model_name}! Try other available language combination or model.", error
-
-     def mitre(self):
-         from transformers import AutoModel, AutoTokenizer
-         tokenizer = AutoTokenizer.from_pretrained(self.model_name, trust_remote_code=True, use_fast=False)
-         model = AutoModel.from_pretrained(self.model_name, trust_remote_code=True).to(self.device)
-         model.eval()
-         # Translating from one or several sentences to a sole language
-         src_tokens = tokenizer.encode_source_tokens_to_input_ids(self.input_text, target_language=self.tl)
-         with torch.inference_mode(): # no_grad inference_mode
-             generated_tokens = model.generate(src_tokens)
-         result = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]
-         return result
-
-     def rosetta(self):
-         model = AutoModelForCausalLM.from_pretrained(
-             self.model_name,
-             dtype=torch.bfloat16, # float32 slow
-             low_cpu_mem_usage=False, # True
-             device_map="auto")
-         tokenizer = AutoTokenizer.from_pretrained(self.model_name)
-         system = f"Translate the user's text to {self.tl}. Provide the final translation in a formal tone immediately immediately without any other text."
-         messages = [
-             {"role": "system", "content": system},
-             {"role": "user", "content": self.input_text},
-         ]
-         prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
-         inputs = tokenizer(prompt, return_tensors="pt").to(self.device)
-         input_length = inputs["input_ids"].shape[1]
-         model.eval()
-         with torch.inference_mode():
-             outputs = model.generate(
-                 **inputs,
-                 max_new_tokens=self.max_new_tokens,
-             )
-         generated_tokens = outputs[0][input_length:]
-         translation = tokenizer.decode(generated_tokens, skip_special_tokens=True)
-         return translation
-
-     def niutrans(self):
-         tokenizer = AutoTokenizer.from_pretrained(self.model_name, padding_side='left')
-         model = AutoModelForCausalLM.from_pretrained(self.model_name)
-         prompt = f"Translate the following text from {self.sl} into {self.tl}.\n{self.sl}: {self.input_text}.\n{self.tl}: "
-         messages = [{"role": "user", "content": prompt}]
-         text = tokenizer.apply_chat_template(
-             messages,
-             tokenize=False,
-             add_generation_prompt=True,
-         )
-         model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
-         generated_ids = model.generate(**model_inputs, max_new_tokens=512, num_beams=5, do_sample=False)
-         output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()
-         outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
-         outputs = ''.join(outputs) if isinstance(outputs, list) else outputs
-         return outputs
-
-     def salamandratapipe(self):
-         pipe = pipeline("text-generation", model=self.model_name)
-         messages = [{"role": "user", "content": f"Translate the following text from {self.sl} into {self.tl}.\n{self.sl}: {self.input_text} \n{self.tl}:"}]
-         return pipe(messages, max_new_tokens=self.max_new_tokens, early_stopping=True, num_beams=5)[0]["generated_text"][1]["content"]
-
-     def hplt(self, opus = False):
-         # langs = ['ar', 'bs', 'ca', 'en', 'et', 'eu', 'fi', 'ga', 'gl', 'hi', 'hr', 'is', 'mt', 'nn', 'sq', 'sw', 'zh_hant']
-         hplt_models = ['ar-en', 'bs-en', 'ca-en', 'en-ar', 'en-bs', 'en-ca', 'en-et', 'en-eu', 'en-fi',
-                        'en-ga', 'en-gl', 'en-hi', 'en-hr', 'en-is', 'en-mt', 'en-nn', 'en-sq', 'en-sw',
-                        'en-zh_hant', 'et-en', 'eu-en', 'fi-en', 'ga-en', 'gl-en', 'hi-en', 'hr-en',
-                        'is-en', 'mt-en', 'nn-en', 'sq-en', 'sw-en', 'zh_hant-en']
-         lang_map = {"zh": "zh_hant"}
-         self.sl = lang_map.get(self.sl, self.sl)
-         self.tl = lang_map.get(self.tl, self.tl)
-         if opus:
-             hplt_model = f'HPLT/translate-{self.sl}-{self.tl}-v1.0-hplt_opus' # HPLT/translate-en-hr-v1.0-hplt_opus
-         else:
-             hplt_model = f'HPLT/translate-{self.sl}-{self.tl}-v1.0-hplt' # HPLT/translate-en-hr-v1.0-hplt
-         if f'{self.sl}-{self.tl}' in hplt_models:
-             pipe = pipeline("translation", model=hplt_model, device=self.device)
-             translation = pipe(self.input_text)
-             translated_text = translation[0]['translation_text']
-             message_text = f'Translated from {iso1toall[self.sl][0]} to {iso1toall[self.tl][0]} with {hplt_model}.'
-         else:
-             translated_text = f'HPLT model from {iso1toall[self.sl][0]} to {iso1toall[self.tl][0]} not available!'
-             message_text = f"Available models: {', '.join(hplt_models)}"
-         return translated_text, message_text
-
-     @staticmethod
-     def download_argos_model(available_packages, from_code, to_code):
-         import argostranslate.package
-         print('Downloading model for', from_code, to_code)
-         # Download and install Argos Translate package from path
-         package_to_install = next(
-             filter(lambda x: x.from_code == from_code and x.to_code == to_code, available_packages)
-         )
-         argostranslate.package.install_from_path(package_to_install.download())
-
-     def argos(self):
-         import argostranslate.translate, argostranslate.package
-         argostranslate.package.update_package_index()
-         available_packages = argostranslate.package.get_available_packages()
-         available_slanguages = [lang.from_code for lang in available_packages]
-         available_tlanguages = [lang.to_code for lang in available_packages]
-         available_languages = sorted(list(set(available_slanguages + available_tlanguages)))
-         combos: tuple[str|str] = sorted(list(zip(available_slanguages, available_tlanguages)))
-         packages_info = ', '.join(f"{pkg.from_name} ({pkg.from_code}) -> {pkg.to_name} ({pkg.to_code})" for pkg in available_packages)
-         # print(available_languages, combos, packages_info)
-         if self.sl not in available_languages and self.tl not in available_languages:
-             translated_text = f'''No supported Argos model available from {iso1toall[self.sl][0]} to {iso1toall[self.tl][0]}!
-             Try other model or languages combination from the available Argos models: {', '.join(available_languages)}.'''
-         else:
-             try:
-                 if (self.sl, self.tl) in combos:
-                     self.__class__.download_argos_model(available_packages, self.sl, self.tl) # Download model
-                     translated_text = argostranslate.translate.translate(self.input_text, self.sl, self.tl) # Direct translation
-                 elif (self.sl, 'en') in combos and ('en', self.tl) in combos:
-                     self.__class__.download_argos_model(available_packages, self.sl, 'en') # Download model
-                     translated_pivottext = argostranslate.translate.translate(self.input_text, self.sl, 'en') # Translate to pivot language English
-                     self.__class__.download_argos_model(available_packages, 'en', self.tl) # Download model
-                     translated_text = argostranslate.translate.translate(translated_pivottext, 'en', self.tl) # Translate from pivot language English
-                     message = f'Translated from {iso1toall[self.sl][0]} to {iso1toall[self.tl][0]} with Argos using pivot language English.'
-                 else:
-                     translated_text = f"No Argos model for {iso1toall[self.sl][0]} to {iso1toall[self.tl][0]}. Try other model or languages combination from the available Argos models: {packages_info}."
-             except StopIteration as IterationError:
-                 # packages_info = ', '.join(f"{pkg.get_description()}->{str(pkg.links)} {str(pkg.source_languages)}" for pkg in available_packages)
-                 translated_text = f"No Argos model for {iso1toall[self.sl][0]} to {iso1toall[self.tl][0]}. Error: {IterationError}. Try other model or languages combination from the available Argos models: {packages_info}."
-             except Exception as generalerror:
-                 translated_text = f"General error: {generalerror}"
-         return translated_text
-
-     @staticmethod
-     def quickmttranslate(model_path, input_text):
-         from quickmt import Translator
-         # 'auto' auto-detects GPU, set to "cpu" to force CPU inference
-         # device = 'gpu' if torch.cuda.is_available() else 'cpu'
-         translator = Translator(str(model_path), device="auto", compute_type="auto")
-         # translation = Translator(f"./quickmt-{self.sl}-{self.tl}/", device="auto/cpu", intra_threads=2, inter_threads=2, compute_type="int8")
-         # ctranslate2._ext.Translator(model_path: str, device: str = 'cpu', *, device_index: Union[int, List[int]] = 0, compute_type: Union[str, Dict[str, str]] = 'default',
-         #                             inter_threads: int = 1, intra_threads: int = 0, max_queued_batches: int = 0, flash_attention: bool = False, tensor_parallel: bool = False, files: object = None)
-         # Options for compute_type: default, auto, int8, int8_float32, int8_float16, int8_bfloat16, int16, float16, bfloat16, float32
-         # "int8" will work well for inference on CPU and give "int8_float16" or "int8_bfloat16" a try for GPU inference.
-
-         # (self: ctranslate2._ext.Translator, source: List[List[str]], target_prefix: Optional[List[Optional[List[str]]]] = None, *, max_batch_size: int = 0,
-         #  batch_type: str = 'examples', asynchronous: bool = False, beam_size: int = 2, patience: float = 1, num_hypotheses: int = 1, length_penalty: float = 1,
-         #  coverage_penalty: float = 0, repetition_penalty: float = 1, no_repeat_ngram_size: int = 0, disable_unk: bool = False,
-         #  suppress_sequences: Optional[List[List[str]]] = None, end_token: Optional[Union[str, List[str], List[int]]] = None, return_end_token: bool = False,
-         #  prefix_bias_beta: float = 0, max_input_length: int = 1024, max_decoding_length: int = 256, min_decoding_length: int = 1, use_vmap: bool = False,
-         #  return_scores: bool = False, return_logits_vocab: bool = False, return_attention: bool = False, return_alternatives: bool = False,
-         #  min_alternative_expansion_prob: float = 0, sampling_topk: int = 1, sampling_topp: float = 1, sampling_temperature: float = 1, replace_unknowns: bool = False,
-         #  callback: Callable[[ctranslate2._ext.GenerationStepResult], bool] = None) -> Union[List[ctranslate2._ext.TranslationResult], List[ctranslate2._ext.AsyncTranslationResult]]
-         # set beam size to 1 for faster speed (but lower quality) device="auto/cpu/gpu"
-         translation = translator(input_text, beam_size=5, max_input_length = 512, max_decoding_length = 512)
-         # print(model_path, input_text, translation)
-         return translation
-
-     @staticmethod
-     def quickmtdownload(model_name):
-         from quickmt.hub import hf_download
-         from pathlib import Path
-         model_path = Path("/quickmt/models") / model_name
-         if not model_path.exists():
-             hf_download(
-                 model_name = f"quickmt/{model_name}",
-                 output_dir=Path("/quickmt/models") / model_name,
-             )
-         return model_path
-
-     def quickmt(self):
-         model_name = f"quickmt-{self.sl}-{self.tl}"
-         # from quickmt.hub import hf_list
-         # quickmt_models = [i.split("/quickmt-")[1] for i in hf_list()]
-         # quickmt_models.sort()
-         quickmt_models = ['ar-en', 'bn-en', 'cs-en', 'da-en', 'de-en', 'el-en', 'en-ar', 'en-bn',
-                           'en-cs', 'en-da', 'en-de', 'en-el', 'en-es', 'en-fa', 'en-fr', 'en-he',
-                           'en-hi', 'en-hu', 'en-id', 'en-is', 'en-it', 'en-ja', 'en-ko', 'en-lv', 'en-pl',
-                           'en-pt', 'en-ro', 'en-ru', 'en-sv', 'en-th', 'en-tr', 'en-ur', 'en-vi',
-                           'en-zh', 'es-en', 'fa-en', 'fr-en', 'he-en', 'hi-en', 'hu-en', 'id-en',
-                           'is-en', 'it-en', 'ja-en', 'ko-en', 'lv-en', 'pl-en', 'pt-en', 'ro-en', 'ru-en',
-                           'th-en', 'tr-en', 'ur-en', 'vi-en', 'zh-en']
-         # available_languages = list(set([lang for model in quickmt_models for lang in model.split('-')]))
-         # available_languages.sort()
-         available_languages = ['ar', 'bn', 'cs', 'da', 'de', 'el', 'en', 'es', 'fa', 'fr', 'he',
-                                'hi', 'hu', 'id', 'it', 'is', 'ja', 'ko', 'lv', 'pl', 'pt', 'ro', 'ru',
-                                'sv', 'th', 'tr', 'ur', 'vi', 'zh']
-         # print(quickmt_models, available_languages)
-         # Direct translation model
-         if f"{self.sl}-{self.tl}" in quickmt_models:
-             model_path = Translators.quickmtdownload(model_name)
-             translated_text = Translators.quickmttranslate(model_path, self.input_text)
-             message = f'Translated from {iso1toall[self.sl][0]} to {iso1toall[self.tl][0]} with {model_name}.'
-         # Pivot language English
-         elif self.sl in available_languages and self.tl in available_languages:
-             model_name = f"quickmt-{self.sl}-en"
-             model_path = Translators.quickmtdownload(model_name)
-             entranslation = Translators.quickmttranslate(model_path, self.input_text)
-             model_name = f"quickmt-en-{self.tl}"
-             model_path = Translators.quickmtdownload(model_name)
-             translated_text = Translators.quickmttranslate(model_path, entranslation)
-             message = f'Translated from {iso1toall[self.sl][0]} to {iso1toall[self.tl][0]} with Quickmt using pivot language English.'
-         else:
-             translated_text = f'No Quickmt model available for translation from {iso1toall[self.sl][0]} to {iso1toall[self.tl][0]}!'
-             message = f"Available models: {', '.join(quickmt_models)}"
-         return translated_text, message
-
-     def HelsinkiNLP_mulroa(self):
-         try:
-             pipe = pipeline("translation", model=self.model_name, device=self.device)
-             tgt_lang = iso1toall.get(self.tl)[2] # 'deu', 'ron', 'eng', 'fra'
-             translation = pipe(f'>>{tgt_lang}<< {self.input_text}')
-             return translation[0]['translation_text'], f'Translated from {iso1toall[self.sl][0]} to {iso1toall[self.tl][0]} with {self.model_name}.'
-         except Exception as error:
-             return f"Error translating with model: {self.model_name}! Try other available language combination.", error
-
-     def HelsinkiNLP(self):
-         try: # Standard bilingual model
-             model_name = f"Helsinki-NLP/opus-mt-{self.sl}-{self.tl}"
-             pipe = pipeline("translation", model=model_name, device=self.device)
-             translation = pipe(self.input_text)
-             return translation[0]['translation_text'], f'Translated from {iso1toall[self.sl][0]} to {iso1toall[self.tl][0]} with {model_name}.'
-         except EnvironmentError:
-             try: # Tatoeba models
-                 model_name = f"Helsinki-NLP/opus-tatoeba-{self.sl}-{self.tl}"
-                 pipe = pipeline("translation", model=model_name, device=self.device)
-                 translation = pipe(self.input_text)
-                 return translation[0]['translation_text'], f'Translated from {iso1toall[self.sl][0]} to {iso1toall[self.tl][0]} with {model_name}.'
-             except EnvironmentError as error:
-                 self.model_name = "Helsinki-NLP/opus-mt-tc-bible-big-mul-mul" # Last resort: try multi to multi
-                 return self.HelsinkiNLP_mulroa()
-         except KeyError as error:
-             return f"Error: Translation direction {self.sl} to {self.tl} is not supported by Helsinki Translation Models", error
-
-     def madlad(self):
-         model = T5ForConditionalGeneration.from_pretrained(self.model_name, device_map="auto")
-         tokenizer = T5Tokenizer.from_pretrained(self.model_name)
-         text = f"<2{self.tl}> {self.input_text}"
-         # input_ids = tokenizer(text, return_tensors="pt").input_ids.to(model.device)
-         # outputs = model.generate(input_ids=input_ids, max_new_tokens=512)
-         # return tokenizer.decode(outputs[0], skip_special_tokens=True)
-         # return tokenizer.batch_decode(outputs, skip_special_tokens=True)
-         # Use a pipeline as a high-level helper
-         translator = pipeline('translation', model=model, tokenizer=tokenizer, src_lang=self.sl, tgt_lang=self.tl)
-         translated_text = translator(text, max_length=512)
-         return translated_text[0]['translation_text']
-
-     def flan(self):
-         tokenizer = T5Tokenizer.from_pretrained(self.model_name, legacy=False)
-         model = T5ForConditionalGeneration.from_pretrained(self.model_name)
-         prompt = f"translate {self.sl} to {self.tl}: {self.input_text}"
-         input_ids = tokenizer(prompt, return_tensors="pt").input_ids
-         outputs = model.generate(input_ids)
-         return tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
-
-     def tfive(self):
-         tokenizer = T5Tokenizer.from_pretrained(self.model_name)
-         model = T5ForConditionalGeneration.from_pretrained(self.model_name, device_map="auto")
-         prompt = f"translate {self.sl} to {self.tl}: {self.input_text}"
-         input_ids = tokenizer.encode(prompt, return_tensors="pt")
-         output_ids = model.generate(input_ids, max_length=512)
-         translated_text = tokenizer.decode(output_ids[0], skip_special_tokens=True).strip()
-         return translated_text
-
-     def mbart_many_to_many(self):
-         from transformers import MBartForConditionalGeneration, MBart50TokenizerFast
-         model = MBartForConditionalGeneration.from_pretrained(self.model_name)
-         tokenizer = MBart50TokenizerFast.from_pretrained(self.model_name)
-         # translate source to target
-         tokenizer.src_lang = languagecodes.mbart_large_languages[self.sl]
-         encoded = tokenizer(self.input_text, return_tensors="pt")
-         generated_tokens = model.generate(
-             **encoded,
-             forced_bos_token_id=tokenizer.lang_code_to_id[languagecodes.mbart_large_languages[self.tl]]
-         )
-         return tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]
-
-     def mbart_one_to_many(self):
-         # translate from English
-         from transformers import MBartForConditionalGeneration, MBart50TokenizerFast
-         model = MBartForConditionalGeneration.from_pretrained(self.model_name)
-         tokenizer = MBart50TokenizerFast.from_pretrained(self.model_name, src_lang="en_XX")
-         model_inputs = tokenizer(self.input_text, return_tensors="pt")
-         langid = languagecodes.mbart_large_languages[self.tl]
-         generated_tokens = model.generate(
-             **model_inputs,
-             forced_bos_token_id=tokenizer.lang_code_to_id[langid]
-         )
-         return tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]
-
-     def mbart_many_to_one(self):
-         # translate to English
-         from transformers import MBartForConditionalGeneration, MBart50TokenizerFast
-         model = MBartForConditionalGeneration.from_pretrained(self.model_name)
-         tokenizer = MBart50TokenizerFast.from_pretrained(self.model_name)
-         tokenizer.src_lang = languagecodes.mbart_large_languages[self.sl]
-         encoded = tokenizer(self.input_text, return_tensors="pt")
-         generated_tokens = model.generate(**encoded)
-         return tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]
-
-     def mtom(self):
-         from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
-         model = M2M100ForConditionalGeneration.from_pretrained(self.model_name)
-         tokenizer = M2M100Tokenizer.from_pretrained(self.model_name)
-         tokenizer.src_lang = self.sl
-         encoded = tokenizer(self.input_text, return_tensors="pt")
-         generated_tokens = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id(self.tl))
-         return tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]
-
-     def smallonehundred(self):
-         from transformers import M2M100ForConditionalGeneration
-         from tokenization_small100 import SMALL100Tokenizer
-         model = M2M100ForConditionalGeneration.from_pretrained(self.model_name)
-         tokenizer = SMALL100Tokenizer.from_pretrained(self.model_name)
-         tokenizer.tgt_lang = self.tl
-         encoded_sl = tokenizer(self.input_text, return_tensors="pt")
-         generated_tokens = model.generate(**encoded_sl, max_length=256, num_beams=5)
-         return tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]
-
-     def LegoMT(self):
-         from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
-         model = M2M100ForConditionalGeneration.from_pretrained(self.model_name) # "Lego-MT/Lego-MT"
-         tokenizer = M2M100Tokenizer.from_pretrained(self.model_name)
-         tokenizer.src_lang = self.sl
-         encoded = tokenizer(self.input_text, return_tensors="pt")
-         generated_tokens = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id(self.tl))
-         return tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]
-
-     def bigscience(self):
-         tokenizer = AutoTokenizer.from_pretrained(self.model_name)
-         model = AutoModelForSeq2SeqLM.from_pretrained(self.model_name)
-         self.input_text = self.input_text if self.input_text.endswith('.') else f'{self.input_text}.'
-         inputs = tokenizer.encode(f"Translate to {self.tl}: {self.input_text}", return_tensors="pt")
-         outputs = model.generate(inputs)
-         translation = tokenizer.decode(outputs[0])
-         translation = translation.replace('<pad> ', '').replace('</s>', '')
-         return translation
-
-     def bloomz(self):
-         tokenizer = AutoTokenizer.from_pretrained(self.model_name)
-         model = AutoModelForCausalLM.from_pretrained(self.model_name)
-         self.input_text = self.input_text if self.input_text.endswith('.') else f'{self.input_text}.'
-         # inputs = tokenizer.encode(f"Translate from {self.sl} to {self.tl}: {self.input_text} Translation:", return_tensors="pt")
-         inputs = tokenizer.encode(f"Translate to {self.tl}: {self.input_text}", return_tensors="pt")
-         outputs = model.generate(inputs)
-         translation = tokenizer.decode(outputs[0])
-         translation = translation.replace('<pad> ', '').replace('</s>', '')
-         translation = translation.split('Translation:')[-1].strip() if 'Translation:' in translation else translation.strip()
-         return translation
-
-     def nllb(self):
-         tokenizer = AutoTokenizer.from_pretrained(self.model_name, src_lang=self.sl)
-         # model = AutoModelForSeq2SeqLM.from_pretrained(self.model_name, device_map="auto", torch_dtype=torch.bfloat16)
-         model = AutoModelForSeq2SeqLM.from_pretrained(self.model_name)
-         translator = pipeline('translation', model=model, tokenizer=tokenizer, src_lang=self.sl, tgt_lang=self.tl)
-         translated_text = translator(self.input_text, max_length=512)
-         return translated_text[0]['translation_text']
-
-     def seamlessm4t1(self):
-         from transformers import AutoProcessor, SeamlessM4TModel
-         processor = AutoProcessor.from_pretrained(self.model_name)
-         model = SeamlessM4TModel.from_pretrained(self.model_name)
-         src_lang = iso1toall.get(self.sl)[2] # 'deu', 'ron', 'eng', 'fra'
-         tgt_lang = iso1toall.get(self.tl)[2]
-         text_inputs = processor(text = self.input_text, src_lang=src_lang, return_tensors="pt")
-         output_tokens = model.generate(**text_inputs, tgt_lang=tgt_lang, generate_speech=False)
-         return processor.decode(output_tokens[0].tolist()[0], skip_special_tokens=True)
-
-     def seamlessm4t2(self):
-         from transformers import AutoProcessor, SeamlessM4Tv2ForTextToText
-         processor = AutoProcessor.from_pretrained(self.model_name)
-         model = SeamlessM4Tv2ForTextToText.from_pretrained(self.model_name)
-         src_lang = iso1toall.get(self.sl)[2] # 'deu', 'ron', 'eng', 'fra'
-         tgt_lang = iso1toall.get(self.tl)[2]
-         text_inputs = processor(text=self.input_text, src_lang=src_lang, return_tensors="pt")
-         decoder_input_ids = model.generate(**text_inputs, tgt_lang=tgt_lang)[0].tolist()
-         return processor.decode(decoder_input_ids, skip_special_tokens=True)
-
-     def wingpt(self):
-         model = AutoModelForCausalLM.from_pretrained(
-             self.model_name,
-             torch_dtype="auto",
-             device_map="auto"
-         )
-         tokenizer = AutoTokenizer.from_pretrained(self.model_name)
-         # input_json = '{"input_text": self.input_text}'
-         messages = [
-             {"role": "system", "content": f"Translate this to {self.tl} language"},
-             {"role": "user", "content": self.input_text}
-         ]
-
-         text = tokenizer.apply_chat_template(
-             messages,
-             tokenize=False,
-             add_generation_prompt=True
-         )
-         model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
-
-         generated_ids = model.generate(
-             **model_inputs,
-             max_new_tokens=512,
-             temperature=0.1
-         )
-
-         generated_ids = [
-             output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
-         ]
-         output = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
-         result = output.split('\n')[-1].strip() if '\n' in output else output.strip()
-         return result
-
-     def eurollm(self):
-         tokenizer = AutoTokenizer.from_pretrained(self.model_name)
-         model = AutoModelForCausalLM.from_pretrained(self.model_name)
-         prompt = f"{self.sl}: {self.input_text} {self.tl}:"
-         inputs = tokenizer(prompt, return_tensors="pt")
-         outputs = model.generate(**inputs, max_new_tokens=512)
-         output = tokenizer.decode(outputs[0], skip_special_tokens=True)
-         print(output)
-         # result = output.rsplit(f'{self.tl}:')[-1].strip() if f'{self.tl}:' in output else output.strip()
-         result = output.rsplit(f'{self.tl}:')[-1].strip() if '\n' in output or f'{self.tl}:' in output else output.strip()
-         return result
-
-     def eurollm_instruct(self):
-         tokenizer = AutoTokenizer.from_pretrained(self.model_name)
-         model = AutoModelForCausalLM.from_pretrained(self.model_name)
-         text = f'<|im_start|>system\n<|im_end|>\n<|im_start|>user\nTranslate the following {self.sl} source text to {self.tl}:\n{self.sl}: {self.input_text} \n{self.tl}: <|im_end|>\n<|im_start|>assistant\n'
-         inputs = tokenizer(text, return_tensors="pt")
-         outputs = model.generate(**inputs, max_new_tokens=512)
-         output = tokenizer.decode(outputs[0], skip_special_tokens=True)
-         if f'{self.tl}:' in output:
-             output = output.rsplit(f'{self.tl}:')[-1].strip().replace('assistant\n', '').strip()
-         return output
-
-     def unbabel(self):
-         pipe = pipeline("text-generation", model=self.model_name, torch_dtype=torch.bfloat16, device_map="auto")
-         messages = [{"role": "user",
-                      "content": f"Translate the following text from {self.sl} into {self.tl}.\n{self.sl}: {self.input_text}.\n{self.tl}:"}]
-         prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=False)
-         tokenized_input = pipe.tokenizer(self.input_text, return_tensors="pt")
-         num_input_tokens = len(tokenized_input["input_ids"][0])
-         max_new_tokens = round(num_input_tokens + 0.5 * num_input_tokens)
-         outputs = pipe(prompt, max_new_tokens=max_new_tokens, do_sample=False)
-         translated_text = outputs[0]["generated_text"]
-         print(f"Input chars: {len(self.input_text)}", f"Input tokens: {num_input_tokens}", f"max_new_tokens: {max_new_tokens}",
-               "Chars to tokens ratio:", round(len(self.input_text) / num_input_tokens, 2), f"Raw translation: {translated_text}")
-         markers = ["<end_of_turn>", "<|im_end|>", "<|im_start|>assistant"] # , "\n"
-         for marker in markers:
-             if marker in translated_text:
-                 translated_text = translated_text.split(marker)[1].strip()
-         translated_text = translated_text.replace('Answer:', '', 1).strip() if translated_text.startswith('Answer:') else translated_text
-         translated_text = translated_text.split("Translated text:")[0].strip() if "Translated text:" in translated_text else translated_text
-         split_translated_text = translated_text.split('\n', translated_text.count('\n'))
-         translated_text = '\n'.join(split_translated_text[:self.input_text.count('\n')+1])
-         return translated_text
-
-     def bergamot(model_name: str = 'deen', sl: str = 'de', tl: str = 'en', input_text: str = 'Hallo, mein Freund'):
-         try:
-             import bergamot
-             # input_text = [input_text] if isinstance(input_text, str) else input_text
-             config = bergamot.ServiceConfig(numWorkers=4)
-             service = bergamot.Service(config)
-             model = service.modelFromConfigPath(f"./{model_name}/bergamot.config.yml")
-             options = bergamot.ResponseOptions(alignment=False, qualityScores=False, HTML=False)
-             rawresponse = service.translate(model, bergamot.VectorString(input_text), options)
-             translated_text: str = next(iter(rawresponse)).target.text
-             message_text = f"Translated from {sl} to {tl} with Bergamot {model_name}."
-         except Exception as error:
-             response = error
-         return translated_text, message_text
 
 @timer
 @spaces.GPU
- def translate_text(input_text: str, s_language: str, t_language: str, model_name: str) -> tuple[str, str]:
+ def detect_language(input_text: str) -> tuple[str, float]:
     """
-     Translates the input text from the source language to the target language using a specified model.
+     Detects the language of the input text and returns the detection confidence.
 
     Parameters:
         input_text (str): The source text to be translated
-         s_language (str): The source language of the input text
-         t_language (str): The target language in which the input text is translated
-         model_name (str): The selected translation model name
-
+
     Returns:
         tuple:
-             translated_text(str): The input text translated to the selected target language
-             message_text(str): A descriptive message summarizing the translation process. Example: "Translated from English to German with Helsinki-NLP."
+             langcode(str): The detected ISO 639-1 language code
+             confidence(float): The detection probability as a percentage
 
     Example:
-         >>> translate_text("Hello world", "English", "German", "Helsinki-NLP")
-         ("Hallo Welt", "Translated from English to German with Helsinki-NLP.")
+         >>> detect_language("Hello world")
+         ("en", 100.0)
     """
-
-     sl = all_langs[s_language][0]
-     tl = all_langs[t_language][0]
-     message_text = f'Translated from {s_language} to {t_language} with {model_name}'
-     if not input_text or input_text.strip() == '':
-         translated_text = f'No input text entered!'
-         message_text = 'Please enter a text to translate!'
-         return translated_text, message_text
-     if sl == tl:
-         translated_text = f'Source language {s_language} identical to target language {t_language}!'
-         message_text = 'Please choose different target and source language!'
-         return translated_text, message_text
-     try:
-         if "-mul" in model_name.lower() or "mul-" in model_name.lower() or "-roa" in model_name.lower():
-             translated_text, message_text = Translators(model_name, sl, tl, input_text).HelsinkiNLP_mulroa()
-
-         elif model_name == "Helsinki-NLP":
-             translated_text, message_text = Translators(model_name, sl, tl, input_text).HelsinkiNLP()
-
-         elif model_name == 'Argos':
-             translated_text = Translators(model_name, sl, tl, input_text).argos()
-
-         elif model_name == "QUICKMT":
-             translated_text, message_text = Translators(model_name, sl, tl, input_text).quickmt()
-
-         elif model_name == 'Google':
-             translated_text = Translators(model_name, sl, tl, input_text).google()
-
-         elif model_name == "Helsinki-NLP/opus-mt-tc-bible-big-roa-en":
-             translated_text, message_text = Translators(model_name, sl, tl, input_text).simplepipe()
-
-         elif 'mitre' in model_name.lower():
-             translated_text = Translators(model_name, sl, tl, input_text).mitre()
-
-         elif "m2m" in model_name.lower():
-             translated_text = Translators(model_name, sl, tl, input_text).mtom()
-
-         elif "small100" in model_name.lower():
-             translated_text = Translators(model_name, sl, tl, input_text).smallonehundred()
-
-         elif "rosetta" in model_name.lower():
-             translated_text = Translators(model_name, s_language, t_language, input_text).rosetta()
-
-         elif "lego" in model_name.lower():
-             translated_text = Translators(model_name, sl, tl, input_text).LegoMT()
-
-         elif "niutrans" in model_name.lower():
-             translated_text = Translators(model_name, sl, tl, input_text).niutrans()
-
-         elif "salamandra" in model_name.lower():
-             translated_text = Translators(model_name, s_language, t_language, input_text).salamandratapipe()
-
-         elif model_name.startswith('google-t5'):
-             translated_text = Translators(model_name, s_language, t_language, input_text).tfive()
-
-         elif 'flan' in model_name.lower():
-             translated_text = Translators(model_name, s_language, t_language, input_text).flan()
-
-         elif 'madlad' in model_name.lower():
-             translated_text = Translators(model_name, sl, tl, input_text).madlad()
-
-         elif 'mt0' in model_name.lower():
-             translated_text = Translators(model_name, s_language, t_language, input_text).bigscience()
-
-         elif 'bloomz' in model_name.lower():
-             translated_text = Translators(model_name, s_language, t_language, input_text).bloomz()
-
-         elif 'nllb' in model_name.lower():
-             nnlbsl, nnlbtl = languagecodes.nllb_language_codes[s_language], languagecodes.nllb_language_codes[t_language]
-             translated_text = Translators(model_name, nnlbsl, nnlbtl, input_text).nllb()
-
-         elif model_name == "facebook/mbart-large-50-many-to-many-mmt":
-             translated_text = Translators(model_name, s_language, t_language, input_text).mbart_many_to_many()
-
-         elif model_name == "facebook/mbart-large-50-one-to-many-mmt":
-             translated_text = Translators(model_name, s_language, t_language, input_text).mbart_one_to_many()
-
-         elif model_name == "facebook/mbart-large-50-many-to-one-mmt":
-             translated_text = Translators(model_name, s_language, t_language, input_text).mbart_many_to_one()
-
-         elif model_name == "facebook/seamless-m4t-v2-large":
-             translated_text = Translators(model_name, sl, tl, input_text).seamlessm4t2()
-
-         elif "m4t-medium" in model_name or "m4t-large" in model_name:
-             translated_text = Translators(model_name, sl, tl, input_text).seamlessm4t1()
-
-         elif model_name == "utter-project/EuroLLM-1.7B-Instruct":
-             translated_text = Translators(model_name, s_language, t_language, input_text).eurollm_instruct()
-
-         elif model_name == "utter-project/EuroLLM-1.7B":
-             translated_text = Translators(model_name, s_language, t_language, input_text).eurollm()
-
-         elif 'Unbabel' in model_name:
-             translated_text = Translators(model_name, s_language, t_language, input_text).unbabel()
-
-         elif "winninghealth/WiNGPT" in model_name:
-             translated_text = Translators(model_name, s_language, t_language, input_text).wingpt()
-
-         elif "HPLT" in model_name:
-             if model_name == "HPLT-OPUS":
-                 translated_text, message_text = Translators(model_name, sl, tl, input_text).hplt(opus = True)
-             else:
-                 translated_text, message_text = Translators(model_name, sl, tl, input_text).hplt()
-
-         elif model_name == "Bergamot":
-             translated_text, message_text = Translators(model_name, s_language, t_language, input_text).bergamot()
-
-     except Exception as trerror:
-         translated_text = f'Error in main function "translate_text": {trerror}'
-     finally:
-         print(input_text, translated_text, message_text)
-         return translated_text, message_text
-
- def swap_languages(src_lang, tgt_lang):
-     '''Swap dropdown values for source and target language'''
-     return tgt_lang, src_lang
-
- def get_info(model_name: str, sl: str = None, tl: str = None):
-     helsinki = '### [Helsinki-NLP](https://huggingface.co/Helsinki-NLP "Helsinki-NLP")'
-     if model_name == "Helsinki-NLP" and sl and tl:
-         url = f'https://huggingface.co/{model_name}/opus-mt-{sl}-{tl}/raw/main/README.md'
-         response = httpx.get(url).text
-         if 'Repository not found' in response or 'Invalid username or password' in response:
-             return helsinki
-         return response
-     elif model_name == "Argos":
-         return httpx.get(f'https://huggingface.co/TiberiuCristianLeon/Argostranslate/raw/main/README.md').text
-     elif "HPLT" in model_name:
-         return """[HPLT Uni direction translation models](https://huggingface.co/collections/HPLT/hplt-12-uni-direction-translation-models)
-         ['ar-en', 'bs-en', 'ca-en', 'en-ar', 'en-bs', 'en-ca', 'en-et', 'en-eu', 'en-fi',
-         'en-ga', 'en-gl', 'en-hi', 'en-hr', 'en-is', 'en-mt', 'en-nn', 'en-sq', 'en-sw',
-         'en-zh_hant', 'et-en', 'eu-en', 'fi-en', 'ga-en', 'gl-en', 'hi-en', 'hr-en',
-         'is-en', 'mt-en', 'nn-en', 'sq-en', 'sw-en', 'zh_hant-en']"""
-     elif "QUICKMT" in model_name:
-         return """[QUICKMT](https://huggingface.co/quickmt)
-         ['ar', 'bn', 'cs', 'da', 'de', 'el', 'en', 'es', 'fa', 'fr', 'he',
-         'hi', 'hu', 'id', 'it', 'is', 'ja', 'ko', 'lv', 'pl', 'pt', 'ro', 'ru',
-         'sv', 'th', 'tr', 'ur', 'vi', 'zh']"""
-     elif model_name == "Google":
-         return "Google Translate Online"
-     else:
-         return httpx.get(f'https://huggingface.co/{model_name}/raw/main/README.md').text
-
+     from langdetect import detect, detect_langs
+     from langdetect import DetectorFactory
+     DetectorFactory.seed = 0
+     langcode = detect(input_text)
+     langcode_probabilities = detect_langs(input_text)
+     return langcode, round(number=langcode_probabilities[0].prob * 100, ndigits=2)
 with gr.Blocks() as interface:
     gr.Markdown("### Machine Text Translation with Gradio API and MCP Server")
-     input_text = gr.Textbox(label="Enter text to translate:", placeholder="Type your text here, maximum 512 tokens",
-                             autofocus=True, submit_btn='Translate', max_length=512)
+     input_text = gr.Textbox(label="Enter text to detect:", placeholder="Type your text here, maximum 512 tokens",
+                             autofocus=True, submit_btn='Detect Language', max_length=512)
     with gr.Row(variant="compact"):
-         s_language = gr.Dropdown(choices=langs, value = DEFAULTS[0], label="Source language", interactive=True, scale=2)
-         t_language = gr.Dropdown(choices=langs, value = DEFAULTS[1], label="Target language", interactive=True, scale=2)
-         swap_btn = gr.Button("Swap Languages", size="md", scale=1)
-         swap_btn.click(fn=swap_languages, inputs=[s_language, t_language], outputs=[s_language, t_language], api_visibility="private")
-     # with gr.Row(equal_height=True):
-     model_name = gr.Dropdown(choices=models, label=f"Select a model. Default is {DEFAULTS[2]}.", value=DEFAULTS[2], interactive=True, scale=2)
-     # translate_btn = gr.Button(value="Translate", scale=1)
-
-     translated_text = gr.Textbox(label="Translated text:", placeholder="Display field for translation", interactive=False, buttons=["copy"], lines=2)
-     message_text = gr.Textbox(label="Messages:", placeholder="Display field for status and error messages", interactive=False,
+         detected_text = gr.Textbox(label="Detected language:", placeholder="Display field for detected language", interactive=False, buttons=["copy"], lines=2)
+         confidence = gr.Textbox(label="Confidence:", placeholder="Display field for confidence score", interactive=False,
                                value=f'Default translation settings: from {s_language.value} to {t_language.value} with {model_name.value}.', lines=2)
-     allmodels = gr.HTML(label="Models with links:", value=', '.join([f'<a href="https://huggingface.co/{model}">{model}</a>' for model in models]),
-                         show_label=False, container=True, css_template="""a {padding: 0px;}""")
-     model_info = gr.Markdown(label="Model info:", value=get_info(DEFAULTS[2], DEFAULTS[0], DEFAULTS[1]), buttons=["copy"])
-
-     model_name.change(fn=get_info, inputs=[model_name, s_language, t_language], outputs=model_info, api_visibility="private")
-     # translate_btn.click(
-     #     fn=translate_text,
-     #     inputs=[input_text, s_language, t_language, model_name],
-     #     outputs=[translated_text, message_text]
-     # )
     input_text.submit(
-         fn=translate_text,
-         inputs=[input_text, s_language, t_language, model_name],
-         outputs=[translated_text, message_text]
+         fn=detect_language,
+         inputs=[input_text],
+         outputs=[detected_text, confidence]
     )
 if __name__ == "__main__":
     interface.launch(mcp_server=True, footer_links=["api", "settings"])
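
For reference, the detection step this commit introduces reduces to a few langdetect calls. A minimal standalone sketch (the sample text, printed values, and variable names are illustrative, not part of the commit):

    from langdetect import DetectorFactory, detect, detect_langs

    DetectorFactory.seed = 0  # langdetect is randomized; seeding makes results reproducible

    text = "Hallo, mein Freund"
    langcode = detect(text)          # best single guess, e.g. 'de'
    candidates = detect_langs(text)  # ranked candidates with probabilities, e.g. [de:0.9999...]
    confidence = round(candidates[0].prob * 100, 2)
    print(langcode, confidence)      # e.g. de 100.0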
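
With only one submit handler left, the app exposes a single endpoint over the Gradio API / MCP server. A hypothetical client call, assuming a locally running instance and the default api_name that Gradio derives from the function name:

    from gradio_client import Client

    client = Client("http://127.0.0.1:7860")  # or the Hugging Face Space id
    langcode, confidence = client.predict("Hello world", api_name="/detect_language")
    print(langcode, confidence)  # e.g. en 100.0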