Spaces:
Sleeping
Sleeping
huangjunxin committed on
Commit ·
1206cdb
1
Parent(s): ce37f80
Added count time_elapsed, and return more parameters
Browse files- app/routes/text_translator.py +5 -5
- utils/translate/translate_baichuan.py +11 -2
- utils/translate/translate_google.py +11 -2
- utils/translate/translate_hkbu_chatgpt.py +10 -2
- utils/translate/translate_openai.py +11 -2
- utils/translate/translate_openrouter.py +6 -6
- utils/translate/translate_zhipuai.py +11 -2
app/routes/text_translator.py
CHANGED
|
@@ -52,25 +52,25 @@ def translate_text(source_language, target_language, original_text, tone_of_voic
|
|
| 52 |
elif model == "Volcengine":
|
| 53 |
translated_text = translate_by_volcengine_api(source_language, target_language, original_text)
|
| 54 |
elif model in model_dict_translate_by_hkbu_chatgpt_api:
|
| 55 |
-
translation_sample, translated_text = translate_by_hkbu_chatgpt_api(
|
| 56 |
source_language, target_language, original_text, tone_of_voice, industry,
|
| 57 |
model_dict_translate_by_hkbu_chatgpt_api[model]
|
| 58 |
)
|
| 59 |
elif model in model_dict_translate_by_openrouter_api:
|
| 60 |
-
translation_sample, translated_text = translate_by_openrouter_api(
|
| 61 |
source_language, target_language, original_text, tone_of_voice, industry,
|
| 62 |
model_dict_translate_by_openrouter_api[model]
|
| 63 |
)
|
| 64 |
elif model == "Google Gemini (gemini-pro)":
|
| 65 |
-
translation_sample, translated_text = translate_by_google_api(
|
| 66 |
source_language, target_language, original_text, tone_of_voice, industry
|
| 67 |
)
|
| 68 |
elif model == "Baichuan AI (Baichuan2)":
|
| 69 |
-
translation_sample, translated_text = translate_by_baichuan_api(
|
| 70 |
source_language, target_language, original_text, tone_of_voice, industry
|
| 71 |
)
|
| 72 |
elif model in model_dict_translate_by_zhipuai_api:
|
| 73 |
-
translation_sample, translated_text = translate_by_zhipuai_api(
|
| 74 |
source_language, target_language, original_text, tone_of_voice, industry,
|
| 75 |
model_dict_translate_by_zhipuai_api[model]
|
| 76 |
)
|
|
|
|
| 52 |
elif model == "Volcengine":
|
| 53 |
translated_text = translate_by_volcengine_api(source_language, target_language, original_text)
|
| 54 |
elif model in model_dict_translate_by_hkbu_chatgpt_api:
|
| 55 |
+
translation_sample, rationale, translated_text, res_content, time_elapsed = translate_by_hkbu_chatgpt_api(
|
| 56 |
source_language, target_language, original_text, tone_of_voice, industry,
|
| 57 |
model_dict_translate_by_hkbu_chatgpt_api[model]
|
| 58 |
)
|
| 59 |
elif model in model_dict_translate_by_openrouter_api:
|
| 60 |
+
translation_sample, rationale, translated_text, res_content, time_elapsed = translate_by_openrouter_api(
|
| 61 |
source_language, target_language, original_text, tone_of_voice, industry,
|
| 62 |
model_dict_translate_by_openrouter_api[model]
|
| 63 |
)
|
| 64 |
elif model == "Google Gemini (gemini-pro)":
|
| 65 |
+
translation_sample, rationale, translated_text, res_content, time_elapsed = translate_by_google_api(
|
| 66 |
source_language, target_language, original_text, tone_of_voice, industry
|
| 67 |
)
|
| 68 |
elif model == "Baichuan AI (Baichuan2)":
|
| 69 |
+
translation_sample, rationale, translated_text, res_content, time_elapsed = translate_by_baichuan_api(
|
| 70 |
source_language, target_language, original_text, tone_of_voice, industry
|
| 71 |
)
|
| 72 |
elif model in model_dict_translate_by_zhipuai_api:
|
| 73 |
+
translation_sample, rationale, translated_text, res_content, time_elapsed = translate_by_zhipuai_api(
|
| 74 |
source_language, target_language, original_text, tone_of_voice, industry,
|
| 75 |
model_dict_translate_by_zhipuai_api[model]
|
| 76 |
)
|
utils/translate/translate_baichuan.py
CHANGED
|
@@ -1,4 +1,5 @@
|
|
| 1 |
import os
|
|
|
|
| 2 |
from langchain_community.chat_models import ChatBaichuan
|
| 3 |
from langchain.schema import HumanMessage
|
| 4 |
from dotenv import load_dotenv
|
|
@@ -14,6 +15,9 @@ baichuan_secret_key = os.environ.get("BAICHUAN_SECRET_KEY")
|
|
| 14 |
def translate_by_baichuan_api(source_language, target_language, original_text, tone_of_voice, industry):
|
| 15 |
# Prompt to provide translation
|
| 16 |
translation_sample, translation_prompt = generate_translation_prompt(source_language, target_language, original_text, tone_of_voice, industry)
|
|
|
|
|
|
|
|
|
|
| 17 |
# Translate by accessing Baichuan API
|
| 18 |
chat = ChatBaichuan(temperature=0.7, baichuan_api_key=baichuan_api_key, baichuan_secret_key=baichuan_secret_key, model='Baichuan2')
|
| 19 |
res = chat(
|
|
@@ -22,7 +26,12 @@ def translate_by_baichuan_api(source_language, target_language, original_text, t
|
|
| 22 |
]
|
| 23 |
)
|
| 24 |
res_content = res.content
|
| 25 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 26 |
rationale, translated_text = extract_json_from_response(target_language, res_content)
|
| 27 |
|
| 28 |
-
return translation_sample, translated_text
|
|
|
|
| 1 |
import os
|
| 2 |
+
import time
|
| 3 |
from langchain_community.chat_models import ChatBaichuan
|
| 4 |
from langchain.schema import HumanMessage
|
| 5 |
from dotenv import load_dotenv
|
|
|
|
| 15 |
def translate_by_baichuan_api(source_language, target_language, original_text, tone_of_voice, industry):
|
| 16 |
# Prompt to provide translation
|
| 17 |
translation_sample, translation_prompt = generate_translation_prompt(source_language, target_language, original_text, tone_of_voice, industry)
|
| 18 |
+
|
| 19 |
+
start_time = time.time()
|
| 20 |
+
|
| 21 |
# Translate by accessing Baichuan API
|
| 22 |
chat = ChatBaichuan(temperature=0.7, baichuan_api_key=baichuan_api_key, baichuan_secret_key=baichuan_secret_key, model='Baichuan2')
|
| 23 |
res = chat(
|
|
|
|
| 26 |
]
|
| 27 |
)
|
| 28 |
res_content = res.content
|
| 29 |
+
|
| 30 |
+
end_time = time.time()
|
| 31 |
+
time_elapsed = end_time - start_time
|
| 32 |
+
print("Time Elapsed:", time_elapsed, "seconds")
|
| 33 |
+
print("Result content:", res_content)
|
| 34 |
+
|
| 35 |
rationale, translated_text = extract_json_from_response(target_language, res_content)
|
| 36 |
|
| 37 |
+
return translation_sample, rationale, translated_text, res_content, time_elapsed
|
utils/translate/translate_google.py
CHANGED
|
@@ -1,4 +1,5 @@
|
|
| 1 |
import os
|
|
|
|
| 2 |
from langchain_google_genai import ChatGoogleGenerativeAI
|
| 3 |
from dotenv import load_dotenv
|
| 4 |
|
|
@@ -12,11 +13,19 @@ google_api_key = os.environ.get("GOOGLE_API_KEY")
|
|
| 12 |
def translate_by_google_api(source_language, target_language, original_text, tone_of_voice, industry):
|
| 13 |
# Prompt to provide translation
|
| 14 |
translation_sample, translation_prompt = generate_translation_prompt(source_language, target_language, original_text, tone_of_voice, industry)
|
|
|
|
|
|
|
|
|
|
| 15 |
# Translate by accessing Google API
|
| 16 |
chat = ChatGoogleGenerativeAI(temperature=0.7, model="gemini-pro")
|
| 17 |
res = chat.invoke(translation_prompt)
|
| 18 |
res_content = res.content
|
| 19 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 20 |
rationale, translated_text = extract_json_from_response(target_language, res_content)
|
| 21 |
|
| 22 |
-
return translation_sample, translated_text
|
|
|
|
| 1 |
import os
|
| 2 |
+
import time
|
| 3 |
from langchain_google_genai import ChatGoogleGenerativeAI
|
| 4 |
from dotenv import load_dotenv
|
| 5 |
|
|
|
|
| 13 |
def translate_by_google_api(source_language, target_language, original_text, tone_of_voice, industry):
|
| 14 |
# Prompt to provide translation
|
| 15 |
translation_sample, translation_prompt = generate_translation_prompt(source_language, target_language, original_text, tone_of_voice, industry)
|
| 16 |
+
|
| 17 |
+
start_time = time.time()
|
| 18 |
+
|
| 19 |
# Translate by accessing Google API
|
| 20 |
chat = ChatGoogleGenerativeAI(temperature=0.7, model="gemini-pro")
|
| 21 |
res = chat.invoke(translation_prompt)
|
| 22 |
res_content = res.content
|
| 23 |
+
|
| 24 |
+
end_time = time.time()
|
| 25 |
+
time_elapsed = end_time - start_time
|
| 26 |
+
print("Time Elapsed:", time_elapsed, "seconds")
|
| 27 |
+
print("Result content:", res_content)
|
| 28 |
+
|
| 29 |
rationale, translated_text = extract_json_from_response(target_language, res_content)
|
| 30 |
|
| 31 |
+
return translation_sample, rationale, translated_text, res_content, time_elapsed
|
utils/translate/translate_hkbu_chatgpt.py
CHANGED
|
@@ -1,4 +1,5 @@
|
|
| 1 |
import os
|
|
|
|
| 2 |
from dotenv import load_dotenv
|
| 3 |
import requests
|
| 4 |
|
|
@@ -31,6 +32,9 @@ def call_hkbu_chatgpt_api(conversation_list, model_name="gpt-35-turbo", temperat
|
|
| 31 |
def translate_by_hkbu_chatgpt_api(source_language, target_language, original_text, tone_of_voice, industry, model_name="gpt-35-turbo-16k"):
|
| 32 |
# Prompt to provide translation
|
| 33 |
translation_sample, translation_prompt = generate_translation_prompt(source_language, target_language, original_text, tone_of_voice, industry)
|
|
|
|
|
|
|
|
|
|
| 34 |
res = ""
|
| 35 |
try:
|
| 36 |
# Translate by accessing HKBU ChatGPT API
|
|
@@ -46,7 +50,11 @@ def translate_by_hkbu_chatgpt_api(source_language, target_language, original_tex
|
|
| 46 |
res_content = 'Error:', e, res
|
| 47 |
return res_content
|
| 48 |
|
| 49 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 50 |
rationale, translated_text = extract_json_from_response(target_language, res_content)
|
| 51 |
|
| 52 |
-
return translation_sample, translated_text
|
|
|
|
| 1 |
import os
|
| 2 |
+
import time
|
| 3 |
from dotenv import load_dotenv
|
| 4 |
import requests
|
| 5 |
|
|
|
|
| 32 |
def translate_by_hkbu_chatgpt_api(source_language, target_language, original_text, tone_of_voice, industry, model_name="gpt-35-turbo-16k"):
|
| 33 |
# Prompt to provide translation
|
| 34 |
translation_sample, translation_prompt = generate_translation_prompt(source_language, target_language, original_text, tone_of_voice, industry)
|
| 35 |
+
|
| 36 |
+
start_time = time.time()
|
| 37 |
+
|
| 38 |
res = ""
|
| 39 |
try:
|
| 40 |
# Translate by accessing HKBU ChatGPT API
|
|
|
|
| 50 |
res_content = 'Error:', e, res
|
| 51 |
return res_content
|
| 52 |
|
| 53 |
+
end_time = time.time()
|
| 54 |
+
time_elapsed = end_time - start_time
|
| 55 |
+
print("Time Elapsed:", time_elapsed, "seconds")
|
| 56 |
+
print("Result content:", res_content)
|
| 57 |
+
|
| 58 |
rationale, translated_text = extract_json_from_response(target_language, res_content)
|
| 59 |
|
| 60 |
+
return translation_sample, rationale, translated_text, res_content, time_elapsed
|
utils/translate/translate_openai.py
CHANGED
|
@@ -1,4 +1,5 @@
|
|
| 1 |
import os
|
|
|
|
| 2 |
from langchain_community.chat_models import ChatOpenAI
|
| 3 |
from langchain.schema import HumanMessage
|
| 4 |
from dotenv import load_dotenv
|
|
@@ -13,6 +14,9 @@ openai_api_key = os.environ.get("OPENAI_API_KEY")
|
|
| 13 |
def translate_by_openai_api(source_language, target_language, original_text, tone_of_voice, industry, model_name="gpt-3.5-turbo-1106"):
|
| 14 |
# Prompt to provide translation
|
| 15 |
translation_sample, translation_prompt = generate_translation_prompt(source_language, target_language, original_text, tone_of_voice, industry)
|
|
|
|
|
|
|
|
|
|
| 16 |
# Translate by accessing OpenAI API
|
| 17 |
chat = ChatOpenAI(temperature=0.7, openai_api_key=openai_api_key, model_name=model_name)
|
| 18 |
res = chat(
|
|
@@ -21,7 +25,12 @@ def translate_by_openai_api(source_language, target_language, original_text, ton
|
|
| 21 |
]
|
| 22 |
)
|
| 23 |
res_content = res.content
|
| 24 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 25 |
rationale, translated_text = extract_json_from_response(target_language, res_content)
|
| 26 |
|
| 27 |
-
return translation_sample, translated_text
|
|
|
|
| 1 |
import os
|
| 2 |
+
import time
|
| 3 |
from langchain_community.chat_models import ChatOpenAI
|
| 4 |
from langchain.schema import HumanMessage
|
| 5 |
from dotenv import load_dotenv
|
|
|
|
| 14 |
def translate_by_openai_api(source_language, target_language, original_text, tone_of_voice, industry, model_name="gpt-3.5-turbo-1106"):
|
| 15 |
# Prompt to provide translation
|
| 16 |
translation_sample, translation_prompt = generate_translation_prompt(source_language, target_language, original_text, tone_of_voice, industry)
|
| 17 |
+
|
| 18 |
+
start_time = time.time()
|
| 19 |
+
|
| 20 |
# Translate by accessing OpenAI API
|
| 21 |
chat = ChatOpenAI(temperature=0.7, openai_api_key=openai_api_key, model_name=model_name)
|
| 22 |
res = chat(
|
|
|
|
| 25 |
]
|
| 26 |
)
|
| 27 |
res_content = res.content
|
| 28 |
+
|
| 29 |
+
end_time = time.time()
|
| 30 |
+
time_elapsed = end_time - start_time
|
| 31 |
+
print("Time Elapsed:", time_elapsed, "seconds")
|
| 32 |
+
print("Result content:", res_content)
|
| 33 |
+
|
| 34 |
rationale, translated_text = extract_json_from_response(target_language, res_content)
|
| 35 |
|
| 36 |
+
return translation_sample, rationale, translated_text, res_content, time_elapsed
|
utils/translate/translate_openrouter.py
CHANGED
|
@@ -55,17 +55,17 @@ def translate_by_openrouter_api(source_language, target_language, original_text,
|
|
| 55 |
temperature=0.7
|
| 56 |
)
|
| 57 |
if isinstance(res, tuple) and res[0] == 'Error':
|
| 58 |
-
|
| 59 |
else:
|
| 60 |
-
|
| 61 |
except Exception as e:
|
| 62 |
-
|
| 63 |
|
| 64 |
end_time = time.time()
|
| 65 |
time_elapsed = end_time - start_time
|
| 66 |
print("Time Elapsed:", time_elapsed, "seconds")
|
| 67 |
-
print("
|
| 68 |
|
| 69 |
-
rationale, translated_text = extract_json_from_response(target_language,
|
| 70 |
|
| 71 |
-
return translation_sample, translated_text
|
|
|
|
| 55 |
temperature=0.7
|
| 56 |
)
|
| 57 |
if isinstance(res, tuple) and res[0] == 'Error':
|
| 58 |
+
res_content = res
|
| 59 |
else:
|
| 60 |
+
res_content = res["choices"][0]["message"]["content"]
|
| 61 |
except Exception as e:
|
| 62 |
+
res_content = 'Error:', e, res
|
| 63 |
|
| 64 |
end_time = time.time()
|
| 65 |
time_elapsed = end_time - start_time
|
| 66 |
print("Time Elapsed:", time_elapsed, "seconds")
|
| 67 |
+
print("Result content:", res_content)
|
| 68 |
|
| 69 |
+
rationale, translated_text = extract_json_from_response(target_language, res_content)
|
| 70 |
|
| 71 |
+
return translation_sample, rationale, translated_text, res_content, time_elapsed
|
utils/translate/translate_zhipuai.py
CHANGED
|
@@ -1,4 +1,5 @@
|
|
| 1 |
import os
|
|
|
|
| 2 |
from zhipuai import ZhipuAI
|
| 3 |
from dotenv import load_dotenv
|
| 4 |
|
|
@@ -17,6 +18,9 @@ zhipuai_api_key = os.environ.get("ZHIPUAI_API_KEY")
|
|
| 17 |
def translate_by_zhipuai_api(source_language, target_language, original_text, tone_of_voice, industry, model_name="glm-3-turbo"):
|
| 18 |
# Prompt to provide translation
|
| 19 |
translation_sample, translation_prompt = generate_translation_prompt(source_language, target_language, original_text, tone_of_voice, industry)
|
|
|
|
|
|
|
|
|
|
| 20 |
# Translate by accessing ZhipuAI API
|
| 21 |
chat = ZhipuAI(api_key=zhipuai_api_key)
|
| 22 |
res = chat.chat.completions.create(
|
|
@@ -27,7 +31,12 @@ def translate_by_zhipuai_api(source_language, target_language, original_text, to
|
|
| 27 |
temperature=0.7
|
| 28 |
)
|
| 29 |
res_content = res.choices[0].message.content
|
| 30 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 31 |
rationale, translated_text = extract_json_from_response(target_language, res_content)
|
| 32 |
|
| 33 |
-
return translation_sample, translated_text
|
|
|
|
| 1 |
import os
|
| 2 |
+
import time
|
| 3 |
from zhipuai import ZhipuAI
|
| 4 |
from dotenv import load_dotenv
|
| 5 |
|
|
|
|
| 18 |
def translate_by_zhipuai_api(source_language, target_language, original_text, tone_of_voice, industry, model_name="glm-3-turbo"):
|
| 19 |
# Prompt to provide translation
|
| 20 |
translation_sample, translation_prompt = generate_translation_prompt(source_language, target_language, original_text, tone_of_voice, industry)
|
| 21 |
+
|
| 22 |
+
start_time = time.time()
|
| 23 |
+
|
| 24 |
# Translate by accessing ZhipuAI API
|
| 25 |
chat = ZhipuAI(api_key=zhipuai_api_key)
|
| 26 |
res = chat.chat.completions.create(
|
|
|
|
| 31 |
temperature=0.7
|
| 32 |
)
|
| 33 |
res_content = res.choices[0].message.content
|
| 34 |
+
|
| 35 |
+
end_time = time.time()
|
| 36 |
+
time_elapsed = end_time - start_time
|
| 37 |
+
print("Time Elapsed:", time_elapsed, "seconds")
|
| 38 |
+
print("Result content:", res_content)
|
| 39 |
+
|
| 40 |
rationale, translated_text = extract_json_from_response(target_language, res_content)
|
| 41 |
|
| 42 |
+
return translation_sample, rationale, translated_text, res_content, time_elapsed
|