# team-ai/backend/translator.py
# Author: peichao.dong — "add langgraph example" (commit 7554512)
import ollama
from langchain.chat_models import init_chat_model
from langchain_core.messages import HumanMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnablePassthrough, RunnablePick
from langsmith import traceable

from backend.models import models
# Shared chat model. Every listed field can be overridden per-invoke through
# "first_"-prefixed configurable keys (e.g. "first_model", "first_temperature").
first_llm = init_chat_model(
    model="gpt-4o-mini",
    temperature=0,
    streaming=True,
    configurable_fields=("model", "model_provider", "temperature", "max_tokens"),
    # The prefix disambiguates these fields when a chain wires up several models.
    config_prefix="first",
)
# Step-1 system prompt: produce the initial, information-preserving translation.
# Template variables: {language} (target language), {content} (source text).
_TRANSLATE_SYSTEM = """You are a helpful translator. Translate the following content without losing any information. The language to translate to is {language}.
{content}
"""

prompt = ChatPromptTemplate.from_messages([("system", _TRANSLATE_SYSTEM)])
# Step-2 system prompt: critique a candidate translation and suggest fixes.
# Template variables: {language}, {content}, {initial_translation}.
_EVALUATE_SYSTEM = """You are a helpful translator evaluator. Evaluate the following translation, and give improve suggestions if necessary. The language to translate to is {language}.
Initial Content:'''
{content}
'''
Translated Content:'''
{initial_translation}
'''
"""

evaluate_prompt = ChatPromptTemplate.from_messages([("system", _EVALUATE_SYSTEM)])
# Step-3 system prompt: apply the reviewer's suggestions to the first draft.
# Template variables: {language}, {content}, {initial_translation}, {suggestions}.
# Fixed prompt typos from the original ("optimizor", "Tnitial Translation",
# "acoording") — "Tnitial Translation" broke the cross-reference to the
# "Initial Translation" section the prompt itself defines below.
optimize_prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            """You are a helpful translator optimizer. Optimize the 'Initial Translation' according to the suggestions provided without losing any information. The language to translate to is {language}.
Initial Content:'''
{content}
'''
Initial Translation:'''
{initial_translation}
'''
Suggestions:'''
{suggestions}
'''
Just return the final optimized translation without any other information.
"""
        )
    ]
)
# Step 1: translate the raw content. Input: {"content", "language"}.
chain = prompt | first_llm | StrOutputParser()

# Step 2 (also usable standalone): run the initial translation, then critique it.
# Input: {"content", "language"}; output: suggestion text.
evaluate_chain = (
    {
        "initial_translation": chain,
        "language": RunnablePick("language"),
        "content": RunnablePick("content"),
    }
    | evaluate_prompt
    | first_llm
    | StrOutputParser()
)

# Full translate -> evaluate -> optimize pipeline.
# BUG FIX: the original mapped "initial_translation" to
# RunnablePick("initial_translation") on the caller's input, but callers only
# supply {"content", "language"} (see translateRespond), so the optimize prompt
# never saw the draft. Here the draft is computed ONCE up front and threaded
# through to both the evaluator and the optimize prompt via
# RunnablePassthrough.assign.
optimize_chain = (
    {
        "initial_translation": chain,
        "language": RunnablePick("language"),
        "content": RunnablePick("content"),
    }
    | RunnablePassthrough.assign(
        suggestions=evaluate_prompt | first_llm | StrOutputParser()
    )
    | optimize_prompt
    | first_llm
    | StrOutputParser()
)
@traceable()
def translateRespond(
    message,
    history: list[tuple[str, str]],
    model,
    language,
    max_tokens,
    temperature,
):
    """Translate *message* through the translate -> evaluate -> optimize chain.

    Args:
        message: Source text to translate.
        history: Chat history pairs (unused; kept to match the chat-UI
            callback signature).
        model: Model name; its provider is looked up in ``models``.
        language: Target language; falls back to "Chinese" when falsy.
        max_tokens: Per-call max-token override for ``first_llm``.
        temperature: Per-call temperature override for ``first_llm``.

    Returns:
        The final optimized translation as a string.
    """
    # Single dict lookup with a fallback provider for unregistered models
    # (assumes ``models`` is a mapping — the original membership test implies it).
    provider = models.get(model, "openai")
    return optimize_chain.invoke(
        {"content": message, "language": language if language else "Chinese"},
        # Runtime overrides are routed to first_llm via its "first" config_prefix.
        config={
            "configurable": {
                "first_model": model,
                "first_model_provider": provider,
                "first_max_tokens": max_tokens,
                "first_temperature": temperature,
            }
        },
    )