# Deployment page residue (author avatar line / "Deploying" / commit f639e70)
# commented out so the module parses as valid Python.
# ShadowGard3n's picture
# Deploying
# f639e70
from langchain_core.prompts import ChatPromptTemplate
# Change import from OpenAI to Google GenAI
from langchain_google_genai import ChatGoogleGenerativeAI
# --- PROMPTS (Keep these exactly as they are) ---
# Both prompts share one system persona; only the human task differs.
_SYSTEM_MSG = ("system", "You are a highly experienced and knowledgeable doctor, specializing in all fields of medicine")

# Prompt 1: follow-up questions the doctor should ask next.
message1 = [
    _SYSTEM_MSG,
    ("human", "Based on the conversation transcript of doctor and patient generate 5 bulleted points of questions which the doctor should ask the patient. the transcript is: {transcript}"),
]

# Prompt 2: candidate diagnoses for the patient.
message2 = [
    _SYSTEM_MSG,
    ("human", "Based on the conversation transcript of doctor and patient generate 5 bulleted points of diagnosis which the patient has. the transcript is: {transcript}"),
]

prompt_template1 = ChatPromptTemplate.from_messages(message1)
prompt_template2 = ChatPromptTemplate.from_messages(message2)
# --- UPDATED ASYNC FUNCTIONS ---
async def llm1(api_key, trans, model="gemini-2.5-flash"):
    """Generate 5 bulleted follow-up questions the doctor should ask.

    Args:
        api_key: Google Generative AI API key (forwarded as ``google_api_key`` —
            note the parameter name differs from OpenAI's ``api_key``).
        trans: Conversation transcript between doctor and patient; substituted
            into the ``{transcript}`` slot of ``prompt_template1``.
        model: Gemini model name; defaults to "gemini-2.5-flash" (previous
            hard-coded value, so existing callers are unaffected).

    Returns:
        The model's response text (``.content`` of the chat result).
    """
    # A fresh client per call keeps the function stateless with respect to keys.
    llm = ChatGoogleGenerativeAI(google_api_key=api_key, model=model)
    prompt = prompt_template1.invoke({"transcript": trans})
    # await + ainvoke: keeps the event loop free during the network call.
    result = await llm.ainvoke(prompt)
    return result.content
async def llm2(api_key, trans, model="gemini-2.5-flash"):
    """Generate 5 bulleted candidate diagnoses from the transcript.

    Args:
        api_key: Google Generative AI API key (forwarded as ``google_api_key``).
        trans: Conversation transcript between doctor and patient; substituted
            into the ``{transcript}`` slot of ``prompt_template2``.
        model: Gemini model name; defaults to "gemini-2.5-flash" (previous
            hard-coded value, so existing callers are unaffected).

    Returns:
        The model's response text (``.content`` of the chat result).
    """
    # A fresh client per call keeps the function stateless with respect to keys.
    llm = ChatGoogleGenerativeAI(google_api_key=api_key, model=model)
    prompt = prompt_template2.invoke({"transcript": trans})
    # await + ainvoke: keeps the event loop free during the network call.
    result = await llm.ainvoke(prompt)
    return result.content
# NOTE: The previous OpenAI-based implementation (ChatOpenAI / gpt-4o-mini,
# including the now-unneeded llm3 speech-merging helper) was removed as dead
# commented-out code; recover it from version-control history if needed.